Schema (one row per example):

  query            string    length 9 to 3.4k
  document         string    length 9 to 87.4k
  metadata         dict
  negatives        sequence  length 4 to 101
  negative_scores  sequence  length 4 to 101
  document_score   string    length 3 to 10
  document_rank    string    102 distinct values

Sample rows follow, with each field labeled by its column name.
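A minimal sketch of loading and iterating rows with this schema, assuming a JSON-lines export and the Hugging Face datasets library; the path "train.jsonl" is a placeholder, not the dataset's actual location:

# Minimal sketch: iterate triplet rows from a JSON-lines dump of this dataset.
# The file path "train.jsonl" is a placeholder, not the real dataset location.
from datasets import load_dataset

ds = load_dataset("json", data_files="train.jsonl", split="train")

for row in ds:
    # negatives and negative_scores are parallel sequences (4 to 101 items)
    assert len(row["negatives"]) == len(row["negative_scores"])
    print(row["query"], "->", row["document_score"], "rank", row["document_rank"])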
query: Get DNS entries for a specific domain
document:
def get_domain_dns_records(domain):
    url_suffix = "v1/domains/{}/records".format(domain)
    ret = _call_endpoint(url_suffix)
    if isinstance(ret, dict) and ret.get('code', None) == "UNKNOWN_DOMAIN":
        # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}
        raise Exception(f"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}")
    return ret
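The function above (and the second row's positive, further down) relies on a `_call_endpoint` helper and a `get_domains` lister that the row never defines. A hedged reconstruction, inferred from the GoDaddy patterns that do appear among the negatives (the sso-key authorization header, api.godaddy.com/v1 endpoints, and a `get_domains` snippet); the key and secret are placeholders:

# Hedged reconstruction of the undefined helpers, inferred from the GoDaddy
# API usage visible in the negatives. API_KEY/API_SECRET are placeholders.
import requests

API_BASE = "https://api.godaddy.com/"
API_KEY = "YOUR_API_KEY"        # placeholder
API_SECRET = "YOUR_API_SECRET"  # placeholder

def _call_endpoint(url_suffix):
    headers = {"Authorization": "sso-key {}:{}".format(API_KEY, API_SECRET)}
    return requests.get(API_BASE + url_suffix, headers=headers).json()

def get_domains():
    # mirrors the get_domains() snippet that appears among the negatives
    return [d["domain"] for d in _call_endpoint("v1/domains")]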
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def print_all_dns_records():\n for domain in sorted(get_domains()):\n dns_records = get_domain_dns_records(domain)\n print(domain)\n pprint(dns_records)\n print(\"*\" * 50)\n # TODO: poor man's rate limiter. improve?\n time.sleep(2)", "def __resolve_domain(self, domain=''):\n _ip = []\n if self.__is_ip_address(domain):\n # print hostname + \" is IP address\"\n _ip.append(domain)\n return _ip\n r = dns.resolver.get_default_resolver()\n r.nameservers = ['8.8.8.8']\n #answers = dns.resolver.query(hostname, 'A')\n try:\n answers = r.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n\n if domain.find(\"www.\") != 0:\n domain = \"www.\" + domain\n # print \"querying \" + hostname\n try:\n answers = dns.resolver.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n # print(\"processed %s, it has %d ips.\" % (hostname, len(_ip)))\n\n return list(set(_ip))", "def get(domain_name=None):\n url = 'https://api.cloudns.net/dns/soa-details.json'\n\n params = Parameters({'domain-name': domain_name})\n\n return requests.get(url, params=params.to_dict())", "def infoDnsRecords(self, domainname: str) -> DNSRecordSet:\n response = self._send(self.nc_request(action=\"infoDnsRecords\", parameters={\"domainname\": domainname}))\n\n # build records\n rset = DNSRecordSet(dnsrecords=[])\n for r in response[\"dnsrecords\"]:\n dr = DNSRecord(id=int(r[\"id\"]),\n hostname=r[\"hostname\"],\n type=r[\"type\"],\n priority=int(r[\"priority\"]),\n destination=r[\"destination\"],\n deleterecord=r[\"deleterecord\"],\n state=r[\"state\"])\n\n rset.dnsrecords.append(dr)\n\n return rset", "def dns(self, **kwargs):\n self.logger.debug(f\"Get RealTime DNS data\")\n url_path = 'dns'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def getIPs(self, domain = \"localhost\"):\n # convert 'domain' to string, in case of erroneous type being passed\n domain = str(domain)\n\n # Kind warning for those who entered an IP address instead of a domain\n try: \n inet_aton(domain)\n print(\"Warning: an IP address was given instead of a domain name.\")\n except:\n pass\n\n # Try to query DNS records to populate A-Record IP list\n # Prints errors and returns None if exceptions found\n try:\n iplist = gethost(domain)[2]\n except gaierror as ge:\n if ge.errno == -2:\n print(\"Error: Domain '{}' invalid, or unknown. \"\\\n \"Please check proper spelling and format.\\n\"\\\n \"(e.g.: python dns_get_A_record_IPs.py google.com )\".format(domain))\n elif ge.errno == -3:\n print(\"Error: Domain '{}' unreachable. 
Please check your connection.\".format(domain))\n return None\n except timeout:\n print(\"Error: Connection to {} timed out.\".format(domain))\n return None\n\n return iplist", "def gethostbyname(self, hostname, dnsserv='192.112.36.4'):\n ipaddrlist = []\n cnames = []\n temp = []\n if(self.caching):\n rcache = RecordCache(self.ttl)\n rcord = rcache.lookup(hostname, Type.ANY, Class.IN)\n if(rcord):\n for rec in rcord:\n if rec.type_ == Type.A:\n arec = rec.rdata\n ipaddrlist.append(arec.address)\n elif rec.type_ == Type.CNAME:\n crec = rec.rdata\n cnames.append(crec.cname)\n if ipaddrlist:\n return hostname, cnames, ipaddrlist\n elif cnames:\n return self.gethostbyname(cnames[0], dnsserv)\n \n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(self.timeout)\n\n # Create and send query\n question = Question(Name(str(hostname)), Type.A, Class.IN)\n header = Header(9001, 0, 1, 0, 0, 0)\n header.qr = 0\n header.opcode = 0\n header.rd = 1\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (str(dnsserv), 53))\n\n # Receive response\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n print(\"Number of answers: \" +str(len(response.answers)))\n print(\"Number of authorities: \" + str(len(response.authorities)))\n print(\"Number of additionals: \" + str(len(response.additionals)))\n\n # Get data\n aliaslist = cnames\n ipaddrlist = []\n dnslist = []\n \n while response.answers:\n for answer in response.answers:\n if answer.type_ == Type.A:\n print(\"found A RR\")\n if(self.caching):\n rcache.add_record(answer)\n ipaddrlist.append(answer.rdata.address)\n if answer.type_ == Type.CNAME:\n aliaslist.append(answer.rdata.cname)\n if answer.type_ == Type.NS:\n dnslist.append(answer.rdata.nsdname)\n if ipaddrlist:\n return hostname, aliaslist, ipaddrlist\n elif aliaslist:\n question = Question(Name(aliaslist[0]), Type.A, Class.IN)\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n elif dnslist:\n nsname = dnslist.pop()\n maybe_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_dnsserv:\n dnsserv = maybe_dnsserv\n else:\n pass\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n else:\n break\n\n if response.authorities:\n for authority in response.authorities:\n if authority.type_ != Type.NS:\n pass\n dnslist.append(authority.rdata.nsdname)\n while dnslist:\n nsname = dnslist.pop()\n maybe_next_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_next_dnsserv:\n next_dns_serv = maybe_next_dnsserv\n else:\n pass\n (hname, aliasl, ipaddrl) = self.gethostbyname(hostname, nsname)\n if ipaddrl:\n return hname, aliasl, ipaddrl", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def list_domain_names():\n pass", "def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur", "def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records", "def search(url, domain_list):\n resp = requests.get(url)\n if not 
resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)", "def cli(ctx, domain, ip_address, hostname):\n zone = getzone(domain)\n #print('.%s:%s:%s' % (domain, ip_address, hostname))\n for r in zone:\n if r['type'] == 'CNAME':\n print('C%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'TXT':\n print('\\'%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'MX':\n pass\n elif r['type'] == 'A':\n print('=%s:%s' %( r['name'], r['content']))\n else:\n exit('unknown DNS record type: %s' % r['type'])", "def create_dns_dictionary(self, path_tracefile):\n responses = self.get_dns_responses(path_tracefile)\n dns_dict = dict()\n for response in responses:\n for x in range(response[DNS].ancount): # answer count, how many IP adresses are returned for the query\n try: # answer count could also include 'DNS SRV Resource Record' which does not have a 'rrname' attribute so ancount is wrong if there is such a record -> TODO get amount of DNSRR instead of using ancount\n domain = getattr(response[DNSRR][x], 'rrname').decode(\"utf-8\") # domain (this is returned in bytes so decode)\n ip = getattr(response[DNSRR][x], 'rdata') # IP adres of the domain, TODO make this work for multiple ip adresses for one domain (Test with [0] at end)\n dns_dict[ip] = domain[:-1] #remove last char '.' \n except:\n continue\n return dns_dict", "def query_dns_records(event, context):\n ids = ['SOA', 'TXT', 'MX', 'NS', 'DNSKEY']\n dn = event['queryStringParameters'][query_parameter].lower()\n body = {'scanDate': (datetime.datetime.now(datetime.timezone.utc) +\n datetime.timedelta(hours=8)).isoformat().upper()[:26],\n 'scanRecordTypes': ids,\n 'domain': dn,\n 'records': {}}\n\n try:\n try:\n for record_type in ids:\n try:\n answers = dns.resolver.query(dn, record_type)\n records = []\n for data in answers:\n records.append(data.to_text())\n body['records'][record_type] = records\n except (dns.resolver.NoAnswer, dns.resolver.NoNameservers, dns.exception.Timeout):\n pass # might fail per record_type, perfectly fine\n\n # insert into DynamoDB\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(table_name)\n table.put_item(Item=body)\n status_code = 200\n result = json.dumps(body)\n\n except dns.resolver.NXDOMAIN:\n status_code = 404 # domain no longer exists, or domain not found :)\n result = ''\n\n except KeyError: # insufficient queryStringParameters\n status_code = 400\n result = ''\n\n return {'statusCode': status_code,\n 'headers': headers,\n 'body': result}", "def domain_command():\n # 1. Get input host from Demisto\n domain = demisto.args().get('domain')\n # 2. Get the host reputation from SlashNext API\n response = domain_lookup(domain=domain)\n if response.get('errorNo') != 0:\n return\n # 3. 
Parse and format the response\n dbot_score_cont, domain_cont = get_dbot_std_context(\n domain, 'Domain', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(domain, 'Domain', response.get('threatData'))\n\n ec = {\n 'SlashNext.Domain(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'Domain': domain_cont\n }\n\n domain = domain.encode('idna')\n\n title = 'SlashNext Phishing Incident Response - Domain Lookup\\n' \\\n '##### domain = {}'.format(domain.decode())\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)", "def get_domains(filename):\n with open(filename, 'r') as file:\n result = []\n for line in file.readlines():\n domain = line.strip()[1:]\n result.append(domain)\n return result", "def domain_lookup(domain):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': domain\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response", "def get_urls(self, queries):\n domains = defaultdict(list)\n for q in queries:\n q = \"\\\"\" + q + \"\\\"\"\n results = self.engine.search(q)\n\n for result in results: \n url = result.url\n domain = self.get_domain(url)\n domains[domain].append(q) \n return domains", "def get_domain(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/domain/{id}\")", "def fastlydomain(args):\n pprint(api.domain(service_id, args[0], args[1]).attrs)", "def dict_of_domains(fc):\r\n # need to find root database (GDB or SDE)\r\n db_root = os.path.dirname(fc)\r\n while db_root[-4:].lower() != '.gdb' and db_root[-4:].lower() != '.sde':\r\n old_db_root = db_root # protect against infinite loop\r\n db_root = os.path.dirname(db_root)\r\n if old_db_root == db_root: # protect against infinite loop\r\n break\r\n arcpy.AddMessage(\"Retrieving Domains from \" + str(db_root))\r\n return {domain.name: domain.codedValues for domain in arcpy.da.ListDomains(db_root)}", "def get_ds(self, domain: str, as_json: bool = False):\n formatted_answer = {'domain': domain, 'rr_types': [\"ds\"], 'answer': None}\n\n status, result = Resolver.ctx_dnssec.resolve(domain, rrtype=ub.RR_TYPE_DS)\n\n if status == 0 and result.havedata:\n print(\"ds record returned.\")\n formatted_answer['answer'] = {}\n ds_records_list = result.data.data\n i = 0\n for ds in ds_records_list:\n if as_json:\n formatted_answer['answer'][i] = str(ds)\n else:\n formatted_answer['answer'][i] = ds\n i += 0\n elif status != 0: # throw/raise error\n print(\"Resolve error: \", ub.ub_strerror(status))\n elif result.havedata == 0: # if no data in result\n print(\"No data.\")\n if as_json:\n return json.dumps(formatted_answer)\n return DNSFormattedResponse(formatted_answer)", "def get_dns(self):\n dns = []\n for id, user in self.users_by_id.items():\n if not user.dns:\n continue\n for dn in user.dns:\n dns.append(dn)\n return dns", "def fetch_domain_certs(domain):\n url = BASE_URL.format(domain)\n result = requests.get(url)\n if result.status_code != 200:\n result.raise_for_status()\n return result.json()", "def info(self):\n\n return self.call(method='getDomain', args=[self.domainname])", "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in 
response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains", "def list_keystone_v3_domains(self):\n LOG_OBJ.debug(\"List the domains.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Listing domains Failed with status %s \"\n \"and error : %s\" % response.status, response.data)\n print (\" Listing domains Failed with status %s and error : %s\" %\n response.status, response.data)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains list : %s \" % output)\n print (\"Domains list : %s \" % output)\n return output['domains']", "def list_domain_names(self) -> Dict:\n pass", "def resolv(hostname):\n\n ips = list()\n\n # Create resolver object\n res = resolver.Resolver()\n\n # Choose the correct DNS servers\n # Blue DNS servers\n if hostname.startswith('b-'):\n res.nameservers = ['172.16.2.10', '172.16.2.11']\n # Green DNS servers\n elif hostname.startswith('g-'):\n res.nameservers = ['10.0.2.10', '10.0.2.11']\n # Default to white DNS servers\n else:\n res.nameservers = ['194.47.252.134', '194.47.252.135']\n\n # Query\n try:\n query = res.query(hostname)\n for answer in query:\n ips.append(answer.address)\n except resolver.NXDOMAIN:\n raise CouldNotResolv\n\n # Return query result\n return ips", "def getNodeDNS(self,node):\n data = self.connect('get','nodes/%s/dns' % (node),None)\n return data", "def test_dns(self):\n rv = extract_ids(X509_DNS_ONLY)\n assert [\n DNSPattern(b\"www.twistedmatrix.com\"),\n DNSPattern(b\"twistedmatrix.com\")\n ] == rv", "def _read_dns_(dns, cnt):\r\n \r\n dn_names = None\r\n dn_ids = None\r\n dn_iaps = [None]*10\r\n \r\n for dn in dns.DN:\r\n if dn.ref == 'Name':\r\n dn_names = dn.value\r\n if dn.ref == 'DNId':\r\n dn_ids = dn.value\r\n if dn.ref == 'IAP':\r\n dn_iaps[0] = dn.value\r\n if dn.ref == 'IAP2':\r\n dn_iaps[1] = dn.value\r\n if dn.ref == 'IAP3':\r\n dn_iaps[2] = dn.value\r\n if dn.ref == 'IAP4':\r\n dn_iaps[3] = dn.value\r\n if dn.ref == 'IAP5':\r\n dn_iaps[4] = dn.value\r\n if dn.ref == 'IAP6':\r\n dn_iaps[5] = dn.value\r\n if dn.ref == 'IAP7':\r\n dn_iaps[6] = dn.value\r\n if dn.ref == 'IAP8':\r\n dn_iaps[7] = dn.value\r\n if dn.ref == 'IAP9':\r\n dn_iaps[8] = dn.value\r\n if dn.ref == 'IAP10':\r\n dn_iaps[9] = dn.value\r\n \r\n logger.info('Parsed DN names: %s' % dn_names)\r\n logger.info('Parsed DN ids: %s' % dn_ids)\r\n logger.info('Parsed DN iaps: %s' % dn_iaps)\r\n \r\n for i in range(len(dn_names)):\r\n mydn = Dn()\r\n mydn.set_id(dn_ids[i])\r\n mydn.set_name(dn_names[i])\r\n myiaps = [None]*10\r\n for j in range(10):\r\n myiaps[j] = dn_iaps[j][i]\r\n mydn.set_iaps(myiaps)\r\n cnt.add_dn(mydn)\r\n return cnt", "def get_botnet_domains():\n\n fw = \"<HTTPS://YOUR_FORTIGATE_IP:YOUR_FORTIGATE_PORT>\"\n\n path = \"/api/v2/monitor/system/botnet-domains/hits/?access_token=\"\n\n token = \"<YOUR_API_KEY>\"\n\n content_filter = \"\"\n\n if content_filter != \"\":\n url = fw + path + token + content_filter\n else:\n url = fw + path + token\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n try:\n r = requests.get(url, 
verify=False).json()\n except Exception:\n print(\"Something went wrong. Is the url correct? Exiting...\")\n sys.exit()\n\n for key in r['results']:\n print()\n for k,v in key.items():\n print(\"{0:6} : {1}\".format(k.upper(), str(v)))", "def query(self, wireframe):\n\n headers = {\n 'content-type': 'application/dns-message'\n }\n\n dnsdata = dnslib.DNSRecord.parse(wireframe)\n dnsdomain = dnsdata.q.get_qname()\n qtype = dnslib.QTYPE.get(k=dnsdata.q.qtype)\n\n print(f\"Handling query: ({qtype}) {dnsdomain}\")\n\n retval = None\n domconfig = self.get_domain_config(globals.config.default, wireframe)\n\n if 'static' in domconfig:\n # handle \"static\" domain configuration\n\n if qtype in domconfig.static:\n # reply for static configured domain match\n d = dnsdata.reply()\n qanswer = domconfig.static[qtype]\n d.add_answer(*dnslib.RR.fromZone(f\"{dnsdomain} 60 {qtype} {qanswer}\"))\n\n d.header.id = dnsdata.header.id\n d.q.qtype = dnsdata.q.qtype\n d.header.qr = 1\n\n return d.pack()\n \n else:\n # return NXDOMAIN\n r = dnsdata.reply()\n r.header.rcode = dnslib.RCODE.NXDOMAIN\n return r.pack()\n\n for retries in range(0, domconfig.doh_max_retries):\n if domconfig.doh_url_select == \"random\":\n url = self.get_random_doh(domconfig.doh_urls)\n elif domconfig.doh_url_select == \"roundrobin\":\n url = self.get_roundrobin_doh(domconfig.doh_urls)\n else:\n print(\"Error, no DOH url select method\")\n r = dnsdata.reply()\n r.header.rcode = dnslib.RCODE.NXDOMAIN\n return r.pack()\n\n print(\"Using\", url)\n\n try:\n r = requests.post(url, headers=headers, data=wireframe, stream=True, verify=globals.config.service.check_doh_ssl)\n assert r.status_code == 200\n retval = r.content\n break\n\n except Exception as ex:\n print(\"Error requesting DOH: \", ex)\n continue\n\n return retval", "def test_getdnsrecords(self, kasserver):\n assert kasserver.get_dns_records(\"example.com\") == self.RESPONSE_PARSED", "def _get_records(self, domain, domain_id, record):\n for needed in [\"type\", \"source\", \"target\"]:\n if needed not in record:\n raise ValueError(\"{} not provided in record dict\".format(needed))\n\n if record[\"source\"] == \".\":\n fqdn = domain\n else:\n fqdn = \"{source}.{domain}\".format(source=record[\"source\"], domain=domain)\n return list(\n filter(\n lambda x: (\n x[\"source_idn\"] == fqdn\n and x[\"type\"] == record[\"type\"]\n and x[\"target\"] == record[\"target\"]\n ),\n self._get_request(\"/1/domain/{domain_id}/dns/record\".format(domain_id=domain_id)),\n )\n )", "def extract_domains(self, resp):\n return", "def run_whois(self,domain):\n try:\n who = whois.whois(domain)\n results = {}\n # Check if info was returned before proceeding because sometimes records are protected\n if who.registrar:\n results['domain_name'] = who.domain_name\n results['registrar'] = who.registrar\n results['expiration_date'] = who.expiration_date\n results['registrant'] = who.name\n results['org'] = who.org\n results['admin_email'] = who.emails[0]\n results['tech_email'] = who.emails[1]\n results['address'] = \"{}, {}{}, {}, {}\".format(who.address,who.city,who.zipcode,who.state,who.country)\n results['dnssec'] = who.dnssec\n else:\n click.secho(\"[*] WHOIS record for {} came back empty. You might try looking at dnsstuff.com.\".format(domain),fg=\"yellow\")\n return results\n except Exception as error:\n click.secho(\"[!] The WHOIS lookup for {} failed!\".format(domain),fg=\"red\")\n click.secho(\"L.. 
Details: {}\".format(error),fg=\"red\")", "def domain_resolve(self, domain):\n a_result = server.resolve(domain, 'A')\n mx_result = server.resolve(domain, 'MX')\n ns_result = server.resolve(domain, 'NS')\n\n domain_data = {}\n domain_data.setdefault('resolving', {'A': a_result})\n domain_data['resolving']['MX'] = mx_result\n domain_data['resolving']['NS'] = ns_result\n\n return domain_data", "def lookupNameservers(self, name, timeout=None):\n name_is_self = name in [self.wildcard_domain, self.ns_domain]\n if name.endswith('.' + self.wildcard_domain) or name_is_self:\n # If we're responsible for this domain, return NS records\n payload = dns.Record_NS(name=self.ns_domain)\n answer = dns.RRHeader(name=name, type=dns.NS,\n payload=payload, auth=True, ttl=TTL)\n\n # Additional section: NS ip address\n additional_payload = dns.Record_A(address=self.my_ip)\n additional_answer = dns.RRHeader(name=name,\n payload=additional_payload, ttl=TTL)\n\n answers = [answer]\n authority = []\n additional = [additional_answer]\n\n return defer.succeed((answers, authority, additional))\n\n # fail for domains that are not handled by our server\n return defer.fail(failure.Failure(dns.AuthoritativeDomainError(name)))", "def dns_retentions(self):\n url_path = 'dns/retentions'\n self.logger.debug(\"Get possible retentions for '/dns' per each granularity\")\n return self._common_get(url_path)", "def items_by_domain(self, domain: str) -> List[dict]:\n if not self.connected:\n raise NotConnected(\"Please call connect first.\")\n return [value for key, value in self._states.items() if key.startswith(domain)]", "def _get_IP_addresses(hostname):\n try:\n answers, auth, addit = yield DNSclient.lookupAddress(hostname)\n except Exception as exc: # Too many different DNS failures to catch...\n log.exception('DNS Resolution failure: %r for name: %r', exc, hostname)\n returnValue([])\n\n returnValue(\n [answer.payload.dottedQuad()\n for answer in answers if answer.type == dns.A])", "def list_domain(self, feed_id=None):\n domains = self.list_resource(feed_id=feed_id,\n resource_type_id='Domain Host',\n cls=Domain,\n list_children=True,\n include_data=True)\n return domains", "def resolveOriginalDomains():\n print('[+] Populating Domain Name Resolution for later check ')\n\n try:\n for domain in domains:\n response = dns.resolver.query(domain)\n d = Domain_Poison_Check(domain)\n print('[+] Domain: %s' % domain)\n for record in response:\n print(' |____> maps to %s.' 
% (record.address))\n d.pushAddr(record)\n check_domain_poison_results.append(d)\n return time.time()\n except Exception as err:\n print('[+] Exception: %s' % err)\n traceback.print_exc()\n return time.time()", "def _fallback_get_mx_domains(domain):\n try:\n query = dns.message.make_query(domain, dns.rdatatype.MX)\n answers = dns.query.udp(query, GOOGLE_DNS_IP).answer[0]\n return [a for a in answers if a.rdtype == dns.rdatatype.MX]\n except Exception:\n return []", "def domain_info(self, domain):\n endpoint = '/Domain/Info'\n\n params = {\n 'Domain' : domain\n }\n\n response = self.__perform_get_request(endpoint, params)\n \n if response.status_code == 200:\n parsed_response = response.json()\n return parsed_response", "def _lv_dns_lookup(name):\n if dns is None:\n return _lv_pydns_lookup(name)\n resp = dns.resolver.query(name, \"srv\")\n if resp.response.flags & dns.flags.TC:\n resp = dns.resolver.query(name, \"srv\", tcp=True)\n return [(a.priority, a.weight, a.port, a.target.to_text(True)) for a in resp]", "def _lv_pydns_lookup(name):\n if not DNS.defaults[\"server\"]:\n DNS.DiscoverNameServers()\n req = DNS.Request(name=name, qtype=\"srv\", protocol=\"udp\")\n for retries_left in [3, 2, 1, 0]:\n try:\n response = req.req()\n if response and response.header[\"tc\"]:\n # truncated, rerun with tcp\n req = DNS.Request(name=name, qtype=\"srv\", protocol=\"tcp\")\n continue\n break\n except DNS.Base.DNSError:\n if not retries_left:\n raise\n time.sleep(1) # retry after sleeping a second\n if not response or not response.answers:\n return []\n result = []\n for a in response.answers:\n if a[\"typename\"].lower() != \"srv\":\n continue\n if isinstance(a[\"data\"], list):\n result.extend(a[\"data\"])\n else:\n result.append(a[\"data\"])\n return result", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetDnsServers', self.handle))", "def resolve(self,\n ns_servers: List[Dict[str, str]] = [{'IPv4 address': '8.8.8.8', 'MAC address': '01:23:45:67:89:0a'}],\n domain: str = 'google.com',\n subdomains_list: List[str] = ['www', 'mail', 'ns', 'test'],\n subdomains_file: Union[None, str] = None,\n subdomains_brute: bool = False,\n max_threats_count: int = 10,\n udp_destination_port: int = 53,\n timeout: int = 30) -> List[Dict[str, str]]:\n\n try:\n\n # region Clear results list\n self.index_of_dns_query = 0\n self.results.clear()\n self.uniq_hosts.clear()\n # endregion\n\n # region Set target domain\n assert not (domain == ''), \\\n 'Target domain is empty, please set target domain in this parameter: ' + self.base.info_text('domain')\n self.domain = domain\n # endregion\n\n # region Subdomains list\n if len(subdomains_list) > 0:\n self.subdomains = subdomains_list\n # endregion\n\n # region Subdomains file\n if subdomains_file is not None:\n assert isfile(subdomains_file), \\\n 'File with subdomain list:' + self.base.error_text(subdomains_file) + ' not found!'\n with open(subdomains_file) as subdomains_file_descriptor:\n for subdomain in subdomains_file_descriptor.read().splitlines():\n self.subdomains.append(subdomain)\n # endregion\n\n # region Subdomains brute\n if subdomains_brute:\n\n if not self.quiet:\n self.base.print_info('Make subdomains list for brute .... 
')\n\n for character1 in RawDnsResolver.available_characters:\n self.subdomains.append(character1)\n for character2 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2)\n for character3 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2 + character3)\n # endregion\n\n # region Check length of subdomains list\n assert len(self.subdomains) != 0, \\\n 'List containing subdomains is empty, please set any of this parameters: ' \\\n + self.base.info_text('subdomain_list') + ' or ' \\\n + self.base.info_text('subdomain_file') + ' or ' \\\n + self.base.info_text('subdomain_brute')\n # endregion\n\n # region Create raw socket\n raw_socket: socket = socket(AF_PACKET, SOCK_RAW)\n raw_socket.bind((self.network_interface, 0))\n # endregion\n\n # region Truncate temporary results file\n temporary_results_file = open(RawDnsResolver.temporary_results_filename, 'r+')\n temporary_results_file.truncate()\n temporary_results_file.close()\n # endregion\n\n # region Sniff DNS answers\n if not self.quiet:\n self.base.print_info('Start DNS answers sniffer for domain: ', self.domain)\n\n threats: ThreadManager = ThreadManager(max_threats_count)\n self._sniff_start(self.your_mac_address, self.your_ipv4_address,\n self.your_ipv6_address, udp_destination_port)\n threats.add_task(self._sniff_check)\n # endregion\n\n # region Send DNS queries\n if not self.quiet:\n self.base.print_info('Start sending DNS queries, time: ', str(datetime.now()))\n\n self._send_queries(send_socket=raw_socket,\n source_mac_address=self.your_mac_address,\n source_ipv4_address=self.your_ipv4_address,\n source_ipv6_address=self.your_ipv6_address,\n domain=domain,\n ns_servers=ns_servers,\n destination_port=udp_destination_port,\n max_threats_count=int(max_threats_count) - 1,\n subdomains=self.subdomains)\n # endregion\n\n # region Timeout\n if not self.quiet:\n self.base.print_info('Wait timeout: ', str(timeout) + ' sec')\n sleep(timeout)\n # endregion\n\n # region Return results\n self._sniff_stop()\n if not self.quiet:\n if len(self.results) > 0:\n self.base.print_success('Found ', str(len(self.results)),\n ' subdomains and addresses for domain: ', self.domain)\n else:\n self.base.print_error('Not found subdomains in domain: ', self.domain)\n return self.results\n # endregion\n\n except AssertionError as Error:\n self.base.print_error(Error.args[0])\n exit(1)", "async def aio_rdap_domain_lookup(url: str, http_client: Optional[Any] = None) -> PyWhoIs:\n whois = await PyWhoIs._aio_rdap_domain_from_url(url, http_client)\n return whois", "def query_dns_server(packet):\n\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n except socket.error:\n print \"[Error]: Faild to create socket. Exiting...\"\n exit(1)\n\n # get DNS server IPs from dns_servers.conf file\n dns_servers = serverconf.read_file()\n # default port for DNS\n server_port = 53\n\n for server_ip in dns_servers:\n got_response = False\n\n # send message to server\n sock.sendto(packet, (server_ip, server_port))\n # receive answer\n recv = sock.recvfrom(1024)\n\n # if no answer is received, try another server\n if recv:\n got_response = True\n break\n\n # output error message if no server could respond\n if not got_response:\n print \"[Error]: No response received from server. 
Exiting...\"\n exit(0)\n\n return recv[0]", "def list_type_A_domain(self, domain):\n r53 = self.connections.get_route53()\n # Get Zone ID\n zone = r53.get_zone(domain)\n zone_id = zone.id\n # Get all type A records\n records = r53.get_all_rrsets(hosted_zone_id=zone_id, name='A')\n for record in records:\n print(record)", "def reverse_dns_sna(ipaddress):\n\n r = requests.get(\"http://api.statdns.com/x/%s\" % ipaddress)\n\n if r.status_code == 200:\n names = []\n\n for item in r.json()['answer']:\n name = str(item['rdata']).strip(\".\")\n names.append(name)\n\n return names\n elif r.json()['code'] == 503:\n # NXDOMAIN - no PTR record\n return None", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetDnsServers', self.handle))", "def list_domain(self, feed_id=None):\n resources = self.list_resource(feed_id=feed_id, resource_type_id='Host Controller')\n domains = []\n if resources:\n for resource in resources:\n resource_data = self.get_config_data(\n feed_id=resource.path.feed_id, resource_id=resource.id)\n domain_data = resource_data.value\n domains.append(Domain(resource.id, resource.name, resource.path, domain_data))\n return domains", "def get_dns_servers(self):\n self.__not_implemented()", "def query_records(self, context, rrs):\n records = self.dns_manager.query_records(context, rrs)\n return records", "def lookup(self, domain, get_last_full_query=True):\n\n data = {}\n\n try:\n\n last_full_builtwith_scan_date = None\n\n if self.api_version == 7 and isinstance(domain, list):\n domain = ','.join(domain)\n\n if self.api_version in [2, 7]:\n\n last_updates_resp = requests.get(ENDPOINTS_BY_API_VERSION[self.api_version], params={'UPDATE': 1})\n last_updated_data = last_updates_resp.json()\n\n if get_last_full_query and last_updated_data['FULL']:\n last_full_builtwith_scan_date = datetime.datetime.strptime(last_updated_data['FULL'],\n '%Y-%m-%d').date()\n print \"last_full_builtwith_scan_date >\", last_full_builtwith_scan_date\n\n params = {'KEY': self.key, 'LOOKUP': domain,}\n\n response = requests.get(ENDPOINTS_BY_API_VERSION[self.api_version], params=params)\n\n if self.api_version == 1:\n data = response.json()\n elif self.api_version == 2:\n data = BuiltWithDomainInfo(response.json(), last_full_builtwith_scan_date)\n elif self.api_version == 7:\n domain_info = list()\n for result in response.json()['Results']:\n domain_info.append(BuiltWithDomainInfo(result['Result'], last_full_builtwith_scan_date))\n return domain_info\n elif self.api_version == 12:\n data = response.json()\n\n except Exception as e:\n try:\n error = e.get(\"Message\")\n data[\"Errors\"] = error\n except Exception as error:\n data[\"Errors\"] = error\n\n return data", "def test_get_all_email_domains(self):\n email_dom2 = 'testgetall.com'\n org = 'o=%s' % (self.org_name)\n dn = '%s,%s' % (org, self.base_dn)\n dn_info = {'aenetPostfixDomain': [self.email_dom, email_dom2]}\n expected_result = [(dn, dn_info)] \n domain = SpokeEmailDomain(self.org_name)\n domain.create(email_dom2)\n result = domain.get()['data']\n self.assertEqual(result, expected_result)", "def parse(domains):\n subdomains = []\n for domain in domains:\n url = 'https://urlscan.io/api/v1/search/?q=domain:{}'.format(domain)\n json_resp = json.loads(requests.get(url).text)\n subdomains += list(set(find('domain', json_resp)))\n return list(set(subdomains))", "def dns_entry(self, msg):\n if msg['message'].find('Calling getaddrinfo') > -1:\n match = re.search(r'Calling getaddrinfo for host \\[(?P<host>[^\\]]+)\\]', 
msg['message'])\n if match:\n hostname = match.groupdict().get('host')\n if hostname not in self.dns:\n self.dns[hostname] = {'start': msg['timestamp']}\n elif msg['message'].find('lookup completed for host') > -1:\n match = re.search(r'lookup completed for host \\[(?P<host>[^\\]]+)\\]', msg['message'])\n if match:\n hostname = match.groupdict().get('host')\n if hostname in self.dns and 'end' not in self.dns[hostname]:\n self.dns[hostname]['end'] = msg['timestamp']", "def _get_domain(self):\n self.ensure_one()\n domain = []\n return domain", "def get_dns(self) -> Set:\n if self.dn_set.should_update():\n contacts_data = self.get_contacts_data()\n self.dn_set.update(set(contacts_data.get_dns()))\n return self.dn_set.data", "def recursive_dns_lookup(target_name, qtype, root_servers_list):\n\n # Base case\n if not root_servers_list:\n return None\n\n # Create dns query based on the target_name (website)\n # and qtype (queue type: CNAME, A, AAAA, or MX)\n dns_query = dns.message.make_query(target_name, qtype)\n\n for server in root_servers_list:\n # Doing a try catch to check if the dns server times out,\n # if it does then we continue and try another server\n try:\n query_response = dns.query.udp(dns_query, server, 3)\n except dns.exception.Timeout:\n continue\n # If there's an answer in the response\n if query_response.answer:\n # Search through the response.answer for possible answers\n for response_answers in query_response.answer:\n #print(\"response_answers: \", response_answers)\n for response_answer in response_answers:\n #print(\"Response_answer\", response_answer)\n target_name = str(response_answer)[:-1] # Removes the period at the end\n #print(\"Target_name\", target_name)\n # If we don't get the reponse we're after then\n # continue searching through the root_servers\n if response_answer.rdtype != qtype:\n if response_answer.rdtype == 5:\n return recursive_dns_lookup(target_name, qtype, ROOT_SERVERS)\n else:\n # Return the answer we wanted\n return query_response\n else: # If there isn't an answer in the response then we check additional\n\n # If we do have something in additional then get the stuff inside\n if query_response.additional:\n ip_addresses = []\n for response_additional in query_response.additional:\n #print(\"response_additional: \", response_additional)\n # Convert to string then send to function for parsing the address out\n response_additional_str = str(response_additional)\n\n #print(\"function get_address resp:\", resp)\n resp_elements = response_additional_str.split()\n #print(\"function get_address resp_elements:\", resp_elements)\n ip_address = []\n for resp_element in resp_elements:\n #print(\"function get_address resp_element:\", resp_element)\n if resp_element != 'A':\n continue\n else:\n #print(\"function get_address resp_element = A:\", resp_element)\n #print(\"function get_address address:\", resp_elements[-1])\n ip_address.append(resp_elements[-1])\n ip_addresses += ip_address\n\n return recursive_dns_lookup(target_name, qtype, ip_addresses)", "def _get_ip_addresses_for_domain(self, domain):\n result = []\n\n if platform.system() != \"Linux\":\n # Only Linux is supported atm\n return result\n\n if \"///\" not in self._uri:\n # Only local libvirtd is supported atm\n return result\n\n mac_addresses = self._get_mac_addresses_for_domain(domain=domain)\n\n arp_table = {}\n try:\n cmd = [\"arp\", \"-an\"]\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, _ = child.communicate()\n arp_table = 
self._parse_ip_table_arp(arp_output=stdout)\n except OSError as e:\n if e.errno == 2:\n cmd = [\"ip\", \"neigh\"]\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, _ = child.communicate()\n arp_table = self._parse_ip_table_neigh(ip_output=stdout)\n\n for mac_address in mac_addresses:\n if mac_address in arp_table:\n ip_addresses = arp_table[mac_address]\n result.extend(ip_addresses)\n\n return result", "def by_domains(self):\n\t\t\n\t\t# TODO: use urllib instead\n\t\turl_format = r'^\\s*(?:(?P<protocol>\\w+)://)?(?P<domain>[\\w\\d\\-\\.]+)(?::(?P<port>\\d+))?/?(?P<everything_else>.*)$'\n\t\tsites = {}\n\t\tfor line in self.source.lines:\n\t\t\ttry:\n\t\t\t\tif self.filter(line):\n\t\t\t\t\tresult = re.match(url_format, line.content.url)\n\t\t\t\t\tif result.group('domain') not in sites.keys():\n\t\t\t\t\t\tsites[result.group('domain')] = 0\n\t\t\t\t\tsites[result.group('domain')] += int(line.content.size)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\n\t\t# TODO: sort; convert to lists is even better\n\t\t\n\t\treturn sites", "def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "def public_ip_dns(resolv, nameservers, rdatatype, server, responsetype):\n for ns in nameservers:\n try:\n answer = resolv.query(ns, rdatatype)\n nameserver = answer[0].to_text()\n except Exception as e:\n print(e)\n continue\n resolve_public_ip(nameserver, server, responsetype)", "def getdns(self):\r\n filename = r\"dns_profiles.txt\"\r\n fp = open(filename)\r\n data = []\r\n for lines in fp.readlines():\r\n data.append(list(map(float, lines.split())))\r\n #use the fundamental string function 'append','split' to extract floating point number\r\n fp.close()\r\n dns_data = np.array(data) #transfer list to array\r\n self.dns_z = dns_data[:, 0] / 1000 #z-plus -> z/h\r\n self.dns_u = dns_data[:, 1] # u-plus\r\n self.dns_uw = dns_data[:, 2]\r\n self.dns_uu = dns_data[:, 3]\r\n self.dns_ww = dns_data[:, 4]\r\n self.dns_vv = dns_data[:, 5]\r\n self.dns_tau = dns_data[:, 7]\r\n self.dns_tot = dns_data[:, 8]", "def return_domains(hostname, username):\n myconnection = ssh_connection(hostname, username)\n if myconnection == 1:\n return \"Connection to %s failed\" % hostname\n else:\n # Send the command (non-blocking)\n stdin, stdout, stderr = myconnection.exec_command(\"sudo /usr/sbin/postconf -P */unix/syslog_name | cut -d '/' -f 1\")\n\n #On récupère la sortie standard\n out=stdout.read().splitlines()\n\n if not out:\n return \"No domains for this hostname\"\n else:\n #On retourne la liste des domaines\n return out\n # Disconnect from the host\n myconnection.close()", "def getDomain(self, *args, **kwargs):\n\n return_json = dict()\n jdatas = list()\n try:\n result, name = is_file(kwargs.get('value')[0])\n if result:\n jdatas = [load_file(name)]\n kwargs['dump'] = False\n md5_hash = ''\n\n except IndexError:\n print('[-] Something going wrong')\n return\n\n if not jdatas:\n if isinstance(kwargs.get('value'), list) and len(kwargs.get('value')) == 1 and \\\n os.path.exists(kwargs.get(\"value\")[0]) and kwargs.get(\"value\")[0].endswith(\".txt\"):\n kwargs[\"value\"] = [domain.strip() for domain in open(kwargs.get(\"value\")[0], \"rb\").readlines()]\n elif isinstance(kwargs.get('value'), six.string_types):\n kwargs['value'] = [kwargs.get('value')]\n\n kwargs['value'] = [urlparse(domain).netloc.lower() if domain.startswith(('http://', 'https://')) 
else domain for domain in kwargs.get('value')]\n\n url = self.base.format('domains/')\n\n for domain in kwargs.get('value'):\n url = self.base.format('domains/{}'.format(domain))\n if kwargs.get('domain_post_comments'):\n url += '/comments'\n method = 'post'\n data = '{\"data\": {\"type\": \"comment\", \"attributes\": {\"text\": \"Lorem ipsum dolor sit ...\"}}}'\n elif kwargs.get('domain_get_comments'):\n url += '/comments'\n method = 'get'\n else:\n #url += '/' + kwargs['domain_get_relationships']\n self.params[\"relationships\"] = 'communicating_files,downloaded_files,graphs,referrer_files,resolutions,siblings,subdomains,urls'\n method = \"get\"\n jdata, response = get_response(url, apikey=self.apikey, method=method, params=self.params)\n jdatas.append((domain, jdata))\n\n if kwargs.get('return_raw'):\n return jdatas\n\n for domain, jdata in jdatas:\n if jdata.get('data'):\n jdata = jdata['data']\n\n if not (kwargs.get('return_json') or kwargs.get('return_raw')) and kwargs.get('verbose'):\n print('\\n[+] Domain:', domain)\n\n single_dict = (\n 'TrendMicro category',\n 'Dr.Web category',\n 'BitDefender category',\n 'Websense ThreatSeeker category',\n 'Alexa category',\n 'Alexa domain info',\n 'Alexa rank',\n 'Opera domain info',\n 'subdomains',\n 'siblings',\n )\n\n complicated_dict = (\n 'WOT domain info',\n 'Webutation domain info',\n )\n\n for key in single_dict:\n if jdata.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({key: jdata[key]})\n else:\n self.print_key(key)\n if isinstance(jdata[key], list):\n print('\\t', '\\n\\t'.join(jdata[key]))\n else:\n print('\\t{0}'.format(jdata[key]))\n\n for key in complicated_dict:\n if jdata.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({key: jdata[key]})\n else:\n self.__print_complex_dict(jdata, key, kwargs)\n\n if jdata['attributes'].get('whois') and ((kwargs.get('whois') or 'whois' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({'whois': jdata['attributes']['whois']})\n else:\n print('\\n[+] Whois data:\\n')\n try:\n print('\\t', jdata['attributes']['whois'].replace('\\n', '\\n\\t'))\n except:\n try:\n print('\\t', jdata['attributes']['whois'].encode('utf-8', 'replace').replace('\\n', '\\n\\t'))\n except:\n print('Old version of python has some problems with converting chars to ansii')\n\n self._print_complex_dict(jdata['attributes'], 'categories')\n self.__parse_relationships(jdata['relationships'], domain)\n if kwargs.get(\"domain_get_comments\", False) is True:\n simple_list = (\n \"date\",\n \"tags\",\n \"text\",\n \"votes\",\n \"links\"\n )\n for block in jdata:\n print(\"[+] Comment ID: {}\".format(block[\"id\"]))\n for key in simple_list:\n if block.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({key: block[\"attributes\"][key]})\n else:\n self.print_key(key, indent='', separator='\\t[+]')\n if key == \"date\":\n print('\\t', datetime_from_timestamp(block.get(key)))\n else:\n print('\\t', block.get(key))\n\n # ToDo\n #elif kwargs.get(\"post_post_comments\", False) is True:\n\n elif kwargs.get('domain_get_relationships', False):\n self._print_complex_dict(jdata['attributes'], 'categories')\n self.__parse_relationships(jdata['relationships'], domain)\n \"\"\"\n simple_list = (\n \"url\",\n \"last_final_url\",\n \"tags\",\n \"total_votes\",\n 
\"last_analysis_date\",\n \"last_analysis_stats\",\n )\n for block in jdata['attributes']:\n print(block)\n for key in simple_list:\n if block.get(key, \"\") and ((kwargs.get(key) or key in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({key:block[key]})\n else:\n self.print_key(key, indent='', separator='\\t[+]')\n if key == \"last_analysis_date\":\n print('\\t', datetime_from_timestamp(block.get(key)))\n else:\n print('\\t', block.get(key))\n #[{u'attributes': {u'total_votes': {u'harmless': 0, u'malicious': 0}, u'last_final_url': u'https://msg3.club/', u'tags': [], u'url': u'https://msg3.club/', u'last_analysis_date': 1551639858, u'last_analysis_stats': {u'harmless': 57, u'malicious': 1, u'suspicious': 0, u'undetected': 8, u'timeout': 0}, u'first_submission_date': 1551639858,\n self.last_analysis_results(block, args, kwargs)\n \"\"\"\n\n if kwargs.get('return_json'):\n return_json.update(self.__detected_samples(jdata, *args, **kwargs))\n else:\n return_json = self.__detected_samples(jdata, *args, **kwargs)\n\n if jdata.get('pcaps') and ((kwargs.get('pcaps') or 'pcaps' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({'pcaps': jdata['pcaps']})\n else:\n print('\\n')\n pretty_print(jdata['pcaps'], ['pcaps'], [70], ['c'], kwargs.get('email_template'))\n\n if jdata.get('resolutions') and ((kwargs.get('resolutions') or 'resolutions' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({'passive_dns': jdata['resolutions']['data']})\n else:\n print('\\n[+] Passive DNS replication\\n')\n pretty_print(jdata['resolutions']['data'],\n ['ip_address', 'type'],\n [25, 20],\n ['c', 'c'],\n kwargs.get('email_template')\n )\n\n if kwargs.get('walk') and jdata.get('resolutions', {}).get(\"data\", []):\n filter_ip = list()\n for ip in jdata['resolutions']['data']:\n ip = ip['id'].replace(domain, '')\n if ip not in filter_ip:\n print('\\n\\n[+] Checking data for ip: {0}'.format(ip))\n kwargs['value'] = ip\n self.getIP(**kwargs)\n\n if kwargs.get('dump') is True:\n md5_hash = hashlib.md5(name.encode(\"utf-8\")).hexdigest()\n jsondump(jdata, md5_hash)\n\n if kwargs.get('return_json'):\n return return_json", "def get_dns(self):\n return self.mycam.devicemgmt.GetDNS()", "def get_delta_domains():\n url = os.getenv('DELTAS_URL')\n if url is None:\n raise Exception('Delta report URL configuration not set!')\n\n json = requests.get(url, timeout=10).json()\n return [domain\n for (domain,)\n in json['values']\n if dnstwist.is_valid_domain(domain)]", "def group_by_domain(hash_entries):\n entries = (get_entry(h) for h in hash_entries)\n domains = {}\n for e in entries:\n domains[e['url_domain']] = domains.get(e['url_domain']) or []\n domains[e['url_domain']].append(e)\n return [{'domain': name, 'entries': ent} for name, ent in domains.items()]", "def domainlist_reversewhois(self, response):\n data = response.json()\n for domain in data['response']['domains']:\n yield(domain.lower())", "def rdap_domain_lookup(url: str, http_client: Optional[Any] = None) -> PyWhoIs:\n whois = PyWhoIs._rdap_domain_from_url(url, http_client)\n return whois", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevNet_GetDnsServers', self.handle))", "def domainnames(l):\n mapping = {}\n # locate all the samba domains in the ldap\n r = l.search_s('dc=elex', ldap.SCOPE_SUBTREE, '(objectClass=sambaDomain)', ['sambaDomainName','sambaSID'])\n for dn, entry in r:\n mapping[dn] = 
(entry['sambaDomainName'][0], entry['sambaSID'][0])\n return mapping", "def dns(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns\")", "def get_name_servers(self, \n ipv4_gateway_mac: str = '01:23:45:67:89:0a',\n ipv6_gateway_mac: str = '01:23:45:67:89:0b',\n domain: str = 'google.com') -> List[Dict[str, str]]:\n\n # region Clear results list\n ns_servers: List[Dict[str, str]] = list()\n self.results.clear()\n # endregion\n\n # region Start sniffer\n if not self.quiet:\n self.base.print_info('Get NS records of domain: ' + domain + ' ...')\n self._sniff_start(self.your_mac_address, self.your_ipv4_address, self.your_ipv6_address, 53)\n # endregion\n\n # region Send DNS queries\n raw_socket: socket = socket(AF_PACKET, SOCK_RAW)\n raw_socket.bind((self.network_interface, 0))\n\n name_servers_addresses = self.base.get_system_name_servers()\n for name_server_address in name_servers_addresses:\n if self.base.ip_address_validation(name_server_address):\n if self.base.ip_address_in_network(name_server_address, self.your_ipv4_network):\n name_server_mac: str = self.arp_scan.get_mac_address(self.network_interface, name_server_address)\n else:\n name_server_mac: str = ipv4_gateway_mac\n dns_query = self.dns.make_ns_query(ethernet_src_mac=self.your_mac_address,\n ethernet_dst_mac=name_server_mac,\n ip_src=self.your_ipv4_address,\n ip_dst=name_server_address,\n udp_src_port=randint(2049, 65535),\n udp_dst_port=53,\n transaction_id=randint(1, 65535),\n name=domain)\n raw_socket.send(dns_query)\n # endregion\n\n # region Resolve NS servers\n sleep(5)\n self._sniff_stop()\n\n ns_servers_names: List[str] = list()\n ns_servers_addresses: List[str] = list()\n\n for ns_server in self.results:\n ns_servers_names.append(ns_server['NS'])\n\n for ns_server_name in ns_servers_names:\n try:\n ns_server_addresses = gethostbyname_ex(ns_server_name)\n if len(ns_server_addresses) > 0:\n for ns_server_address in ns_server_addresses[2]:\n if ns_server_address not in ns_servers_addresses:\n ns_servers_addresses.append(ns_server_address)\n except herror:\n pass\n\n for ns_server_address in ns_servers_addresses:\n if self.base.ip_address_validation(ns_server_address):\n ns_servers.append({'IPv4 address': ns_server_address,\n 'MAC address': ipv4_gateway_mac})\n if self.base.ipv6_address_validation(ns_server_address):\n ns_servers.append({'IPv6 address': ns_server_address,\n 'MAC address': ipv6_gateway_mac})\n\n return ns_servers\n # endregion", "def get_domain_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/domain\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def main():\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument(\"name\", nargs=\"+\",\n help=\"DNS name(s) to look up\")\n argument_parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n program_args = argument_parser.parse_args()\n fuckall = []\n for a_domain_name in program_args.name:\n if a_domain_name not in fuckall:\n print_results(collect_results(a_domain_name))\n fuckall.append(a_domain_name)", "def queue_dns_lookups(ips):\n loop = asyncio.get_event_loop()\n resolver = aiodns.DNSResolver(loop=loop)\n if settings.CUSTOM_DNS_SERVERS and settings.DNS_SERVERS:\n resolver.nameservers = settings.DNS_SERVERS\n queue = asyncio.gather(*(reverse_lookup(resolver, ip) for ip in ips))\n results = loop.run_until_complete(queue)\n return results", "def 
domain_list_all(self):\n page = 1\n on_page = 100\n ret = []\n while True:\n r = self.domain_list(page=page, on_page=on_page)\n ret += r['domains']\n if len(ret) >= r['total']:\n break\n page += 1\n return ret", "def whois_parsed(self, domain):\n return self.apiquery('/v1/{}/whois/parsed/'.format(domain))", "def get_dns_info(self, name_or_ip) :\n self._logger.debug(\"get_dns_info: entering with name_or_ip=%s\" % \\\n (name_or_ip))\n if not is_name(name_or_ip) : # check for matching ipaddress\n for hostname in afs.CONFIG.hosts :\n if name_or_ip in afs.CONFIG.hosts[hostname] :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % \\\n (name_or_ip, [hostname,],afs.CONFIG.hosts[hostname]))\n self._logger.debug(\"returning %s\" % ({ \"names\" : [hostname, ], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname] }) )\n return { \"names\" : [hostname, ], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname] }\n\n # is a hostname\n \n # hard-mapped, primary Hostname given \n if name_or_ip in afs.CONFIG.hosts.keys() :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % ( name_or_ip, \\\n [name_or_ip, ], afs.CONFIG.hosts[name_or_ip]))\n self._logger.debug(\"returning %s\" % ({\"names\" : [name_or_ip,], \"ipaddrs\" : \\\n afs.CONFIG.hosts[name_or_ip] }) )\n return {\"names\" : [name_or_ip,], \"ipaddrs\" : \\\n afs.CONFIG.hosts[name_or_ip] }\n\n \n # memory_cache \n if name_or_ip in self.memory_cache[\"dns_info\"] :\n self._logger.debug(\"%s in localcache hard-mapped (%s)\" % \\\n (name_or_ip,self.memory_cache[\"dns_info\"][name_or_ip] ))\n self._logger.debug(\"returning %s\" % (self.memory_cache[\"dns_info\"][name_or_ip]))\n return self.memory_cache[\"dns_info\"][name_or_ip]\n \n for srv in self.memory_cache[\"dns_info\"] :\n if name_or_ip in self.memory_cache[\"dns_info\"][srv][\"names\"] :\n self._logger.debug(\"%s is hard-mapped to %s\" % (name_or_ip, \\\n self.memory_cache[\"dns_info\"][srv] ))\n self._logger.debug(\"returning %s\" % (self.memory_cache[\"dns_info\"][srv]) )\n return self.memory_cache[\"dns_info\"][srv]\n\n # lookup from OS\n \n try : \n dns_info = socket.gethostbyaddr(name_or_ip)\n servernames = [dns_info[0]] + dns_info[1]\n ipaddrs = dns_info[2]\n except socket.gaierror :\n if is_name(name_or_ip) :\n raise LookupUtilError(\"Cannot resolve %s\" % name_or_ip)\n else :\n self._logger.warn(\"Cannot resolve %s\" % name_or_ip)\n self._logger.debug(\"returning %s\" % ({\"names\": [], \"ipaddrs\" : [name_or_ip,]}) )\n return {\"names\": [], \"ipaddrs\" : [name_or_ip,]}\n\n\n self._logger.debug(\"%s resolves to %s\" % (name_or_ip, dns_info)) \n # check if resolved ip-address matches (if hostalias was used)\n for hostname in afs.CONFIG.hosts :\n for ipaddr in ipaddrs :\n # ignore IP if we're asked to do so.\n if ipaddr in afs.CONFIG.ignoreIPList : continue\n if ipaddr in afs.CONFIG.hosts[hostname] :\n self._logger.debug(\"%s is hard-mapped to (%s,%s)\" % \\\n (ipaddrs, [hostname,],afs.CONFIG.hosts[hostname]))\n # add this hostalias to list in memory_cache\n if self.memory_cache[\"dns_info\"].has_key(hostname) :\n self.memory_cache[\"dns_info\"][hostname][\"names\"] = \\\n [hostname, ]\n self.memory_cache[\"dns_info\"][hostname][\"ipaddrs\"] = \\\n afs.CONFIG.hosts[hostname]\n else :\n self.memory_cache[\"dns_info\"][hostname] = { \\\n \"names\" : [hostname,], \\\n \"ipaddrs\" : afs.CONFIG.hosts[hostname]}\n self._logger.debug(\"memory_cache = %s\" % \\\n (self.memory_cache))\n ipaddrs = []\n self._logger.debug(\"returning %s\" % ({ \"names\" : [hostname], \"ipaddrs\" : \\\n 
afs.CONFIG.hosts[hostname] }) )\n return { \"names\" : [hostname], \"ipaddrs\" : \\\n afs.CONFIG.hosts[hostname] }\n\n if \"nxdomain\" in servernames[0] : \n raise LookupUtilError(\"cannot resolve DNS-entry %s\" % name_or_ip)\n # fill up localcache\n self.memory_cache[\"dns_info\"][servernames[0]] = { \\\n \"names\" : servernames, \"ipaddrs\" : ipaddrs }\n self._logger.debug(\"memory_cache = %s\" % (self.memory_cache))\n self._logger.debug(\"returning %s\" % ({\"names\": servernames, \"ipaddrs\" : ipaddrs}) )\n return {\"names\": servernames, \"ipaddrs\" : ipaddrs}", "def list_all():\n\n return (_conn.listDefinedDomains() +\n [_conn.lookupByID(id).name() for id in _conn.listDomainsID()])", "def get_input_domains():\n df = pandas.read_excel(\"AutoScrapy/files/EBE21 - Top 100 Onlineshops to scrapp.ods\", engine=\"odf\")\n list_of_addresses = df['Domain'].to_list()\n list_of_addresses = [(\"http://\" + address) for address in list_of_addresses]\n print(list_of_addresses)\n return list_of_addresses", "def get_domain(self, row_id):\n cursor = self.connection.cursor()\n cursor.execute(\"\"\"\n SELECT domain FROM queries WHERE rowid=(?);\n \"\"\", (row_id,))\n return cursor.fetchone()[0]", "def tracking_domain_list(self):\r\n params = base.get_params(None, locals())\r\n return self._get('tracking_domain_list', params)", "def custom_dns_resolver(hostname, type='A'):\n nameservers = globals.config.service.initial_dns\n custom_resolver = dns.resolver.Resolver()\n custom_resolver.nameservers = nameservers\n answer = custom_resolver.query(hostname, type)\n\n return str(random.choice(answer))", "def test_getdnsrecord(self, kasserver):\n assert (\n kasserver.get_dns_record(\"www.example.com\", \"A\") == self.RESPONSE_PARSED[0]\n )", "def get_domains() -> List[str]:\n ret = _call_endpoint(\"v1/domains\")\n # Example response:\n # [{'createdAt': '2016-06-25T03:08:44.000Z',\n # 'domain': 'mydomain.com',\n # 'domainId': 12345678,\n # 'expirationProtected': False,\n # 'expires': '2020-06-25T03:08:44.000Z',\n # 'holdRegistrar': False,\n # 'locked': True,\n # 'nameServers': None,\n # 'privacy': False,\n # 'renewAuto': True,\n # 'renewDeadline': '2020-08-09T03:08:44.000Z',\n # 'renewable': True,\n # 'status': 'ACTIVE',\n # 'transferProtected': False},]\n domains = [d[\"domain\"] for d in ret]\n return domains", "def case_search_enabled_domains():\n return CaseSearchConfig.objects.filter(enabled=True).values_list('domain', flat=True)", "def select_domain_by_id(conn, domainid):\n # open a cursor\n cur = conn.cursor()\n # sql\n sql = \"select * from domain where domainid > ?\"\n # execute the sql with bind parameters\n cur.execute(sql, (domainid,))\n # result of the query. The result is a list of rows\n # example [(1, 'domain1', 'daily'), (2, 'domain2', 'daily')]\n rows = cur.fetchall()\n return rows", "def find_dist(domain):\n print(DIST_MANAGER.find_matching_dist(domain))" ]
[ "0.754245", "0.69325036", "0.6888839", "0.6870619", "0.68629503", "0.6824631", "0.67168874", "0.66804457", "0.6670255", "0.6651589", "0.664277", "0.66395545", "0.66219234", "0.6609759", "0.6591403", "0.65406996", "0.6507059", "0.64893925", "0.647041", "0.64417464", "0.6440061", "0.64374006", "0.6430975", "0.63384515", "0.6335258", "0.63166475", "0.63051164", "0.63046676", "0.6266443", "0.6263354", "0.6230192", "0.6222976", "0.6212619", "0.62007797", "0.61968434", "0.6183697", "0.61787254", "0.61736244", "0.614425", "0.61313915", "0.6117595", "0.6104569", "0.6098173", "0.6097673", "0.6086641", "0.60776055", "0.60701543", "0.60551655", "0.6044288", "0.6043047", "0.60392207", "0.6033793", "0.6030518", "0.6016295", "0.60120046", "0.60018337", "0.5997935", "0.5979751", "0.59697664", "0.5969732", "0.5968344", "0.596562", "0.59557897", "0.5947806", "0.5947252", "0.5943935", "0.5942619", "0.5936178", "0.5933918", "0.5926861", "0.5912996", "0.58955115", "0.58946764", "0.58921885", "0.58883613", "0.5881719", "0.58803636", "0.58640724", "0.58612657", "0.58597803", "0.5859046", "0.58521765", "0.58318305", "0.58038116", "0.5798926", "0.5776378", "0.5775573", "0.5771805", "0.5768807", "0.5758075", "0.57564545", "0.57521284", "0.5750873", "0.57491934", "0.5746706", "0.5746599", "0.573445", "0.5719419", "0.5716817", "0.57145256" ]
0.7584279
0
Print each domain and its DNS records (for domains linked to this API key).
def print_all_dns_records(): for domain in sorted(get_domains()): dns_records = get_domain_dns_records(domain) print(domain) pprint(dns_records) print("*" * 50) # TODO: poor man's rate limiter. improve? time.sleep(2)
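A note on the TODO in the document above: the fixed time.sleep(2) pauses for two full seconds even when the API call itself was slow. A minimal sketch of a steadier throttle — assuming the same get_domains and get_domain_dns_records helpers this record already relies on:

import time
from pprint import pprint

def print_all_dns_records_throttled(min_interval: float = 2.0):
    # Like print_all_dns_records, but sleeps only for the time still
    # owed in the current interval rather than a fixed 2 seconds.
    last_call = 0.0
    for domain in sorted(get_domains()):
        wait = min_interval - (time.monotonic() - last_call)
        if wait > 0:
            time.sleep(wait)
        last_call = time.monotonic()
        print(domain)
        pprint(get_domain_dns_records(domain))
        print("*" * 50)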
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur", "def cli(ctx, domain, ip_address, hostname):\n zone = getzone(domain)\n #print('.%s:%s:%s' % (domain, ip_address, hostname))\n for r in zone:\n if r['type'] == 'CNAME':\n print('C%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'TXT':\n print('\\'%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'MX':\n pass\n elif r['type'] == 'A':\n print('=%s:%s' %( r['name'], r['content']))\n else:\n exit('unknown DNS record type: %s' % r['type'])", "def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records", "def get_domain_dns_records(domain):\n url_suffix = \"v1/domains/{}/records\".format(domain)\n ret = _call_endpoint(url_suffix)\n if isinstance(ret, dict) and ret.get('code', None) == \"UNKNOWN_DOMAIN\":\n # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}\n raise Exception(f\"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}\")\n return ret", "def print_domain(self):\n print('\\n*****************')\n print('DOMAIN: ' + self.domain)\n print('REQUIREMENTS: ' + str(self.requirements))\n print('TYPES: ' + str(self.types))\n print('PREDICATES: ' + str(self.predicates))\n print('ACTIONS: ' + str(self.actions))\n print('FUNCTIONS: ' + str(self.functions))\n print('CONSTANTS: ' + str(self.constants))\n print('****************')", "def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def show_domains(self):\n show_domains(self.system.cavity_gri)", "def test_getdnsrecords(self, kasserver):\n assert kasserver.get_dns_records(\"example.com\") == self.RESPONSE_PARSED", "def list_domain_names(self) -> Dict:\n pass", "def list_domain_names():\n pass", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def list_type_A_domain(self, domain):\n r53 = self.connections.get_route53()\n # Get Zone ID\n zone = r53.get_zone(domain)\n zone_id = zone.id\n # Get all type A records\n records = r53.get_all_rrsets(hosted_zone_id=zone_id, name='A')\n for record in records:\n print(record)", "def get_dns(self):\n dns = []\n for id, user in self.users_by_id.items():\n if not user.dns:\n continue\n for dn in user.dns:\n dns.append(dn)\n return dns", "def infoDnsRecords(self, domainname: str) -> DNSRecordSet:\n response = self._send(self.nc_request(action=\"infoDnsRecords\", parameters={\"domainname\": domainname}))\n\n # build records\n rset = DNSRecordSet(dnsrecords=[])\n for r in response[\"dnsrecords\"]:\n dr = DNSRecord(id=int(r[\"id\"]),\n hostname=r[\"hostname\"],\n type=r[\"type\"],\n priority=int(r[\"priority\"]),\n destination=r[\"destination\"],\n deleterecord=r[\"deleterecord\"],\n state=r[\"state\"])\n\n rset.dnsrecords.append(dr)\n\n return rset", "def get_botnet_domains():\n\n fw = \"<HTTPS://YOUR_FORTIGATE_IP:YOUR_FORTIGATE_PORT>\"\n\n path = \"/api/v2/monitor/system/botnet-domains/hits/?access_token=\"\n\n token = \"<YOUR_API_KEY>\"\n\n content_filter = \"\"\n\n if 
content_filter != \"\":\n url = fw + path + token + content_filter\n else:\n url = fw + path + token\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n try:\n r = requests.get(url, verify=False).json()\n except Exception:\n print(\"Something went wrong. Is the url correct? Exiting...\")\n sys.exit()\n\n for key in r['results']:\n print()\n for k,v in key.items():\n print(\"{0:6} : {1}\".format(k.upper(), str(v)))", "def list_keystone_v3_domains(self):\n LOG_OBJ.debug(\"List the domains.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Listing domains Failed with status %s \"\n \"and error : %s\" % response.status, response.data)\n print (\" Listing domains Failed with status %s and error : %s\" %\n response.status, response.data)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains list : %s \" % output)\n print (\"Domains list : %s \" % output)\n return output['domains']", "def domain_list_all(self):\n page = 1\n on_page = 100\n ret = []\n while True:\n r = self.domain_list(page=page, on_page=on_page)\n ret += r['domains']\n if len(ret) >= r['total']:\n break\n page += 1\n return ret", "def print_sodoku(sodoku_domains):\n for y in range(9):\n print(*[sodoku_domains[(x,y)] for x in range(9)],sep=\",\")", "def retrieve_resource_records(self):\n log('Retrieving records for {}'.format(self.domain))\n current_records = self._api_connection('dnsListRecords')\n for current_resource_record in current_records.iter('resource_record'):\n self.current_records.append(\n dict(\n (resource_record.tag, resource_record.text)\n for resource_record\n in current_resource_record.iter()\n )\n )\n log('{} records retrieved for {}'.format(len(self.current_records), self.domain))\n log(self.current_records)", "def create_dns_dictionary(self, path_tracefile):\n responses = self.get_dns_responses(path_tracefile)\n dns_dict = dict()\n for response in responses:\n for x in range(response[DNS].ancount): # answer count, how many IP adresses are returned for the query\n try: # answer count could also include 'DNS SRV Resource Record' which does not have a 'rrname' attribute so ancount is wrong if there is such a record -> TODO get amount of DNSRR instead of using ancount\n domain = getattr(response[DNSRR][x], 'rrname').decode(\"utf-8\") # domain (this is returned in bytes so decode)\n ip = getattr(response[DNSRR][x], 'rdata') # IP adres of the domain, TODO make this work for multiple ip adresses for one domain (Test with [0] at end)\n dns_dict[ip] = domain[:-1] #remove last char '.' 
\n except:\n continue\n return dns_dict", "def list_all():\n\n return (_conn.listDefinedDomains() +\n [_conn.lookupByID(id).name() for id in _conn.listDomainsID()])", "def domainlist_reversewhois(self, response):\n data = response.json()\n for domain in data['response']['domains']:\n yield(domain.lower())", "def getlist(self):\n self.__domainlist.sort()\n\n outstr = \"{ \"\n for index, domain in enumerate(self.__domainlist):\n outstr += domain + \" \"\n if (index % 50 == 0) and index > 0:\n outstr += \"}\\n{ \"\n\n outstr += \"}\"\n\n return outstr", "def domainlist_reversens(self, response):\n data = response.json()\n for domain in itertools.chain(data['response']['primary_domains'], data['response']['primary_domains']):\n yield(domain.lower())", "def info(self):\n\n return self.call(method='getDomain', args=[self.domainname])", "def _display_dns_results(self):\n if self.check_valid_result_data(\"dns_results\", silent=True):\n nb_markdown(f\"DNS events related to {self.url}\", \"bold\")\n display(self._last_result.dns_results)\n else:\n nb_markdown(f\"No DNS resolutions found for {self.url}\")", "def domainlist_reverseip(self, response):\n data = response.json()\n for ip in data['response']['ip_addresses']:\n for domain in ip['domain_names']:\n yield(domain.lower())", "def fastlydomain(args):\n pprint(api.domain(service_id, args[0], args[1]).attrs)", "def getSDDCDNS_Zones(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n json_response = get_sddc_dns_zones_json(proxy,sessiontoken)\n sddc_dns = json_response['results']\n table = PrettyTable(['ID', 'Name','DNS Domain Names','upstream_servers'])\n for i in sddc_dns:\n table.add_row([i['id'], i['display_name'], i['dns_domain_names'], i['upstream_servers']])\n # return table\n print(table)", "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains", "def extract_domains(self, resp):\n return", "def list_crds(full, debug):\n for crd in manager.list_crds(full, debug):\n print(yaml.dump([crd], default_flow_style=False))", "def print_requests(requests):\n\n if not _debug: return\n keys = get_sorted_keys(requests)\n\n print \"\\nIn Memory Structure:\"\n print \"{\"\n for key in keys:\n\tprint \" %s:[\" % (key)\n for request in requests[key]:\n\t\tprint \" (%s, %s),\" % (key, request.url)\n\tprint \" ]\"\n print \"}\\n\"", "def dns(self, **kwargs):\n self.logger.debug(f\"Get RealTime DNS data\")\n url_path = 'dns'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def list_websites(self):\r\n\r\n # Fetch websites\r\n self.fetch_website_list()\r\n\r\n # Print website data\r\n for website in self.website_list:\r\n print(\"ID: {0} | Domain: {1} | Name: {2}\".format(\r\n website['id'], website['domain'], website['name']))", "def show_keystone_v3_domain(self, domain_id):\n LOG_OBJ.debug(\"Details of a domain.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains/\" + str(domain_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting the \"\n \"details of domain\")\n print (\"No response from Server while getting the \"\n \"details of domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n 
LOG_OBJ.error(\"Show domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\"Show domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains details : %s \" % output)\n print (\"Domains details : %s \" % output)\n return output['domain']", "def dns(self):\n\n return value_list_to_comma('DNS', self._peer.dns)", "def main():\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument(\"name\", nargs=\"+\",\n help=\"DNS name(s) to look up\")\n argument_parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n program_args = argument_parser.parse_args()\n fuckall = []\n for a_domain_name in program_args.name:\n if a_domain_name not in fuckall:\n print_results(collect_results(a_domain_name))\n fuckall.append(a_domain_name)", "def list_domain(self, feed_id=None):\n resources = self.list_resource(feed_id=feed_id, resource_type_id='Host Controller')\n domains = []\n if resources:\n for resource in resources:\n resource_data = self.get_config_data(\n feed_id=resource.path.feed_id, resource_id=resource.id)\n domain_data = resource_data.value\n domains.append(Domain(resource.id, resource.name, resource.path, domain_data))\n return domains", "def _get_records(self, domain, domain_id, record):\n for needed in [\"type\", \"source\", \"target\"]:\n if needed not in record:\n raise ValueError(\"{} not provided in record dict\".format(needed))\n\n if record[\"source\"] == \".\":\n fqdn = domain\n else:\n fqdn = \"{source}.{domain}\".format(source=record[\"source\"], domain=domain)\n return list(\n filter(\n lambda x: (\n x[\"source_idn\"] == fqdn\n and x[\"type\"] == record[\"type\"]\n and x[\"target\"] == record[\"target\"]\n ),\n self._get_request(\"/1/domain/{domain_id}/dns/record\".format(domain_id=domain_id)),\n )\n )", "def print_dd_dict( self, ):\n print( self._dd_dict )", "def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "def get_dns(self):\n return self.mycam.devicemgmt.GetDNS()", "def domain_command():\n # 1. Get input host from Demisto\n domain = demisto.args().get('domain')\n # 2. Get the host reputation from SlashNext API\n response = domain_lookup(domain=domain)\n if response.get('errorNo') != 0:\n return\n # 3. 
Parse and format the response\n dbot_score_cont, domain_cont = get_dbot_std_context(\n domain, 'Domain', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(domain, 'Domain', response.get('threatData'))\n\n ec = {\n 'SlashNext.Domain(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'Domain': domain_cont\n }\n\n domain = domain.encode('idna')\n\n title = 'SlashNext Phishing Incident Response - Domain Lookup\\n' \\\n '##### domain = {}'.format(domain.decode())\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)", "def test_getdnsrecord(self, kasserver):\n assert (\n kasserver.get_dns_record(\"www.example.com\", \"A\") == self.RESPONSE_PARSED[0]\n )", "def print_resources(self) -> None:\n for resource in self._request_get(self.url_base + 'documentacao'):\n print(\n \"Nome: {},\\nUrl: {},\\n\".format(\n resource['name'],\n self._format_url_to_resource(resource['url']),\n )\n )", "def print_data():\r\n\r\n d = data()\r\n for i in d:\r\n for key, value in i.items():\r\n print(key, \" : \", value)\r\n print()", "def get(domain_name=None):\n url = 'https://api.cloudns.net/dns/soa-details.json'\n\n params = Parameters({'domain-name': domain_name})\n\n return requests.get(url, params=params.to_dict())", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)", "def dns_retentions(self):\n url_path = 'dns/retentions'\n self.logger.debug(\"Get possible retentions for '/dns' per each granularity\")\n return self._common_get(url_path)", "def tracking_domain_list(self):\r\n params = base.get_params(None, locals())\r\n return self._get('tracking_domain_list', params)", "def _print_findings(self) -> None:\n for ip_address in self._ip_addresses:\n print(f\"{ip_address}\")", "def main():\n config = _config()\n\n resolver = Resolver()\n resolver.nameservers = config['initial_nameservers']\n LOG.debug(\"Resolving namdservers %s\", config['nameservers'])\n nameservers = [resolver.address(_) for _ in config['nameservers']]\n\n resolver.nameservers = nameservers\n\n addresses = {}\n for domain in config['domains']:\n addresses[domain] = resolver.address(domain)\n LOG.debug(\"Found addresses: %s\", addresses)\n\n account = Account(**config['credentials'])\n client = Client(account)\n domains = client.get_domains()\n\n for domain, address in addresses.items():\n if domain not in domains:\n raise ValueError(\"%s not in client list of domains\" % domain)\n current = client.get_records(domain)[0]['data']\n if current != address:\n LOG.info('updating %s (%s -> %s)', domain, current, address)\n client.update_record_ip(address, domain, '@', 'A')\n else:\n LOG.info('Record up-to-date %s (%s)', domain, address)\n LOG.debug(\"complete\")", "def get_domain_names(self, MaxResults: str = None, NextToken: str = None) -> Dict:\n pass", "def print_response(response):\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = 
columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n\n for row in rows:\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n print(header + ': ' + dimension)\n\n for i, values in enumerate(dateRangeValues):\n print('Date range (' + str(i) + ')')\n for metricHeader, value in zip(metricHeaders, values.get('values')):\n\t print(metricHeader.get('name') + ': ' + value)", "def ListDomains(self, perPage=0, page=1):\n\n class Result(Model):\n domains = ListField(ModelField(Domain))\n\n if perPage != 0:\n headers = {\"perPage\": perPage, \"page\": page}\n response = self.client.http_get(\"/v4/domains\", headers)\n else:\n response = self.client.http_get(\"/v4/domains\")\n\n return parse_response(response, Result)", "def domains(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"domains\")", "def query_dns_records(event, context):\n ids = ['SOA', 'TXT', 'MX', 'NS', 'DNSKEY']\n dn = event['queryStringParameters'][query_parameter].lower()\n body = {'scanDate': (datetime.datetime.now(datetime.timezone.utc) +\n datetime.timedelta(hours=8)).isoformat().upper()[:26],\n 'scanRecordTypes': ids,\n 'domain': dn,\n 'records': {}}\n\n try:\n try:\n for record_type in ids:\n try:\n answers = dns.resolver.query(dn, record_type)\n records = []\n for data in answers:\n records.append(data.to_text())\n body['records'][record_type] = records\n except (dns.resolver.NoAnswer, dns.resolver.NoNameservers, dns.exception.Timeout):\n pass # might fail per record_type, perfectly fine\n\n # insert into DynamoDB\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(table_name)\n table.put_item(Item=body)\n status_code = 200\n result = json.dumps(body)\n\n except dns.resolver.NXDOMAIN:\n status_code = 404 # domain no longer exists, or domain not found :)\n result = ''\n\n except KeyError: # insufficient queryStringParameters\n status_code = 400\n result = ''\n\n return {'statusCode': status_code,\n 'headers': headers,\n 'body': result}", "def list_domain(self, feed_id=None):\n domains = self.list_resource(feed_id=feed_id,\n resource_type_id='Domain Host',\n cls=Domain,\n list_children=True,\n include_data=True)\n return domains", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = 
global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def query_records(self, context, rrs):\n records = self.dns_manager.query_records(context, rrs)\n return records", "def getdns(self):\r\n filename = r\"dns_profiles.txt\"\r\n fp = open(filename)\r\n data = []\r\n for lines in fp.readlines():\r\n data.append(list(map(float, lines.split())))\r\n #use the fundamental string function 'append','split' to extract floating point number\r\n fp.close()\r\n dns_data = np.array(data) #transfer list to array\r\n self.dns_z = dns_data[:, 0] / 1000 #z-plus -> z/h\r\n self.dns_u = dns_data[:, 1] # u-plus\r\n self.dns_uw = dns_data[:, 2]\r\n self.dns_uu = dns_data[:, 3]\r\n self.dns_ww = dns_data[:, 4]\r\n self.dns_vv = dns_data[:, 5]\r\n self.dns_tau = dns_data[:, 7]\r\n self.dns_tot = dns_data[:, 8]", "def dict_of_domains(fc):\r\n # need to find root database (GDB or SDE)\r\n db_root = os.path.dirname(fc)\r\n while db_root[-4:].lower() != '.gdb' and db_root[-4:].lower() != '.sde':\r\n old_db_root = db_root # protect against infinite loop\r\n db_root = os.path.dirname(db_root)\r\n if old_db_root == db_root: # protect against infinite loop\r\n break\r\n arcpy.AddMessage(\"Retrieving Domains from \" + str(db_root))\r\n return {domain.name: domain.codedValues for domain in arcpy.da.ListDomains(db_root)}", "def get(self, api_key):\n\n try:\n mailgun.list_domains(api_key)\n return {\"api_key\": api_key, \"valid\": True}\n except:\n return {\"api_key\": api_key, \"valid\": False}", "def domain_info(self, domain):\n endpoint = '/Domain/Info'\n\n params = {\n 'Domain' : domain\n }\n\n response = self.__perform_get_request(endpoint, params)\n \n if response.status_code == 200:\n parsed_response = response.json()\n return parsed_response", "def test_get_all_email_domains(self):\n email_dom2 = 'testgetall.com'\n org = 'o=%s' % (self.org_name)\n dn = '%s,%s' % (org, self.base_dn)\n dn_info = {'aenetPostfixDomain': [self.email_dom, email_dom2]}\n expected_result = [(dn, dn_info)] \n domain = SpokeEmailDomain(self.org_name)\n domain.create(email_dom2)\n result = domain.get()['data']\n self.assertEqual(result, expected_result)", "def 
get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevNet_GetDnsServers', self.handle))", "def cb_zone(self, cmd):\n def normal(name, rr):\n return name + \"IN %(ttl)6s %(type)-5s %(rdata)s\" % rr\n def MX(name, rr):\n return name + \"IN %(ttl)6s %(type)-5s %(priority)s %(rdata)s\" % rr\n print \"$ORIGIN %s.\" % (self.d.domain)\n for name in sorted(self.d.listRR(),\n key=lambda x: _domreverse(x)):\n if name == \"@\":\n name = \"\"\n else:\n name = name + \".\"\n for e in _resolve_any_to_text(\"%s%s\" % (name, self.d.domain),\n 'ns1.loopia.se', self.d.domain):\n print re.sub(r'(^|\\.)%s. ' % (self.d.domain), ' ', e)", "def display_dict() -> None:\n for key in ascii_dict:\n print(key, ': ')\n for line in ascii_dict[key]:\n print(line)", "def collect_results(name: str) -> dict:\n full_response = {}\n\n target_name = dns.name.from_text(name)\n\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n if response is not None:\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n\n if response is not None:\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n\n if response is not None:\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n if response is not None:\n for answers in response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response", "def cli_print_record( field_list, showid=False):\n debug(\"cli_print_record(%s)\" % field_list)\n try:\n raw_record_list = api.find_records(field_list)\n except NoRecordsFound as error:\n print \"No records found for: %(field_list)s, %(error)s\" % locals() \n return False\n except InvaildQuery as error:\n print \"Not query to query database with\"\n return False\n\n # Grab all the display fields from the field_list\n display_field_list = api.get_display_fields(field_list)\n\n # Commented out, as i will assume if you have not asked for any fields,\n # then you want them all\n # Make sure that name is in the display_field_list\n #if 'name' not in display_field_list:\n # display_field_list.append('name')\n\n record_list = []\n record_length = defaultdict(int)\n for raw_record in raw_record_list:\n record = raw_record\n for k, v in raw_record.items():\n if isinstance(v, list):\n v = \",\".join(v)\n record[k] = v\n if record_length[k] < len(str(v)):\n record_length[k] = len(str(v))\n record_list.append(record)\n\n if display_field_list:\n\n simple_format = re.sub('(?P<m>\\w+)',\"%(\\g<m>)s\", \" \".join(display_field_list) )\n\n # Better formatting of the simple_format string\n display_string = \"\"\n for d in display_field_list:\n display_string += \"%%(%s)-%ds \" % (d, record_length[d])\n simple_format 
= display_string\n\n for record in record_list:\n try:\n print simple_format % record\n except KeyError as error:\n debug(\"cli_print_record: unable to print fields for record: %(error)s\" % locals())\n else:\n for record in record_list:\n print\n print '\\033[1m%(name)s\\033[0m' % record\n for key, value in sorted(record.items()):\n if type(value).__name__ in [ 'str', 'unicode','int','float','bool']:\n print \" %(key)s: %(value)s\" % locals()\n continue\n elif type(value).__name__ in [ 'list', 'set']:\n print \" %s: %s\" % ( key, \",\".join( value) )\n continue\n elif type(value).__name__ == 'ObjectId':\n if showid:\n print \" %(key)s: %(value)s\" % locals()\n continue\n elif type(value).__name__ == 'NoneType':\n continue\n\n else:\n raise RecordKeeperException(\"Unhandled data format '%s' <%s>\" % ( key, type(value).__name__))", "def main():\n for dev in Discover.discover().values():\n print(dev)", "def get_dev_records(self, context, zone_id):\n records = self.dns_manager.get_dev_records(context, zone_id)\n return records", "def show_hosts():\n host_str = \"\"\n data = parse(\"/tmp/cc/calls.log\")\n for ip in data:\n ln = \"{}: {}\".format(ip, data[ip]) + \"\\n\"\n host_str += ln\n return host_str", "def parse(domains):\n subdomains = []\n for domain in domains:\n url = 'https://urlscan.io/api/v1/search/?q=domain:{}'.format(domain)\n json_resp = json.loads(requests.get(url).text)\n subdomains += list(set(find('domain', json_resp)))\n return list(set(subdomains))", "def provDistributionList(ldp_conf, ldap_query, attrs):\n for dn, entry in ldapQuery(ldp_conf, ldap_query, attrs):\n distribution_list_name = entry['mail'][0]\n print 'cdl', distribution_list_name\n print 'adlm', distribution_list_name, \n if not 'zimbraMailForwardingAddress' in entry:\n continue\n for member in entry['zimbraMailForwardingAddress']:\n print member,\n # Break line, distribution list member finished\n print\n # Finish\n print", "def collect_results(name: str) -> dict:\n full_response = {}\n target_name = dns.name.from_text(name)\n # lookup CNAME\n response = lookup(target_name, dns.rdatatype.CNAME)\n cnames = []\n for answers in response.answer:\n for answer in answers:\n cnames.append({\"name\": answer, \"alias\": name})\n # lookup A\n response = lookup(target_name, dns.rdatatype.A)\n arecords = []\n for answers in response.answer:\n a_name = answers.name\n for answer in answers:\n if answer.rdtype == 1: # A record\n arecords.append({\"name\": a_name, \"address\": str(answer)})\n # lookup AAAA\n response = lookup(target_name, dns.rdatatype.AAAA)\n aaaarecords = []\n for answers in response.answer:\n aaaa_name = answers.name\n for answer in answers:\n if answer.rdtype == 28: # AAAA record\n aaaarecords.append({\"name\": aaaa_name, \"address\": str(answer)})\n # lookup MX\n response = lookup(target_name, dns.rdatatype.MX)\n mxrecords = []\n for answers in response.answer:\n mx_name = answers.name\n for answer in answers:\n if answer.rdtype == 15: # MX record\n mxrecords.append({\"name\": mx_name,\n \"preference\": answer.preference,\n \"exchange\": str(answer.exchange)})\n\n full_response[\"CNAME\"] = cnames\n full_response[\"A\"] = arecords\n full_response[\"AAAA\"] = aaaarecords\n full_response[\"MX\"] = mxrecords\n\n return full_response", "def get_all_dns_connection(self):\n return self.m_connection.all_dns", "def get_ds(self, domain: str, as_json: bool = False):\n formatted_answer = {'domain': domain, 'rr_types': [\"ds\"], 'answer': None}\n\n status, result = Resolver.ctx_dnssec.resolve(domain, 
rrtype=ub.RR_TYPE_DS)\n\n if status == 0 and result.havedata:\n print(\"ds record returned.\")\n formatted_answer['answer'] = {}\n ds_records_list = result.data.data\n i = 0\n for ds in ds_records_list:\n if as_json:\n formatted_answer['answer'][i] = str(ds)\n else:\n formatted_answer['answer'][i] = ds\n i += 0\n elif status != 0: # throw/raise error\n print(\"Resolve error: \", ub.ub_strerror(status))\n elif result.havedata == 0: # if no data in result\n print(\"No data.\")\n if as_json:\n return json.dumps(formatted_answer)\n return DNSFormattedResponse(formatted_answer)", "def test_dns(self):\n rv = extract_ids(X509_DNS_ONLY)\n assert [\n DNSPattern(b\"www.twistedmatrix.com\"),\n DNSPattern(b\"twistedmatrix.com\")\n ] == rv", "def domain(self):\n return self.keys()", "def DNSMadeEasyAPI_dns_managed():\n r = DNSMadeEasyAPI(API_KEY, API_SECRET, True).dns.managed.GET()\n assert_eq(\n r.status_code,\n 200)\n r.json()", "async def get_record_info(self, zone_id: str) -> list[CloudflareDNSRecord]:\n record_information: list[CloudflareDNSRecord] = []\n if self.records is None:\n self.records = []\n data = await self.get_zone_records(zone_id)\n\n if data is None:\n raise CloudflareException(f\"No records found for {zone_id}\")\n\n self.records = data\n\n if not self.records:\n return record_information\n\n for record in self.records:\n if self.zone not in record:\n record = f\"{record}.{self.zone}\"\n\n recorddata: list[dict[str, Any]] = await self.api.get(\n self._endpoint(\n path=f\"{zone_id}/dns_records\",\n query={\"name\": record},\n )\n )\n\n first_record = recorddata[0]\n record_information.append(\n CloudflareDNSRecord(\n content=first_record[\"content\"],\n id=first_record[\"id\"],\n name=first_record[\"name\"],\n proxied=first_record[\"proxied\"],\n type=first_record[\"type\"],\n )\n )\n return record_information", "def print_response(response):\n for report in response.get('reports', []):\n rows = report.get('data', {}).get('rows', [])\n for row in rows:\n print(row)", "def result_printer(self):\n for i in self.output:\n for item, value in i.items():\n if not isinstance(value, list) and \"http://\" not in value:\n print(f\"{item} : {value}\")\n print(20 * '-')", "def fqdns(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"fqdns\")", "def print_all(self):\n with open(self.file, 'r', encoding='utf-8') as self.contacts_file:\n for i in self.contacts_file.readlines():\n print(i)", "def print_data_list(self):\n print('\\n{0}'.format(self.webDataFrame))", "def get_dns_servers(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetDnsServers', self.handle))", "def get_domains() -> List[str]:\n ret = _call_endpoint(\"v1/domains\")\n # Example response:\n # [{'createdAt': '2016-06-25T03:08:44.000Z',\n # 'domain': 'mydomain.com',\n # 'domainId': 12345678,\n # 'expirationProtected': False,\n # 'expires': '2020-06-25T03:08:44.000Z',\n # 'holdRegistrar': False,\n # 'locked': True,\n # 'nameServers': None,\n # 'privacy': False,\n # 'renewAuto': True,\n # 'renewDeadline': '2020-08-09T03:08:44.000Z',\n # 'renewable': True,\n # 'status': 'ACTIVE',\n # 'transferProtected': False},]\n domains = [d[\"domain\"] for d in ret]\n return domains", "def list_computers(self, kwargs):\n resolve = \"resolve\" in kwargs and kwargs[\"resolve\"]\n dns = kwargs.get(\"dns\", \"\")\n dc = kwargs.get(\"dc\", False)\n\n hostnames = []\n if not dc:\n results = self.engine.query(self.engine.COMPUTERS_FILTER(), [\"name\"])\n else:\n results = self.engine.query(self.engine.DC_FILTER(), 
[\"name\"])\n for result in results:\n if \"name\" in result: # ugly\n computer_name = result[\"name\"]\n else:\n computer_name = result[:-1] # removing trailing $ sign\n\n hostnames.append(f\"{computer_name}.{self.engine.fqdn}\")\n # print only if resolution was not mandated\n if not resolve:\n print(f\"{computer_name}.{self.engine.fqdn}\")\n # do the resolution\n if resolve:\n for computer in utils_resolve(hostnames, dns):\n print(\"{addr:20} {name}\".format(addr=computer[\"address\"], name=computer[\"hostname\"]))", "def _read_dns_(dns, cnt):\r\n \r\n dn_names = None\r\n dn_ids = None\r\n dn_iaps = [None]*10\r\n \r\n for dn in dns.DN:\r\n if dn.ref == 'Name':\r\n dn_names = dn.value\r\n if dn.ref == 'DNId':\r\n dn_ids = dn.value\r\n if dn.ref == 'IAP':\r\n dn_iaps[0] = dn.value\r\n if dn.ref == 'IAP2':\r\n dn_iaps[1] = dn.value\r\n if dn.ref == 'IAP3':\r\n dn_iaps[2] = dn.value\r\n if dn.ref == 'IAP4':\r\n dn_iaps[3] = dn.value\r\n if dn.ref == 'IAP5':\r\n dn_iaps[4] = dn.value\r\n if dn.ref == 'IAP6':\r\n dn_iaps[5] = dn.value\r\n if dn.ref == 'IAP7':\r\n dn_iaps[6] = dn.value\r\n if dn.ref == 'IAP8':\r\n dn_iaps[7] = dn.value\r\n if dn.ref == 'IAP9':\r\n dn_iaps[8] = dn.value\r\n if dn.ref == 'IAP10':\r\n dn_iaps[9] = dn.value\r\n \r\n logger.info('Parsed DN names: %s' % dn_names)\r\n logger.info('Parsed DN ids: %s' % dn_ids)\r\n logger.info('Parsed DN iaps: %s' % dn_iaps)\r\n \r\n for i in range(len(dn_names)):\r\n mydn = Dn()\r\n mydn.set_id(dn_ids[i])\r\n mydn.set_name(dn_names[i])\r\n myiaps = [None]*10\r\n for j in range(10):\r\n myiaps[j] = dn_iaps[j][i]\r\n mydn.set_iaps(myiaps)\r\n cnt.add_dn(mydn)\r\n return cnt", "def public_ip_dns(resolv, nameservers, rdatatype, server, responsetype):\n for ns in nameservers:\n try:\n answer = resolv.query(ns, rdatatype)\n nameserver = answer[0].to_text()\n except Exception as e:\n print(e)\n continue\n resolve_public_ip(nameserver, server, responsetype)", "def listRR(self):\n reply = self.rpc.getSubdomains(self.username,\n self.password,\n self.domain)\n\n if len(reply) and reply[0] in ('UNKNOWN_ERROR',\n 'RATE_LIMITED'):\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "async def ping_multiple_domains(req, resp):\n\n results = []\n\n def build_domain_results(protocol, request_domain, results, headers):\n domain_response_code, domain_response_text, domain_response_time_ms, domain_response_headers = _process_request(protocol, request_domain, req.params, headers)\n results.append({\n \"protocol\": protocol,\n \"domain\": request_domain,\n \"domain_response_code\": domain_response_code,\n \"domain_response_headers\": domain_response_headers,\n \"domain_response_time_ms\": domain_response_time_ms\n })\n\n def gather_results(data):\n for domain in data['domains']:\n protocol = domain['protocol']\n request_domain = domain['domain']\n headers = domain['headers']\n build_domain_results(protocol, request_domain, results, headers)\n\n resp.media = {\"domains_response_results\": results, \"wait\": gather_results(await req.media())}", "def resolveOriginalDomains():\n print('[+] Populating Domain Name Resolution for later check ')\n\n try:\n for domain in domains:\n response = dns.resolver.query(domain)\n d = Domain_Poison_Check(domain)\n print('[+] Domain: %s' % domain)\n for record in response:\n print(' |____> maps to %s.' 
% (record.address))\n d.pushAddr(record)\n check_domain_poison_results.append(d)\n return time.time()\n except Exception as err:\n print('[+] Exception: %s' % err)\n traceback.print_exc()\n return time.time()", "def print_all(self):\n print(\n \"\"\"\\nContents of hash table, with blank lines separating distinct\n linked lists:\"\"\".replace(' ', ''))\n\n for linked_list in self.main_array:\n print(linked_list)\n print('')", "def _list(self, account, page):\n response = self.client.get(self.get_url(account), data={\"page\": page})\n return [\n DomainResource(**item) for item in response['data']\n ], response['pagination']", "def recursive_dns_lookup(target_name, qtype, root_servers_list):\n\n # Base case\n if not root_servers_list:\n return None\n\n # Create dns query based on the target_name (website)\n # and qtype (queue type: CNAME, A, AAAA, or MX)\n dns_query = dns.message.make_query(target_name, qtype)\n\n for server in root_servers_list:\n # Doing a try catch to check if the dns server times out,\n # if it does then we continue and try another server\n try:\n query_response = dns.query.udp(dns_query, server, 3)\n except dns.exception.Timeout:\n continue\n # If there's an answer in the response\n if query_response.answer:\n # Search through the response.answer for possible answers\n for response_answers in query_response.answer:\n #print(\"response_answers: \", response_answers)\n for response_answer in response_answers:\n #print(\"Response_answer\", response_answer)\n target_name = str(response_answer)[:-1] # Removes the period at the end\n #print(\"Target_name\", target_name)\n # If we don't get the reponse we're after then\n # continue searching through the root_servers\n if response_answer.rdtype != qtype:\n if response_answer.rdtype == 5:\n return recursive_dns_lookup(target_name, qtype, ROOT_SERVERS)\n else:\n # Return the answer we wanted\n return query_response\n else: # If there isn't an answer in the response then we check additional\n\n # If we do have something in additional then get the stuff inside\n if query_response.additional:\n ip_addresses = []\n for response_additional in query_response.additional:\n #print(\"response_additional: \", response_additional)\n # Convert to string then send to function for parsing the address out\n response_additional_str = str(response_additional)\n\n #print(\"function get_address resp:\", resp)\n resp_elements = response_additional_str.split()\n #print(\"function get_address resp_elements:\", resp_elements)\n ip_address = []\n for resp_element in resp_elements:\n #print(\"function get_address resp_element:\", resp_element)\n if resp_element != 'A':\n continue\n else:\n #print(\"function get_address resp_element = A:\", resp_element)\n #print(\"function get_address address:\", resp_elements[-1])\n ip_address.append(resp_elements[-1])\n ip_addresses += ip_address\n\n return recursive_dns_lookup(target_name, qtype, ip_addresses)", "def main():\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument(\"name\", nargs=\"+\",\n help=\"DNS name(s) to look up\")\n argument_parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n program_args = argument_parser.parse_args()\n for a_domain_name in program_args.name:\n ##print(\"Domain name: \", a_domain_name)\n ##print(\"Cache System: \", CACHE_SYSTEM)\n if a_domain_name in CACHE_SYSTEM:\n print_results(CACHE_SYSTEM[a_domain_name])\n else:\n # Saves the domain name in a cache system and if it's searched\n # again then it won't 
requery and just print it out from the\n # cache system\n CACHE_SYSTEM[a_domain_name] = collect_results(a_domain_name)\n print_results(CACHE_SYSTEM[a_domain_name])" ]
[ "0.70802027", "0.6874424", "0.67829025", "0.6670683", "0.6497714", "0.64597666", "0.6447752", "0.6308665", "0.62406534", "0.6203262", "0.619593", "0.6123511", "0.60674375", "0.6036058", "0.6034905", "0.6027634", "0.59853095", "0.5983278", "0.59728104", "0.5970376", "0.5966782", "0.5955435", "0.59287465", "0.5914915", "0.5912606", "0.59061444", "0.58951664", "0.58683264", "0.58351517", "0.58340347", "0.58235985", "0.5720819", "0.5718227", "0.5709996", "0.56981564", "0.568801", "0.5623581", "0.5622485", "0.56215227", "0.55957425", "0.55953777", "0.55653334", "0.5553869", "0.5546617", "0.55446666", "0.5538852", "0.5513195", "0.5510513", "0.55045706", "0.5491934", "0.5481157", "0.5473348", "0.54674256", "0.5455259", "0.5452117", "0.5437091", "0.5428251", "0.5374801", "0.5368825", "0.5357781", "0.5355116", "0.5352537", "0.5312518", "0.5309272", "0.53090006", "0.5296844", "0.5293089", "0.52926004", "0.52880126", "0.5274588", "0.52714866", "0.5245347", "0.52411234", "0.524078", "0.5238163", "0.5234517", "0.52344257", "0.5219269", "0.52146935", "0.52140045", "0.5213099", "0.52123207", "0.5207036", "0.52029055", "0.52014047", "0.5194816", "0.5187425", "0.51847273", "0.5182031", "0.5177938", "0.51768756", "0.51741487", "0.5172985", "0.5166888", "0.5166489", "0.5161821", "0.51607466", "0.5157944", "0.51546323", "0.5144683" ]
0.8448762
0
Extracts MFCCs from a music dataset and saves them into a JSON file.
def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512): data = { 'mapping': [], 'labels': [], 'MFCCs': [], 'files': [] } # loop through all sub-dirs total_samples = 0 valid_samples = 0 for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))): # ensure we're at sub-folder level if dirpath is not dataset_path: # save label (i.e., sub-folder name) in the mapping label = dirpath.partition('speech_commands_subset')[-1][1:] data['mapping'].append(label) print("\nProcessing: '{}'".format(label)) print("number of files for each class: ", len(filenames)) # process all audio files for f in filenames: total_samples += 1 file_path = os.path.join(dirpath, f) # load audio file and slice it to ensure length consistency among different files signal, sample_rate = librosa.load(file_path) # print(signal.shape) # print(type(signal[0])) # drop audio files with less than pre-decided number of samples if len(signal) >= SAMPLES_TO_CONSIDER: valid_samples += 1 # ensure consistency of the length of the signal signal = signal[:SAMPLES_TO_CONSIDER] # extract MFCCs MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, hop_length = hop_length) # print(MFCCs.shape) # print(type(MFCCs[0,0])) # store data for analysed track data['MFCCs'].append(MFCCs.T.tolist()) data['labels'].append(i-1) # data['files'].append(file_path) # print("{}: {}".format(file_path, i-1)) # if valid_samples == 20: # valid_samples =0 # break print("\ntotal samples: ", total_samples) print("\nvalid_samples: ", valid_samples) return data
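The query for this record mentions saving the MFCCs to a JSON file, while preprocess_dataset only builds and returns the data dict. A minimal sketch of the missing save step, assuming the function above; json_path and the default sample count are assumptions (22050 is one second at librosa's default rate):

import json

def save_features(dataset_path, json_path, samples_to_consider=22050):
    # build the MFCC dict with preprocess_dataset above, then dump it to disk
    data = preprocess_dataset(dataset_path, samples_to_consider)
    with open(json_path, "w") as fp:
        json.dump(data, fp, indent=4)  # lists and ints in `data` are JSON-serializable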
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_mfcc(dataset_path, json_path, num_mfcc=13, n_fft=2048, hop_length=512, num_segments=5):\n\n # dictionary to store mapping, labels, and MFCCs\n data = {\n \"mapping\": [],\n \"labels\": [],\n \"mfcc\": []\n }\n\n samples_per_segment = int(SAMPLES_PER_TRACK / num_segments)\n num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length)\n\n # loop through all genre sub-folder\n for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):\n\n # ensure we're processing a genre sub-folder level\n if dirpath is not dataset_path:\n\n # save genre label (i.e., sub-folder name) in the mapping\n semantic_label = dirpath.split(\"/\")[-1]\n data[\"mapping\"].append(semantic_label)\n print(\"\\nProcessing: {}\".format(semantic_label))\n\n # process all audio files in genre sub-dir\n for f in filenames:\n\n # load audio file\n file_path = os.path.join(dirpath, f)\n signal, sample_rate = librosa.load(file_path, sr=SAMPLE_RATE)\n\n # process all segments of audio file\n for d in range(num_segments):\n\n # calculate start and finish sample for current segment\n start = samples_per_segment * d\n finish = start + samples_per_segment\n\n # extract mfcc\n mfcc = librosa.feature.mfcc(signal[start:finish], sample_rate, n_mfcc=num_mfcc, n_fft=n_fft,\n hop_length=hop_length)\n mfcc = mfcc.T\n\n # store only mfcc feature with expected number of vectors\n if len(mfcc) == num_mfcc_vectors_per_segment:\n data[\"mfcc\"].append(mfcc.tolist())\n data[\"labels\"].append(i - 1)\n print(\"{}, segment:{}\".format(file_path, d + 1))\n\n # save MFCCs to json file\n with open(json_path, \"w\") as fp:\n json.dump(data, fp, indent=4)", "def compute_all_features(mp3_file):\n # Decode and read mp3\n audio, _ = librosa.load(mp3_file, sr=SR)\n\n # Compute mels\n mel = compute_melspecs(audio)\n\n # Save\n out_file = os.path.join(\n OUTPUT_DIR, os.path.basename(mp3_file).replace(\".mp3\", \"-mel.npy\"))\n np.save(out_file, mel)", "def wav2mfcc(file_path, max_len=44, n_mfcc=20):", "def combine_mfcc_id():\n\tmfcc, vad = [], []\n\tall_frames = get_frame()\n\t#read file utt2spk, sorted and store in an array vad\n\twith open('utt2spk') as f:\n\t\tcontent = f.readlines()\n\tcontent = [x.strip() for x in content]\n\n\tfor key in all_frames.keys():\n\t\tmfcc.append(all_frames[key]) #append mfcc to a list \n\t\tfor tuples in content: \n\t\t\twav = tuples.split(' ')[0]\n\t\t\tID = tuples.split(' ')[1]\n\t\t\tif wav == key: #same wav file name\n\t\t\t\tif ID == 'speech': \n\t\t\t \t\tvad.append([1, 0, 0])\n\t\t\t\telif ID == 'noise':\n\t\t\t\t\tvad.append([0, 1, 0])\n\t\t\t\telse:\n\t\t\t\t\tvad.append([0, 0, 1])\n\n\ttrain_mfcc = mfcc[:len(mfcc)/2]\n\tval_mfcc = mfcc[len(mfcc)/2:len(mfcc)*3/4]\n\ttest_mfcc = mfcc[len(mfcc)*3/4:]\n\ttrain_vad = vad[:len(vad)/2]\n\tval_vad = vad[len(vad)/2:len(vad)*3/4]\n\ttest_vad = vad[len(vad)*3/4:]\n\t\n\t################### Train and Validation data are 2D array ######\n\tnew_train_mfcc, new_train_vad = [], []\n\tfor count, matrix in enumerate(train_mfcc): \n\t\tfor row in matrix:\n\t\t\tnew_train_mfcc.append(row) \n\t\t\tnew_train_vad.append(train_vad[count])\n\n\tnew_val_mfcc, new_val_vad = [], []\n\tfor count, matrix in enumerate(val_mfcc):\n\t\tfor row in matrix:\n\t\t\tnew_val_mfcc.append(row)\n\t\t\tnew_val_vad.append(val_vad[count])\n\t#################### Test data is a 3D array #####################\n\tnew_test_mfcc, new_test_vad = [], []\n\tfor count, matrix in enumerate(test_mfcc): #ensure dimension of the data (10-sec)\n\t\tif np.array(matrix).shape == 
(1003,20): \n\t\t\tnew_test_mfcc.append(matrix)\n\t\t\tnew_test_vad.append([vad[count]])\n\t\n\tnp_train_mfcc = np.array(new_train_mfcc) \n\tnp_val_mfcc = np.array(new_val_mfcc)\n\tnp_test_mfcc = np.array(new_test_mfcc)\n\tnp_train_vad = np.array(new_train_vad)\n\tnp_val_vad = np.array(new_val_vad)\n\tnp_test_vad = np.array(new_test_vad)\n\n\tprint(\"The shape of the train mfcc numpy array is %s\" % (np_train_mfcc.shape,))\n\tprint(\"The shape of the validation mfcc numpy array is %s\" % (np_val_mfcc.shape,))\n\tprint(\"The shape of the test mfcc numpy array is %s\" % (np_test_mfcc.shape,))\n\tprint(\"The shape of the train vad numpy array is %s\" % (np_train_vad.shape,))\n\tprint(\"The shape of the validation vad numpy array is %s\" % (np_val_vad.shape,))\n\tprint(\"The shape of the test vad numpy array is %s\" % (np_test_vad.shape,))\n\n\treturn (np_train_mfcc,np_val_mfcc,np_test_mfcc,np_train_vad,np_val_vad,np_test_vad)", "def save_mfccs(fname, mfccs, categories, distances=None):\n # data prep\n n = len(mfccs)\n categories = np.asarray(categories)\n mfcc_lens = np.empty(n, int)\n for i, mfcc in enumerate(mfccs):\n mfcc_lens[i] = mfcc.shape[0]\n flat_mfccs = np.concatenate(mfccs, axis=0)\n # save data\n if os.path.isfile(fname):\n _save_mfccs_append(fname, mfccs, flat_mfccs, categories, distances, mfcc_lens)\n else:\n _save_mfccs_new(fname, mfccs, flat_mfccs, categories, distances, mfcc_lens)", "def extract_mfccs(file_name, pad_len=174, n_mfcc=40):\n\n signal, sr = librosa.load(file_name, res_type='kaiser_fast')\n mfccs = librosa.feature.mfcc(signal, sr=sr, n_mfcc=n_mfcc)\n\n if mfccs.shape[1] > pad_len:\n mfccs = mfccs[:, :pad_len]\n else:\n pad_width = pad_len - mfccs.shape[1]\n mfccs = np.pad(mfccs, ((0, 0), (0, pad_width)), mode='constant')\n\n return mfccs", "def do_mfccs(fname):\n sound, srate = sf.read(fname)\n \n #f = Sndfile(fname,'r')\n #srate = f.samplerate\n #nf = f.nframes\n #sound = f.read_frames(nf) \n fbanks = Spectral(\n nfilt=40, # nb of filters in mel bank\n alpha=0.97, # pre-emphasis\n fs=srate, # sampling rate\n frate=100, # frame rate\n wlen=0.025, # window length\n nfft=512, # length of dft\n ncep=13, # nb of cepstral coefficients\n lowerf=100,\n upperf=6855.4976,\n do_deltas=True, # speed\n do_deltasdeltas=True # acceleration\n )\n fb = np.array(fbanks.transform(sound), dtype='float32')\n return fb", "def arff(features, path):\n out = open(path, 'w')\n\n # Header\n out.write(\"@RELATION music_speech\\n\")\n for i in range(features.shape[1]-1):\n out.write(\"@ATTRIBUTE MFCC_%i NUMERIC\\n\" % i)\n out.write(\"@ATTRIBUTE class {music,speech}\\n\\n@DATA\\n\")\n\n # Data\n for mfcc in features:\n for i in xrange(len(mfcc)-1):\n out.write(\"%f,\" % mfcc[i])\n out.write(\"%s\\n\" % ('music' if mfcc[-1] == 1 else 'speech'))\n\n out.close()", "def extract_feature_1d(file_name, **kwargs):\n mfcc = kwargs.get(\"mfcc\")\n mel = kwargs.get(\"mel\")\n audio = kwargs.get(\"audio\")\n\n y, sr = librosa.load(file_name, duration=8, sr=16000, dtype=np.float32)\n result = np.array([])\n\n if mfcc:\n # O np mean é utilizado para transformar a matriz em vetor, tirando a media de cada linha\n mfccs = np.mean(librosa.feature.mfcc(y=y, sr=sr, n_mfcc=128).T, axis=0)\n result = np.hstack((result, mfccs))\n\n if mel:\n mel1d = np.mean(librosa.feature.melspectrogram(y, sr=sr).T,axis=0)\n mel = librosa.power_to_db(mel1d ** 2)\n\n result = np.hstack((result, mel))\n if audio:\n result = np.hstack((result, y))\n\n return result", "def all_wav_to_mp3(self):\n for each_file, artist in 
self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def extract_mfcc_by_beat(audio, beats):\n spectrum = Spectrum()\n mfcc = MFCC()\n mfccs = []\n w = Windowing(type = 'hann')\n\n for i in range(len(beats) - 1):\n buffer = audio[int(beats[i]) : int(beats[i + 1])]\n\n if len(buffer) % 2 != 0:\n buffer = np.append(buffer, audio[int(beats[i + 1])])\n\n frame = w(buffer)\n spec = spectrum(frame)\n mfcc_bands, mfcc_coeffs = mfcc(spec)\n mfccs.append(mfcc_coeffs)\n \n mfccs = np.matrix(mfccs)\n return mfccs", "def mscoco_read_json(file_path):\n print(\"Reading mscoco raw data .. \")\n print(\" data path: %s\" % file_path)\n with open(file_path, \"r\") as fd:\n data = json.load(fd)\n\n print(\"%d sentences in total\" % len(data[\"annotations\"]))\n \n # aggregate all sentences of the same images\n image_idx = set([d[\"image_id\"] for d in data[\"annotations\"]])\n paraphrases = {}\n for im in image_idx: paraphrases[im] = []\n for d in tqdm(data[\"annotations\"]):\n im = d[\"image_id\"]\n sent = d[\"caption\"]\n paraphrases[im].append(sent)\n\n sentence_sets = [paraphrases[im] for im in paraphrases]\n\n return sentence_sets", "def get_test_file(test_file, gmms):\n rate, sig = wavfile.read(test_file)\n mfcc_feat = mfcc(sig, rate)\n pred = {}\n for model in gmms:\n pred[model] = gmms[model].score(mfcc_feat)\n return get_nbest(pred, 2), pred", "def main():\n\n classes = {\n \"rain\":0,\n \"rooster\":1,\n \"crying_baby\":2,\n \"sea_waves\":3,\n \"clock_tick\":4,\n \"sneezing\":5,\n \"dog\":6,\n \"crackling_fire\":7,\n \"helicopter\":8,\n \"chainsaw\":9,\n }\n\n with open(\"../data/audio/ESC-50-master/meta/esc50.csv\") as f:\n lines = [i[:-1] for i in f.readlines()]\n lines = lines[1:]\n\n os.system(\"rm -rf ../data/audio/ESC-10\")\n os.system(\"mkdir ../data/audio/ESC-10\")\n os.system(\"mkdir ../data/audio/ESC-10/audio\")\n\n meta = []\n for line in lines:\n t = line.split(\",\")\n if (t[-3] == 'True'):\n meta.append(\"../data/audio/ESC-10/audio/%s %d\" % (t[0],classes[t[3]]))\n src = \"../data/audio/ESC-50-master/audio/\"+t[0]\n dst = \"../data/audio/ESC-10/audio/\"+t[0]\n shutil.copy(src,dst)\n\n with open(\"../data/audio/ESC-10/filelist.txt\",\"w\") as f:\n for m in meta:\n f.write(m+\"\\n\")", "def wav2mfccDataAugmnetation(file_path):\r\n #Load .wav to array\r\n augmentArray =[]\r\n wave, _ = librosa.load(file_path, mono=Constants.channelMap[Tunable.tunableDict['channels']], sr=Tunable.tunableDict['samplingRate'])\r\n for i in range(Tunable.tunableDict['pitchShiftLower'], Tunable.tunableDict['pitchShiftUpper']):\r\n wave = librosa.effects.pitch_shift(wave, sr=Tunable.tunableDict['samplingRate'], n_steps=i)\r\n wave = np.asfortranarray(wave)\r\n\r\n #Convert to Mel-Frequency Cepstral Coefficients\r\n mfcc = librosa.feature.mfcc(wave, sr=Tunable.tunableDict['samplingRate'], n_mfcc=Tunable.tunableDict['buckets'])\r\n\r\n # If maximum length exceeds mfcc lengths then pad the remaining ones\r\n if Tunable.tunableDict['maxLen'] > mfcc.shape[1]:\r\n pad_width = Tunable.tunableDict['maxLen'] - mfcc.shape[1]\r\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='minimum')\r\n\r\n # Else cutoff the remaining parts\r\n else:\r\n mfcc = mfcc[:, :Tunable.tunableDict['maxLen']]\r\n augmentArray.append(mfcc)\r\n\r\n return augmentArray", "def wav2mfcc(file_path):\r\n #Load .wav to array\r\n wave, _ = librosa.load(file_path, mono=Constants.channelMap[Tunable.tunableDict['channels']], sr=Tunable.tunableDict['samplingRate'])\r\n wave = np.asfortranarray(wave)\r\n\r\n #Convert to 
Mel-Frequency Cepstral Coefficients\r\n mfcc = librosa.feature.mfcc(wave, sr=Tunable.tunableDict['samplingRate'], n_mfcc=Tunable.tunableDict['buckets'])\r\n\r\n # If maximum length exceeds mfcc lengths then pad the remaining ones\r\n if Tunable.tunableDict['maxLen'] > mfcc.shape[1]:\r\n pad_width = Tunable.tunableDict['maxLen'] - mfcc.shape[1]\r\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='minimum')\r\n\r\n # Else cutoff the remaining parts\r\n else:\r\n mfcc = mfcc[:, :Tunable.tunableDict['maxLen']]\r\n\r\n return [mfcc]", "def mfcc(path, windowsize, overlap, M):\n srate, data = scipy.io.wavfile.read(path)\n\n bank = filterbank(0, srate/2, M, srate, windowsize)\n buckets = bucketize(data/32768.0, windowsize, overlap)\n energies = buckets.dot(bank.transpose())\n\n return scipy.fftpack.dct(numpy.log10(energies))", "def load_mfccs(fnames):\n if isinstance(fnames, str):\n return load_mfccs_file(fnames)\n mfccs = []\n dists = []\n cats = []\n for fname in fnames:\n m, d, c = load_mfccs_file(fname)\n mfccs.extend(m)\n dists.append(d)\n cats.append(c)\n dists = np.concatenate(dists)\n cats = np.concatenate(cats)\n return mfccs, dists, cats", "def get_audio_data(filename):\n\n audio_file = eyed3.load(filename)\n artist = audio_file.tag.artist\n title = audio_file.tag.title\n time = audio_file.info.time_secs\n album = audio_file.tag.album\n genre = re.sub('^\\(.*\\)', '', str(audio_file.tag._getGenre().name).lower().replace('|', ',').replace('/', ','))\n\n try:\n year = audio_file.tag.getBestDate().year\n except:\n year = None\n\n comments = []\n for i in audio_file.tag.comments:\n comment = correct_playlist_names(i.text.lower().strip())\n comments += comment.replace('|', ',').replace('/', ',').strip('|').split(',')\n\n return {\n 'artist' : artist,\n 'title' : title,\n 'album' : album,\n 'time' : time,\n 'comments' : filter(None, comments),\n 'genre' : genre.split(','),\n 'year' : year\n }", "def preprocess_data(num_mfcc_coeffs, num_filters, window_len, window_step, max_num_frames):\n inputs = [] \n labels = [] \n \n SOURCE_DIR = '../data/cmu_arctic/scottish-english-male-awb/wav/' \n TARGET_DIR = '../data/cmu_arctic/us-english-male-bdl/wav/'\n index = 0\n for source_fname, target_fname in zip(os.listdir(SOURCE_DIR), os.listdir(TARGET_DIR)):\n if index >= 20:\n break\n index += 1\n\n if source_fname == '.DS_Store' or target_fname == '.DS_Store':\n continue\n\n (source_sample_rate, source_wav_data) = wav.read(SOURCE_DIR + source_fname) \n (target_sample_rate, target_wav_data) = wav.read(TARGET_DIR + target_fname)\n\n source_mfcc_features = np.array(mfcc(source_wav_data, samplerate=source_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n target_mfcc_features = np.array(mfcc(target_wav_data, samplerate=target_sample_rate, numcep=num_mfcc_coeffs, nfilt=num_filters, winlen=window_len, winstep=window_step))\n\n # align with FastDTW\n source_mfcc_features, target_mfcc_features = get_dtw_series(source_mfcc_features, target_mfcc_features)\n\n # pad MFCC feature matrices (rows) to max_num_frames\n source_padded_frames = pad_sequence(source_mfcc_features, max_num_frames)\n target_padded_frames = pad_sequence(target_mfcc_features, max_num_frames)\n\n inputs.append(source_padded_frames) \n labels.append(target_padded_frames) \n\n return inputs, labels", "def create_weka_mfcc_13():\n global ARGS\n\n ## ten thu muc can trich chon vector dac trung (RLS, LMS, NLMS, Kalman, Non)\n name = '';\n fout = 
open('weka/MFCC78_TUNNING_{}_dataset.arff'.format(name), 'w')\n    fout.write('@RELATION {}_dataset\\n\\n'.format(name))\n\n    ## declare the 78 attributes: mean and std of the 13 MFCCs and of their first (D) and second (DD) order deltas\n    for stat in ['MEAN', 'STD']:\n        for suffix in ['', 'D', 'DD']:\n            for i in range(1, 14):\n                fout.write('@ATTRIBUTE {}_MFCC{}{}\tREAL\\n'.format(stat, suffix, i))\n    fout.write('@ATTRIBUTE class \t{'+ARGS.labels+'}\\n\\n')\n    \n    fout.write('@DATA\\n')\n\n    ## window function\n    windowing = Windowing(type='hamming',\n                          size=1104,\n                          zeroPhase=False)\n    \n    ## spectrum\n    spectrum = Spectrum(size=1104)\n\n    ## initialize the MFCC extractor\n    mfcc = MFCC(highFrequencyBound=4000,  ## upper frequency bound\n                inputSize=201,            ## input spectrum size\n                lowFrequencyBound=0,      ## lower frequency bound\n                numberBands=40,           ## number of Mel bands in the filter bank\n                numberCoefficients=13,    ## number of Mel coefficients output\n                sampleRate=16000)         ## sampling rate\n\n    for label in ARGS.labels.split(','):  ## iterate over the folders named after each label\n\n        ## folder path\n        dir = os.path.join(ARGS.dir, label)\n\n        logging.info('Access folder <{}>'.format(dir))\n\n        for file in sorted(os.listdir(dir)):\n\n            ## iterate over the .wav files\n            if file.endswith('.wav'):\n                logging.info('Process <{}>'.format(file))\n                path = os.path.join(dir, file)\n                \n                ## read the audio file\n                loader = MonoLoader(filename=path, sampleRate=ARGS.sampleRate)\n                audio = loader()\n                cnt = 0\n\n                for window in FrameGenerator(audio, \n                            frameSize=ARGS.window_length*ARGS.sampleRate/1000, \n                            hopSize=ARGS.window_stride*ARGS.sampleRate/1000, \n                            startFromZero=True):\n                    mfccs = []\n                    for frame in FrameGenerator(window, \n                                frameSize=ARGS.frame_length*ARGS.sampleRate/1000, \n                                hopSize=ARGS.frame_stride*ARGS.sampleRate/1000, \n                                startFromZero=True):\n                        s = spectrum(windowing(frame))\n\n                        _, m = mfcc(s)\n\n                        m_delta = librosa.feature.delta(m, order=1)  ## first-order derivative\n                        m_delta_delta = librosa.feature.delta(m, order=2)  ## second-order derivative\n\n                        m_all = np.concatenate((m, m_delta, m_delta_delta), axis=0)  ## concatenate the three blocks\n                        mfccs.append(m_all)\n                    mfccs = np.array(mfccs)\n                    mfccs_mean = np.mean(mfccs, axis=0)\n                    mfccs_std = np.std(mfccs, axis=0)\n                    feat = np.concatenate((mfccs_mean, mfccs_std), axis=0).tolist()\n                    str_feat = [str(x) for x in feat]\n                    line = ','.join(str_feat)+','+label\n                    fout.write(line+'\\n')\n                    cnt = cnt+1\n                logging.info('{} samples'.format(cnt))",
 "def testing():\n\n    # lists which contain paths of keyword and non-keyword utterances\n    non_kw_clips, kw_clips = generate_clips_kwds()\n\n    non_kw_sent_dict, kw_sent_dict = {}, {}\n    templates_dict = {}\n\n    # calculate and store MFCC features in a dictionary\n    for kw in listdir(kw_path):\n        templates_dict[kw] = proc_one(kw_path + kw)\n\n    for sent in non_kw_clips:\n        filename = sent[:-3] + 'wav'\n        non_kw_sent_dict[filename] = proc_one(filename)\n\n    for word, paths in kw_clips.items():\n        for path in paths:\n            filename = path[:-3] + 'wav'\n            kw_sent_dict[filename] = (proc_one(filename), word)\n\n    final_results = {}\n\n    # non-keyword comparisons\n    for i, (non_kw_utterance, clip_feat) in enumerate(non_kw_sent_dict.items()):\n\n        print(i, '/', len(non_kw_sent_dict))\n\n        final_results[non_kw_utterance] = {}\n\n        for keyword, kw_feat in templates_dict.items():\n            print(\"Comparing keyword and non-kw sentence:\", keyword, non_kw_utterance)\n\n            lmd = compare_all(clip_feat, kw_feat)\n            final_results[non_kw_utterance][keyword] = (lmd, 0)\n\n    with open(results_json, 'w') as f:\n        json.dump(final_results, f)\n\n    # keyword comparisons\n    for i, (kw_utterance, (clip_feat, word)) in enumerate(kw_sent_dict.items()):\n\n        
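# A minimal, self-contained sketch of the mean/std MFCC summary used above,
# assuming librosa and numpy are available; 'example.wav' and sr=16000 are
# hypothetical placeholders. 13 MFCCs plus first- and second-order deltas give
# 39 tracks, whose per-track mean and std yield the 78 ARFF attributes.
import numpy as np
import librosa

def mfcc_summary_features(path, sr=16000, n_mfcc=13):
    y, _ = librosa.load(path, sr=sr, mono=True)          # mono audio at a fixed rate
    m = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)  # shape (13, T)
    d1 = librosa.feature.delta(m, order=1)               # needs T >= delta width (default 9)
    d2 = librosa.feature.delta(m, order=2)
    stacked = np.concatenate((m, d1, d2), axis=0)        # shape (39, T)
    return np.concatenate((stacked.mean(axis=1), stacked.std(axis=1)))  # shape (78,)

# feats = mfcc_summary_features('example.wav')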
print(i, '/', len(kw_sent_dict))\n final_results[kw_utterance] = {}\n\n for keyword, kw_feat in templates_dict.items():\n\n print(\"Comparing keyword and kw sentence:\", keyword, kw_utterance)\n\n lmd = compare_all(clip_feat, kw_feat)\n\n if keyword.split('_')[0] == word:\n final_results[kw_utterance][keyword] = (lmd, 1)\n else:\n final_results[kw_utterance][keyword] = (lmd, 0)\n\n with open(results_json, 'w') as f:\n json.dump(final_results, f)", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def convert_all_data(mfccPath, fbankPath, labeldict, datadir):\n\n inputmfcc, inputnamemfcc = ark_parser(mfccPath, 'train.ark')\n inputfbank, inputnamefbank = ark_parser(fbankPath, 'train.ark')\n\n label = []\n inputlist = []\n assert len(inputnamemfcc) == len(labeldict.keys()) and len(inputnamefbank) == len(labeldict.keys())\n\n for fb, mfcc in zip(inputfbank, inputmfcc):\n fb = pp.normalize_mfcc(fb)\n mfcc = pp.normalize_mfcc(mfcc)\n inputlist.append(np.concatenate((fb, mfcc), axis=1))\n\n for name in inputnamemfcc:\n label.append(labeldict[name])\n\n with open('./train_data.pkl', 'wb') as train_data:\n pickle.dump(inputlist, train_data)\n\n convert_label_to_int(datadir, datadir + '48phone_char.map', label)", "def features_and_labels(soundfile, frag_length=128):\n label = soundfile.split('\\\\')[-1].split('_')[0]\n waveform, sample_rate = torchaudio.load(soundfile)\n MFCCs = transforms.MFCC(n_mfcc=128, melkwargs={'n_mels':128, 'win_length':320, 'hop_length':160, 'n_fft':1024 })(waveform[0][:])\n MFCCs = MFCCs.T.view((-1, frag_length, 128)) # transform the shape into (index, time_representation, melbands)\n\n frag_nums = MFCCs.shape[0]\n labels = int(label)*np.ones(frag_nums, dtype=np.int8)\n labels = torch.from_numpy(labels)\n\n return MFCCs, labels", "def output_wave_file(predicted_mfccs, filename):\n global eng\n predicted_mfccs_transposed = np.transpose(predicted_mfccs)\n\n\n # MFCC features need to be a numpy array of shape (num_coefficients x num_frames) in order to be passed to the invmelfcc function\n inverted_wav_data = eng.invmelfcc(matlab.double(predicted_mfccs_transposed.tolist()), 16000.0, 25, 100.0, 0.005, 0.005)\n\n inverted_wav_data = np.squeeze(np.array(inverted_wav_data))\n\n # scales the waveform to be between -1 and 1\n maxVec = np.max(inverted_wav_data)\n minVec = np.min(inverted_wav_data)\n inverted_wav_data = ((inverted_wav_data - minVec) / (maxVec - minVec) - 0.5) * 2\n\n wav.write(filename + '.wav', 16000.0, inverted_wav_data)", "def calc_mfccs(audio_data, samplerate, n_mfcc=13, n_fft=400, hop_length=160):\n mfcc = librosa.feature.mfcc(audio_data, sr=samplerate, n_mfcc=n_mfcc, n_fft=n_fft, hop_length=hop_length)\n\n # add derivatives and normalize\n mfcc_delta = librosa.feature.delta(mfcc)\n mfcc_delta2 = librosa.feature.delta(mfcc, order=2)\n mfcc = np.concatenate((normalize(mfcc),\n normalize(mfcc_delta),\n normalize(mfcc_delta2)), axis=0)\n\n return mfcc.T", "def get_movie_data(files: list) -> list:\n pass", "def convert_all_test_data(mfccPath, fbankPath, datadir):\n inputmfcc, inputnamemfcc = ark_parser(mfccPath, 'test.ark')\n inputfbank, inputnamefbank = ark_parser(fbankPath, 'test.ark')\n\n label = []\n inputlist = []\n assert len(inputnamemfcc) == len(inputnamefbank)\n\n for fb, mfcc in zip(inputfbank, inputmfcc):\n fb = pp.normalize_mfcc(fb)\n mfcc = pp.normalize_mfcc(mfcc)\n inputlist.append(np.concatenate((fb, mfcc), axis=1))\n\n with open('./test_data.pkl', 'wb') as test_data:\n pickle.dump(inputlist, test_data)\n\n with 
open('./test_name.pkl', 'wb') as test_name:\n pickle.dump(inputnamefbank, test_name)", "def create_song_feats(path):\n features = read_process_song(path, debug=True)\n df = pd.DataFrame(features)\n df.to_csv('./Features/single_song_features/song_features.csv', index=False)", "def loadSHSMFCCs(IDs):\n IDDict = getSHSIDDict()\n fin = open(\"SHSDataset/MFCC/bt_aligned_mfccs_shs.txt\")\n mfccs = {}\n count = 0\n while True:\n ID = fin.readline().rstrip()\n if not ID:\n break\n ID = IDDict[ID]\n if count%1000 == 0:\n print(\"Loaded mfccs for %i songs...\"%count)\n if not ID in IDs:\n fin.readline()\n count += 1\n continue\n x = fin.readline().rstrip()\n x = x.split(\",\")\n if len(x[-1]) == 0:\n x = x[0:-1]\n x = np.array([float(a) for a in x])\n x = np.reshape(x, (len(x)/12, 12))\n mfccs[ID] = x\n count += 1\n fin.close()\n return mfccs", "def convert_testing_data(mfccPath):\n inputlist, inputnamelist = ark_parser(mfccPath, 'test.ark')\n\n print(\"%d sample in testing set\" % len(inputlist))\n with open('./test_data.pkl', 'wb') as test_data:\n pickle.dump(inputlist, test_data)\n \n with open('./test_name.pkl', 'wb') as test_name:\n pickle.dump(inputnamelist, test_name)", "def merge():\n result = []\n for f in glob.glob(f\"{DATA_DIR}/COP*.json\"):\n with open(f, \"r\") as infile:\n result.append(json.load(infile))\n\n with open(f\"{DATA_DIR}/corpus.json\", \"w\", encoding=\"utf-8\") as outfile:\n json.dump(result, outfile)", "def get_MSMC_results(filename, name):\n lines = []\n with open(filename, \"r\") as f:\n for line in f:\n lines.append(line)\n time = [v.split('\\t')[1] for v in lines]\n time = [float(v) for v in time[1:]]\n IICR_k = [v.split('\\t')[3] for v in lines]\n IICR_k = [float(v) for v in IICR_k[1:]]\n\n return {'name': name, 'model':'msmc', 'x_vector' : time, 'y_vector': IICR_k}", "def filter_corpus():\n result = []\n for f in glob.glob(f\"{DATA_DIR}/COP*.json\"):\n with open(f, \"r\") as infile:\n data = json.load(infile)\n articles = flatten(data)\n result.append(articles)\n\n with open(f\"{DATA_DIR}/filtered_data.json\", \"w\", encoding='utf-8') as f:\n json.dump(result, f, indent=2)", "def main(directory, results_path):\n\n dicom_paths = []\n for root, _, files in os.walk(directory):\n dicom_paths.extend([os.path.abspath(os.path.join(root, f)) for f in files if f.endswith('.dcm')])\n \n metadata = p_umap(get_dicom_metadata_and_slice_counts, dicom_paths)\n\n with open(results_path, 'w') as results_file:\n json.dump(metadata, results_file, indent=4, sort_keys=True)", "def save_all_chunks_with_labels(audio_dir, json_dir, csv_dir):\n for file in os.listdir(json_dir):\n file_path = os.path.join(json_dir, file)\n audio_file_path = os.path.join(audio_dir, file)[:-4] + \"wav\"\n with open(file_path) as f:\n data = json.load(f)\n save_arrays_with_labels(audio_file_path, data, csv_dir)", "def Prediction(self):\n # converts first 19 chunks of audio bytes into 16 bit int values\n in_data = np.fromstring(np.array(self.frames[:19]), 'Int16')\n\n # extract MFCCs from the 19 chunks of audio\n audio_sig = np.array([mfcc(in_data, self.rate, self.window,\n self.stride, self.mfcc, self.filter_banks,\n self.fft_num, 0, None, True)])\n\n # makes predictions\n prediction = self.ww_model.model.predict(audio_sig)\n\n if(self.print_pred):\n print(prediction)\n\n return prediction", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = 
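# The exact behavior of pp.normalize_mfcc is not shown in these rows; a common
# choice, sketched here only as an assumption, is per-utterance mean/variance
# normalization of each coefficient, with feats shaped (frames, coefficients).
import numpy as np

def normalize_features(feats, eps=1e-8):
    # zero mean and unit variance per coefficient column
    return (feats - feats.mean(axis=0)) / (feats.std(axis=0) + eps)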
cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. \n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))", "def create_json(wav_lst, json_file, clean_folder, txt_folder, lexicon):\r\n logger.debug(f\"Creating json lists in {json_file}\")\r\n\r\n # Processing all the wav files in the list\r\n json_dict = {}\r\n for wav_file in wav_lst: # ex:p203_122.wav\r\n\r\n # Example wav_file: p232_001.wav\r\n noisy_path, filename = os.path.split(wav_file)\r\n _, noisy_dir = os.path.split(noisy_path)\r\n _, clean_dir = os.path.split(clean_folder)\r\n noisy_rel_path = os.path.join(\"{data_root}\", noisy_dir, filename)\r\n clean_rel_path = os.path.join(\"{data_root}\", clean_dir, filename)\r\n\r\n # Reading the signal (to retrieve duration in seconds)\r\n signal = read_audio(wav_file)\r\n duration = signal.shape[0] / SAMPLERATE\r\n\r\n # Read text\r\n snt_id = filename.replace(\".wav\", \"\")\r\n with open(os.path.join(txt_folder, snt_id + \".txt\")) as f:\r\n word_string = f.read()\r\n word_string = remove_punctuation(word_string).strip().upper()\r\n phones = [\r\n phn for word in word_string.split() for phn in lexicon[word].split()\r\n ]\r\n\r\n # Remove duplicate phones\r\n phones = [i for i, j in zip(phones, phones[1:] + [None]) if i != j]\r\n phone_string = \" \".join(phones)\r\n\r\n json_dict[snt_id] = {\r\n \"noisy_wav\": noisy_rel_path,\r\n \"clean_wav\": clean_rel_path,\r\n \"length\": duration,\r\n \"words\": word_string,\r\n \"phones\": phone_string,\r\n }\r\n\r\n # Writing the json lines\r\n with open(json_file, mode=\"w\") as json_f:\r\n json.dump(json_dict, json_f, indent=2)\r\n\r\n logger.info(f\"{json_file} successfully created!\")", "def year2ccmc_datafile(tofilename):\n return str(tofilename[0]) + '_' + tofilename[1] + '_pointdata.txt'", "def create_melspectrogram_dataset(label_folder='electronic_music/Trance_label/Train/', 
save_folder='song_mel_label_data',\n sr=44100, n_mels=128, n_fft=2048, hop_length=512, song_duration=180.0,\n create_data=False):\n if create_data:\n # get list of all labels\n os.makedirs(save_folder, exist_ok=True)\n labels = [path for path in os.listdir(label_folder) if os.path.isdir(label_folder + path)]\n\n # iterate through all lables, songs and find mel spectrogram\n for label in labels:\n print('{} \\n'.format(label))\n label_path = os.path.join(label_folder, label)\n label_songs = os.listdir(label_path)\n\n for song in label_songs:\n print(song)\n song_path = os.path.join(label_path, song)\n\n # Create mel spectrogram for song_duration in the middle of the song and convert it to the log scale\n audio = MP3(song_path)\n audio_lenght = int(audio.info.length)\n audio_middle = (audio_lenght - int(song_duration))/2\n y, sr = librosa.load(song_path, sr=sr, offset=audio_middle, duration=song_duration)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n data = (label, log_S, song)\n\n # Save each song\n save_name = label + '_%%-%%_' + song\n with open(os.path.join(save_folder, save_name), 'wb') as fp:\n dill.dump(data, fp)", "def canned_ims2_response():\n return file_utils.response_file_to_json(test_data_dir + '/waveform_41177893.1')", "def mfcc(wav_path, delta = 2):\n y, sr = librosa.load(wav_path)\n # MEL frequency cepstrum coefficient\n mfcc_feat = librosa.feature.mfcc(y = y, sr = sr, n_mfcc = 13)\n ans = [mfcc_feat]\n # Calculate the 1st derivative\n if delta >= 1:\n mfcc_delta1 = librosa.feature.delta(mfcc_feat, order = 1, mode ='nearest')\n ans.append(mfcc_delta1)\n # Calculate the 2nd derivative\n if delta >= 2:\n mfcc_delta2 = librosa.feature.delta(mfcc_feat, order = 2, mode ='nearest')\n ans.append(mfcc_delta2)\n return np.transpose(np.concatenate(ans, axis = 0),[1,0])", "def extract_mfcc(signal, signal_sr=SR, n_fft=FRAME_LEN, hop_length=HOP, n_mfcc=MFCC_dim):\n # compute the mfcc of the input signal\n mfcc = librosa.feature.mfcc(\n y=signal, sr=signal_sr, n_fft=n_fft, hop_length=hop_length, n_mfcc=n_mfcc, dct_type=3\n )\n\n # extract the first and second order deltas from the retrieved mfcc's\n mfcc_delta = librosa.feature.delta(mfcc, order=1, mode='nearest')\n mfcc_delta2 = librosa.feature.delta(mfcc, order=2, mode='nearest')\n\n # create the mfcc array\n mfccs = []\n\n # populate it using the extracted features\n for i in range(n_mfcc):\n mfccs.extend(sta_fun(mfcc[i, :])) \n for i in range(n_mfcc):\n mfccs.extend(sta_fun(mfcc_delta[i, :]))\n for i in range(n_mfcc):\n mfccs.extend(sta_fun(mfcc_delta2[i, :]))\n\n # finally return the coefficients\n return mfccs", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if (name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n 
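# The fuzzy title grouping above hinges on difflib.SequenceMatcher; a small
# standalone sketch of the same >0.9 ratio test, with made-up titles.
from difflib import SequenceMatcher

def same_track(title_a, title_b, threshold=0.9):
    # ratio() returns a similarity in [0, 1]; near-identical strings score high
    return SequenceMatcher(None, title_a.lower(), title_b.lower()).ratio() > threshold

# same_track('Blue Monday', 'Blue  Monday')  # True: only whitespace differs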
self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def get_artist_audio_features(q, interactive = False, genre_delimiter = '-!!-', to_file = '', client = None):\n query = client.search(q = q, type = \"artist\")\n items = query['artists']['items']\n\n if not items:\n raise Exception(\"No artists found\")\n\n if interactive:\n print(\"Select the artist to use...\")\n print(\"\\n\".join(\"[{}]: {}\".format(ii, entry['name']) for ii, entry in enumerate(items)))\n artist_indx = int(input(\"artist number: \").strip())\n if artist_indx > len(items):\n raise IndexError(\"Selected number higher than options available\")\n artist = items[artist_indx]\n else:\n artist = items[0]\n\n # get artist genres\n artist_genres = genre_delimiter.join(artist['genres']) if genre_delimiter else None\n\n # get artist albums\n albums = get_artist_albums(artist['id'])\n albums['artist_genres'] = artist_genres\n\n # get album popularity\n album_popularity = get_album_popularity(albums.id)\n\n # get album tracks\n tracks = get_album_tracks(albums.id)\n\n # get track audio features\n features = get_track_features(tracks.id)\n\n # get track popularity\n popularity = get_track_popularity(tracks.id)\n\n album_data = albums.merge(album_popularity, 'left', 'id')\n\n track_data = tracks \\\n .drop(columns = ['type']) \\\n .merge(popularity, 'left', 'id') \\\n .merge(features.drop(columns = ['uri', 'type', 'duration_ms']), 'left', 'id')\n\n\n merged = prefix_merge(album_data, track_data, ['album_', 'track_'], how = 'left', on = 'album_id')\n\n if to_file:\n merged.to_csv(to_file)\n\n return merged", "def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def load_MF_outputs(file_path):\r\n # Transform the .csv database into panda type\r\n excel = pd.ExcelFile(file_path)\r\n\r\n # Collect data from a particular tab\r\n line = excel.parse('line', header=0, index_col=0)\r\n foundation = excel.parse('foundation', header=0, index_col=0)\r\n\r\n # Splits the different dataset through different dict keys()\r\n MF_outputs = {'line': line,\r\n 'foundation': foundation,\r\n }\r\n\r\n return MF_outputs", "def proc_one(filename):\n (rate, sig) = wav.read(filename)\n assert rate == samp_rate\n # since templates have max value of 32768, normalise it\n if sig.max() > 1:\n sig = sig / 32768\n # Normalise so that max-value is 1\n sig = sig / max(sig)\n\n # calculate MFCC\n feat = mfcc(sig, samplerate=samp_rate, winlen=win_length / 1000, winstep=hop / 1000, preemph=0.95, numcep=14,\n winfunc=np.hamming)\n # print(sig.shape, feat.shape)\n return feat", "def preprocess(self, file, num_mfcc=13, n_fft=2048, hop_length=512):\r\n\r\n # load and resample audio file\r\n signal, sample_rate = librosa.load(file, sr=DATASET_SAMPLE_RATE)\r\n\r\n # check length of signal\r\n if len(signal) >= COMMAND_LENGTH:\r\n # truncate signal to COMMAND_LENGTH\r\n signal = signal[:COMMAND_LENGTH]\r\n\r\n elif len(signal) < COMMAND_LENGTH:\r\n # zero pad signal to COMMAND_LENGTH\r\n padding = 
np.zeros(COMMAND_LENGTH - len(signal))\r\n signal = np.append(signal, padding)\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc=num_mfcc, n_fft=n_fft, hop_length=hop_length)\r\n\r\n # input data for the model should be 4 dimensional array: (# samples, # time steps, # coefficients, 1)\r\n MFCCs = MFCCs[np.newaxis, ..., np.newaxis]\r\n\r\n return MFCCs", "def extract_feats_to_file(npy_path, audio_path, featurizer):\n # Returns a (time, feature) NumPy array\n data = featurizer.file_to_feats(audio_path)\n np.save(npy_path, data)", "def get_data(input_path):\n all_imgs = []\n classes_count = {}\n class_mapping = {}\n\n # parsing Flag\n visualise = False\n\n # MSCOCO directory\n data_path = input_path\n\n print('Parsing annotation files')\n annot_path = os.path.join(data_path, 'annotations_bbox')\n imgs_path = os.path.join(data_path, 'images')\n\n # images directory (train, val, trainval, test)\n imgsets_path_trainval = os.path.join(data_path, 'images', 'trainval.txt')\n imgsets_path_train = os.path.join(data_path, 'images', 'train.txt')\n imgsets_path_val = os.path.join(data_path, 'images', 'val.txt')\n imgsets_path_test = os.path.join(data_path, 'images', 'test.txt')\n\n trainval_files = []\n train_files = []\n val_files = []\n test_files = []\n\n with open(imgsets_path_trainval) as f:\n for line in f:\n trainval_files.append(line.strip())\n\n with open(imgsets_path_train) as f:\n for line in f:\n train_files.append(line.strip())\n\n with open(imgsets_path_val) as f:\n for line in f:\n val_files.append(line.strip())\n\n # test-set (default) not included in MSCOCO\n if os.path.isfile(imgsets_path_test):\n with open(imgsets_path_test) as f:\n for line in f:\n test_files.append(line.strip())\n\n # annotation read\n annots_train = json.load(open(os.path.join(annot_path, 'bbox_train2017.json'), 'r'))\n annots_val = json.load(open(os.path.join(annot_path, 'bbox_val2017.json'), 'r'))\n annots = dict()\n annots['train'] = annots_train\n annots['val'] = annots_val\n\n for part in ['train', 'val']:\n annots_keys = tqdm(annots[part].keys())\n for img_name in annots_keys:\n annots_keys.set_description(\"Processing %s\" % img_name)\n for bbox in annots[part][img_name]:\n class_name = bbox['label'].replace(' ', '')\n all_imgs.append({\n \"filepath\": os.path.join(data_path, 'images', '%s2017' % part, \"%s.jpg\" % img_name),\n \"width\": None,\n \"height\": None,\n \"bboxes\": [{\n \"class\": class_name,\n \"x1\": bbox['bbox']['x1'],\n \"y1\": bbox['bbox']['x2'],\n \"x2\": bbox['bbox']['y1'],\n \"y2\": bbox['bbox']['y2'],\n \"difficult\": False\n }],\n \"image_id\": img_name,\n \"imageset\": part\n })\n if class_name not in classes_count:\n classes_count[class_name] = 1\n else:\n classes_count[class_name] += 1\n if class_name not in class_mapping:\n class_mapping[class_name] = len(class_mapping)\n\n # visualise bounding boxes\n if visualise:\n img = cv2.imread(annotation_data['filepath'])\n for bbox in annotation_data['bboxes']:\n cv2.rectangle(img, (bbox['x1'], bbox['y1']), (bbox['x2'], bbox['y2']), (0, 0, 255))\n cv2.imshow('img', img)\n print(annotation_data['imageset'])\n cv2.waitKey(0)\n\n return all_imgs, classes_count, class_mapping", "def data_music_parcelled():\n pass", "def generateFeatureData(directory, outFileName='tmp/features.txt', isClassifying=False):\n\n audioList = getAudioFiles(directory)\n\n outFile = open(outFileName, \"w\")\n\n for audio in audioList:\n features = audio.getFeatures()\n \n if isClassifying: # We are classifying, we don't know type\n 
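# The writer below emits one space-separated line per clip in the form
# "<label> <feat1> <feat2> ... # <name>"; a hedged standalone sketch:
def feature_line(label, features, name):
    # label: '1' music, '-1' other, '0' unknown; features: iterable of numbers
    return '{} {} # {}'.format(label, ' '.join(str(f) for f in features), name)

# feature_line('1', [0.12, 3.4], 'clip01.wav')  # -> '1 0.12 3.4 # clip01.wav'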
audioType = '0'\n        else: # We are generating training data. Try to predict using file name\n            audioType = '1' if audio.predictType() == 'Music' else '-1'\n        \n        outFile.write(audioType + ' ' + features + ' # ' + audio.name + '\\n')\n\n    outFile.close()\n\n    return audioList",
 "def ExtractFeatures(self):\n\n        self.MFCC = librosa.feature.mfcc(self.sample, sr=self.sample_rate, n_mfcc=13)\n        self.MFCC_DELTA = librosa.feature.delta(self.MFCC)\n        self.MEL_SPECTROGRAM = librosa.feature.melspectrogram(self.sample, sr=self.sample_rate)\n        f, t, SPECTRO = signal.spectrogram(self.sample)\n        self.SPECTRO = SPECTRO\n        self.LPC = np.array(audiolazy.lazy_lpc.lpc.autocor(self.sample, 2).numerator)\n        self.FFT = np.fft.fft(self.sample)\n        widths = np.arange(1, 31)\n        self.CWT = signal.cwt(self.sample, signal.ricker, widths)",
 "def filterAll(media_list_file, in_movie_dir, out_movie_dir):\n    with open(media_list_file) as f:\n        names = json.load(f)\n\n    count_tweets = []\n    count_kept_tweets = []\n    for name in names:\n        with open(\"{}/{}.json\".format(in_movie_dir, name)) as f:\n            tweets = json.load(f)\n        kept_tweets = [t for t in tweets.values() if keep(t)]\n        print(\"total = {}\\tkeep = {}\\tName = {}\".format(\n            len(tweets), len(kept_tweets), name))\n        count_tweets.append(len(tweets))\n        count_kept_tweets.append(len(kept_tweets))\n        with open(\"{}/{}.json\".format(out_movie_dir, name), \"w\") as f:\n            json.dump(kept_tweets, f)\n    print(\"Mean tweets = {}\".format(np.mean(count_tweets)))\n    print(\"Mean kept tweets = {}\".format(np.mean(count_kept_tweets)))\n    print(\"Mean fraction kept tweets = {}\".format(\n        np.mean(count_kept_tweets) / np.mean(count_tweets)))",
 "def load_data_from_json(json_path):\r\n    print(\"\\nLoading data from json file\")\r\n    with open(json_path, \"r\") as fp:\r\n        data = json.load(fp)\r\n    \r\n    X = np.array(data[\"MFCCs\"])\r\n    y = np.array(data[\"labels\"])\r\n    print(\"Training sets loaded!\")\r\n    print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n    print(\"releasing 'data' to free memory\")\r\n    del data\r\n\r\n    return X, y",
 "def create_mp3():\n\n    #TODO: the roles should not be hard-coded\n    list_all_roles = [\n        [],\n        [\"morgan\"],\n        [\"oberon\"],\n        [\"mordred\"],\n        [\"morgan\", \"oberon\"],\n        [\"morgan\", \"mordred\"],\n        [\"oberon\", \"mordred\"],\n        [\"morgan\", \"oberon\", \"mordred\"]\n    ]\n\n    for list_roles in list_all_roles:\n\n        list_mp3 = [\"init.mp3\", \"serv_mord.mp3\"]\n        if \"oberon\" in list_roles:\n            list_mp3.append(\"oberon.mp3\")\n        list_mp3.append(\"red_identi.mp3\")\n\n        if \"morgan\" in list_roles:\n            list_mp3.append(\"add_per_mor.mp3\")\n\n        list_mp3.append(\"serv_mord.mp3\")\n        if \"mordred\" in list_roles:\n            list_mp3.append(\"mordred.mp3\")\n        list_mp3.extend([\"merlin_identi.mp3\", \"end.mp3\"])\n\n        mp3_combined = AudioSegment.empty()\n        for mp3 in list_mp3:\n            mp3_combined += AudioSegment.from_mp3(\"resources/{}\".format(mp3))\n\n        mp3_combined.export(\"resources/_{}.mp3\".format('-'.join(sorted(list_roles))), format=\"mp3\")",
 "def mfcc_features(y, sr, n_mels=128, n_mfcc=13):\n    # Analyze only first second\n    y = y[0:sr]\n\n    # Calculate MFCCs (Mel-Frequency Cepstral Coefficients)\n    mel_spectrum = librosa.feature.melspectrogram(y,\n                                                  sr=sr,\n                                                  n_mels=n_mels)\n    log_spectrum = librosa.amplitude_to_db(mel_spectrum,\n                                           ref=np.max)\n    mfcc = librosa.feature.mfcc(S=log_spectrum,\n                                sr=sr,\n                                n_mfcc=n_mfcc)\n\n    if mfcc.shape[-1] < DELTA_WIDTH:\n        raise RuntimeError('MFCC vector does not contain enough time steps')\n\n    if not mfcc.any():\n        return np.zeros(n_mfcc * 3)\n\n    # Standardize feature for equal variance\n    
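# librosa.feature.delta needs at least `width` frames, which is what the
# DELTA_WIDTH guard above enforces; sketched with an assumed odd width of 9.
import librosa

def safe_delta(mfcc, width=9, order=1):
    if mfcc.shape[-1] < width:
        raise RuntimeError('MFCC vector does not contain enough time steps')
    return librosa.feature.delta(mfcc, width=width, order=order)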
delta_mfcc = librosa.feature.delta(mfcc, width=DELTA_WIDTH)\n delta2_mfcc = librosa.feature.delta(mfcc, order=2, width=DELTA_WIDTH)\n feature_vector = np.concatenate((\n np.mean(mfcc, 1),\n np.mean(delta_mfcc, 1),\n np.mean(delta2_mfcc, 1)))\n feature_vector = (\n feature_vector - np.mean(feature_vector)\n ) / np.std(feature_vector)\n\n return feature_vector", "def get_json_info_music(self, get_json_id_num):\n signature = genrate_letters()\n remake = remake_url(self.get_json_cursor)\n json_url = remake.remake_music_url(signature, get_json_id_num)\n\n response = get_responses(json_url)\n\n return self.return_response(response, get_json_id_num)", "def mfcc_features(self, audio, rate, numcep = 20, nfft = 2000, N = 2):\n self.mfcc = python_speech_features.mfcc(audio, rate, numcep = numcep, nfft = nfft)\n #self.mfcc = preprocessing.scale(self.mfcc)\n \n self.delta_mfcc = python_speech_features.delta(self.mfcc, N)\n \n self.mfcc_feature = np.hstack((self.mfcc, self.delta_mfcc))\n \n return self.mfcc_feature", "def get_all_jsons():\r\n res = get_all_mps_ids()\r\n for id in res.keys():\r\n get_mp_json_from_file(id)", "def make_all_cosmos_des(run, cosmos_config, des_config, catfile, tileid):\n\n flist = files.get_cosmos_flist(tileid)\n cosmos_meds = files.get_meds_file(run, tileid, 'cosmos','i')\n\n print('making cosmos MEDS:',cosmos_meds)\n maker = CosmosMEDSMaker(\n config_path=cosmos_config,\n catname=catfile,\n flistname=flist,\n )\n maker.write(cosmos_meds)\n\n for band in ['u','g','r','i','z']:\n\n band_flist = files.get_des_flist(band)\n band_meds = files.get_meds_file(run, tileid, 'des',band)\n\n print('making DES MEDS:',band_meds)\n maker = CosmosMEDSMaker(\n config_path=des_config,\n catname=cosmos_meds,\n flistname=band_flist,\n )\n maker.write(band_meds)", "def encodeMP3(self, wavf: str, dstf: str, cover: str, meta: TrackMeta) -> None:\n FNULL = open(os.devnull, 'w')\n subprocess.call(['lame', '-V2', wavf, dstf], stdout=FNULL, stderr=FNULL)\n FNULL.close()\n # tag MP3\n mm = TrackMeta(meta)\n mp3 = MP3(dstf, ID3=ID3)\n mp3[\"TIT2\"] = TIT2(encoding=3, text=mm.title())\n mp3[\"TPE1\"] = TPE1(encoding=3, text=mm.artist())\n mp3[\"TALB\"] = TALB(encoding=3, text=mm.album())\n mp3[\"TPE2\"] = TPE2(encoding=3, text=mm.albumartist())\n if mm.date():\n mp3[\"TDRC\"] = TDRC(encoding=3, text=mm.date())\n mp3[\"TRCK\"] = TRCK(encoding=3,\n text=mm.tracknumber() + \"/\" + mm.tracktotal())\n mp3[\"TPOS\"] = TPOS(encoding=3,\n text=mm.discnumber() + \"/\" + mm.disctotal())\n\n # composer\n if mm.composer():\n mp3[\"TCM\"] = TCM(encoding=3, text=mm.composer())\n\n # cover\n if cover:\n data = open(cover, 'rb').read()\n if cover.endswith('png'):\n mime = 'image/png'\n else:\n mime = 'image/jpeg'\n mp3.tags.add(APIC(encoding=3, mime=mime, type=3, desc=u'Cover', data=data))\n\n # save\n mp3.save()", "def main():\n loader = MicrosoftDataloader()\n train,dev,test = loader.getData()\n sentences = []\n\n # Collect all the training sentences\n for i,row in pd.concat((train,test)).iterrows():\n if isinstance(row[\"sentence1\"], basestring) and isinstance(row[\"sentence2\"], basestring):\n sentences.append(row[\"sentence1\"])\n sentences.append(row[\"sentence2\"])\n\n # Get the mapping between sentences and their cotext vectors\n mapped = get_sentence_to_context_map(sentences)\n\n # At this stage we have a map between every sentence and its context vector\n # However the JSON file must contain sentences in the same order as in the MSR data file\n data = []\n for i,sentence in enumerate(sentences):\n embedding 
= mapped[sentence]\n data.append({'index':i, 'embedding':embedding, 'text':sentence})\n\n # Write the sentences and embeddings to JSON\n # The array index should corrospond to the sentence #\n print \"Saving embedded sentences to: {0}\".format(EMBED_FILE)\n with open(EMBED_FILE,'w') as outfile:\n json.dump(data,outfile,indent=2)", "def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")", "def write_coco_json(filepath, dataset_dicts, name_to_id, **kwargs):\n info = {\n \"description\": kwargs.get(\"description\", \"\"),\n \"url\": kwargs.get(\"url\", \"\"),\n \"version\": kwargs.get(\"version\", \"0.0\"),\n \"year\": kwargs.get(\"year\", \"2017\"),\n \"contributor\": kwargs.get(\"contributor\", \"\"),\n \"date_created\": kwargs.get(\"date_created\", \"2017/01/01\"),\n }\n\n licenses = {\n \"url\": \"closed\",\n \"id\": 0,\n \"name\": \"closed\",\n }\n\n images, annotations = [], []\n annotation_id = 1\n for record in dataset_dicts:\n images.append({\n \"id\": record[\"image_id\"],\n \"width\": record[\"width\"],\n \"height\": record[\"height\"],\n \"file_name\": record[\"file_name\"]\n })\n\n for annotation in record[\"annotations\"]:\n x0, y0, x1, y1 = annotation[\"bbox\"]\n annotations.append({\n \"id\": annotation_id,\n \"category_id\": annotation[\"category_id\"],\n \"bbox\": [x0, y0, x1 - x0, y1 - y0],\n \"iscrowd\": annotation[\"iscrowd\"],\n \"image_id\": record[\"image_id\"],\n \"area\": (x1 - x0) * (y1 - y0),\n })\n annotation_id += 1\n\n categories = [{\n \"id\": category_id,\n \"name\": \"{}\".format(category_name),\n \"supercategory\": \"\"\n } for category_name, category_id in name_to_id.items()]\n\n coco_dict = {\n \"info\": info,\n \"licenses\": licenses,\n \"images\": images,\n \"annotations\": annotations,\n \"categories\": categories,\n }\n\n with filepath.open(mode=\"w\") as file_handle:\n json.dump(coco_dict, file_handle)", "def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, 
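# COCO stores boxes as [x, y, width, height]; write_coco_json above converts
# from corner format [x0, y0, x1, y1]. The same arithmetic, isolated:
def corners_to_coco(box):
    x0, y0, x1, y1 = box
    return [x0, y0, x1 - x0, y1 - y0]  # area is (x1 - x0) * (y1 - y0)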
which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")", "def output_wave_files(predicted_mfccs_batch, true_target_mfccs_batch):\n # only outputting 1 wavefile in the batch, because otherwise it takes too long\n for i in range(min(1, predicted_mfccs_batch.shape[0])):\n print \"Converting wavefile \", i\n predicted_mfccs = predicted_mfccs_batch[i,:,:]\n target_mfccs = true_target_mfccs_batch[i]\n\n output_wave_file(predicted_mfccs, filename='autoencoder_pred_' + str(i)) \n output_wave_file(target_mfccs, filename='autoencoder_input_' + str(i))", "def convert2mel(audio,base_path,fs, n_fft,fmax,n_mels,hop_length_samples, window_lenght,type_training):\n\n path = os.path.join(base_path, audio)\n if type_training != \"train\":\n if os.path.isfile(os.path.join(base_path,\"processed_wavs_train\",audio)):\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_train\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_test\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data, _ = librosa.core.load(path, sr=fs, res_type=\"kaiser_best\")\n data = normalize_amplitude(data)\n\n powSpectrum = np.abs(stft(data+ 0.00001,n_fft,hop_length = hop_length_samples, win_length = window_lenght, window = windowing(window_lenght, sym=False), center=True, pad_mode='reflect'))**2\n\n mels = melspectrogram(y= None,n_fft=n_fft ,sr=fs ,S= powSpectrum, hop_length= hop_length_samples ,n_mels=n_mels,fmax=fmax , fmin = 0.0).T\n mels = librosa.core.power_to_db(mels, ref=np.min(mels))\n mels = mels / np.max(mels)\n\n return mels.T", "def output_to_cwl_json(\n galaxy_output, get_metadata, get_dataset, get_extra_files, pseduo_location=False,\n):\n def 
element_to_cwl_json(element):\n element_output = GalaxyOutput(\n galaxy_output.history_id,\n element[\"object\"][\"history_content_type\"],\n element[\"object\"][\"id\"],\n )\n return output_to_cwl_json(element_output, get_metadata, get_dataset, get_extra_files)\n\n output_metadata = get_metadata(galaxy_output.history_content_type, galaxy_output.history_content_id)\n\n def dataset_dict_to_json_content(dataset_dict):\n if \"content\" in dataset_dict:\n return json.loads(dataset_dict[\"content\"])\n else:\n with open(dataset_dict[\"path\"]) as f:\n return json.load(f)\n\n if output_metadata[\"history_content_type\"] == \"dataset\":\n ext = output_metadata[\"file_ext\"]\n assert output_metadata[\"state\"] == \"ok\"\n if ext == \"expression.json\":\n dataset_dict = get_dataset(output_metadata)\n return dataset_dict_to_json_content(dataset_dict)\n else:\n file_or_directory = \"Directory\" if ext == \"directory\" else \"File\"\n if file_or_directory == \"File\":\n dataset_dict = get_dataset(output_metadata)\n properties = output_properties(pseduo_location=pseduo_location, **dataset_dict)\n basename = properties[\"basename\"]\n extra_files = get_extra_files(output_metadata)\n found_index = False\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == SECONDARY_FILES_INDEX_PATH:\n found_index = True\n\n if found_index:\n ec = get_dataset(output_metadata, filename=SECONDARY_FILES_INDEX_PATH)\n index = dataset_dict_to_json_content(ec)\n for basename in index[\"order\"]:\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == os.path.join(SECONDARY_FILES_EXTRA_PREFIX, basename):\n ec = get_dataset(output_metadata, filename=path)\n if not STORE_SECONDARY_FILES_WITH_BASENAME:\n ec[\"basename\"] = basename + os.path.basename(path)\n else:\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n if \"secondaryFiles\" not in properties:\n properties[\"secondaryFiles\"] = []\n\n properties[\"secondaryFiles\"].append(ec_properties)\n else:\n basename = output_metadata.get(\"cwl_file_name\")\n if not basename:\n basename = output_metadata.get(\"name\")\n\n listing = []\n properties = {\n \"class\": \"Directory\",\n \"basename\": basename,\n \"listing\": listing,\n }\n\n extra_files = get_extra_files(output_metadata)\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n ec = get_dataset(output_metadata, filename=path)\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n listing.append(ec_properties)\n\n return properties\n\n elif output_metadata[\"history_content_type\"] == \"dataset_collection\":\n if output_metadata[\"collection_type\"] == \"list\":\n rval = []\n for element in output_metadata[\"elements\"]:\n rval.append(element_to_cwl_json(element))\n elif output_metadata[\"collection_type\"] == \"record\":\n rval = {}\n for element in output_metadata[\"elements\"]:\n rval[element[\"element_identifier\"]] = element_to_cwl_json(element)\n return rval\n else:\n raise NotImplementedError(\"Unknown history content type encountered\")", "def write_json_music(self, get_json_id_num, response):\n\n name = str(get_json_id_num) + \"_\" + str(self.get_json_cursor) + \".json\"\n dst_path = os.path.join(self.get_json_folder_path, name)\n self.write_json(name, dst_path, response)", "def get_wave_dicoms(folder_name):\n 
dicom_list = glob.glob(folder_name + \"/*.dcm\")\n time_and_dicom = {}\n for a_dicom in dicom_list:\n dicom_data = pydicom.dcmread(a_dicom)\n if len(dicom_data[0x5400, 0x0100][0][0x5400, 0x1010].value) > 10:\n # print(dicom_data[0x0008, 0x0018].value)\n if dicom_data[0x0008, 0x1010].value == \"H-SIM1\":\n direction = \"H\"\n else:\n direction = \"V\"\n time_and_dicom[a_dicom] = [dicom_data.AcquisitionTime,\n dicom_data[0x0008, 0x0018].value,\n direction]\n\n sorted_t_d = sorted(time_and_dicom.items(),\n key=lambda x: x[1],\n reverse=True)\n return sorted_t_d", "def generate_metadata_files(self):\n\n data_folder = self.get_data_folder(mode='absolute')\n\n parents = (data_folder / '_').parents\n\n for mfile in self.mdata:\n for regex, level in METADATA_LEVEL_BY_NAME.items():\n if re.compile(regex).match(mfile.name):\n create_file(mfile, parents[(3-level)] / mfile.name,\n mode='copy')", "def checkid3(filename):\n datas = None\n mdat = None\n\n try:\n mdat = mutagen.File(filename, easy=True)\n except:\n msg = \"mutagen failed %s\" % sys.exc_value\n Logs.objects.create(filename=filename, message=msg)\n\n if mdat is not None:\n try:\n genre = mdat['genre'][0]\n except:\n genre = ''\n\n try:\n datas = {'artist': mdat['artist'][0],\n 'album': mdat['album'][0],\n 'title': mdat['title'][0],\n 'genre': genre}\n except KeyError:\n msg = \"%s : %s is not in the list.\" % (str(sys.exc_type),\n sys.exc_value)\n Logs.objects.create(filename=filename, message=msg)\n\n return datas", "def set_meta_mp3(file):\n\n list_str_prop_mp3 = ['album', 'artist', 'title']\n list_other_prop_mp3 = ['comment', 'genre', 'year']\n dict_file_mp3 = {}\n # For each string properties into the tag\n for prop in list_str_prop_mp3:\n # If the tag exist (i.e it's not empty for the music file)\n if file.tag.d.has_key(prop.upper()):\n # We delete spe char and we format it\n dict_file_mp3[prop] = delete_spe_char_and_format(file.tag[prop.upper()])\n else:\n # Or we define it's value as 'Unknow ' + prop\n # For instance 'Unknow Artist'\n dict_file_mp3[prop] = 'Unknow ' + prop.capitalize()\n # For each other properties\n for prop in list_other_prop_mp3:\n if file.tag.d.has_key(prop.upper()):\n # We just copy them\n dict_file_mp3[prop] = file.tag[prop.upper()]\n else:\n dict_file_mp3[prop] = ''\n # To try to find the tracknumber, we need 'title'\n if dict_file_mp3.has_key('title'): \n # But before, we delete the duplicate\n list_duplicate = [dict_file_mp3['artist'], dict_file_mp3['album']]\n # Now we delete the duplicates\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], list_duplicate)\n # So we are able to find the tracknumber\n number = ''\n # If ID3 already find it\n if file.tag.d.has_key(\"TRACKNUMBER\"):\n number = file.tag[\"TRACKNUMBER\"]\n # Else we try to find by ourself\n else:\n number = find_tracknumber(dict_file_mp3['title'])\n # If we found a tracknumber, we delete it from 'title'\n if number:\n dict_file_mp3['title'] = delete_duplicate(dict_file_mp3['title'], [number])\n dict_file_mp3['tracknumber'] = number\n # And we format the new title\n dict_file_mp3['title'] = build_track_name(dict_file_mp3['title'], number)\n dict_file_mp3['name'] = dict_file_mp3['title'] + '.mp3'\n dict_file_mp3['path'] = build_path([dict_file_mp3['artist'], dict_file_mp3['album']])\n return dict_file_mp3", "def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n 
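# The sox call below passes '-r 10000' and '-c 1' as single argv elements;
# a hedged sketch with the flags split out, which is the form subprocess
# (and sox) normally expects. sox_path and the file names are placeholders.
import subprocess

def mp3_to_mono_wav(sox_path, mp3_file, out_file='temp.wav'):
    # resample the output to 10 kHz and downmix to one channel
    return subprocess.call([sox_path, mp3_file, '-r', '10000', '-c', '1', out_file])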
sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def all_monthly_stats(cc, filename): # pragma: no cover\n output = {}\n stats_monthly_breakdown = compute_monthly_breakdown_stats(cc)\n stats_by_time = compute_stats_by_time(cc)\n stats_by_time.append(stats_monthly_breakdown)\n for d in stats_by_time:\n label = d['timeframe']\n output[label] = d\n with open(filename, 'w') as f:\n json.dump(output, f)", "def get_movies():\n\n # ouverture du fichier de notre liste de films\n with open(DATA_FILE,\"r\") as f:\n movies_list = json.load(f)\n\n # notre liste des instances\n movies = [Movie(m)for m in movies_list] \n return movies", "def load_muscle(preprocess=True, mean_cut=None):\n\n url = f'https://raw.githubusercontent.com/PengTao-HUST/GDNB/master/data/muscle.txt'\n cache_dir = sys.modules['gdnb'].__path__[0] + '/data/'\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n data_file = os.path.basename(url)\n full_path = cache_dir + data_file\n\n if not os.path.exists(full_path):\n urlretrieve(url, cache_dir + data_file)\n\n if preprocess:\n probes = []\n names = []\n exprs = []\n with open(full_path, 'r') as f:\n f.readline()\n f.readline()\n for line in f.readlines():\n data = line.split('\\t')\n probe = data[0]\n name = data[1]\n expr = np.array([np.log2(float(x)) for x in data[2:]])\n probes.append(probe)\n names.append(name)\n exprs.append(expr)\n probes = np.asarray(probes)\n names = np.asarray(names)\n exprs = np.asarray(exprs)\n\n use_index = []\n for name in names:\n if re.match('.*///.*', name) or not re.match('^[A-Z].*', name):\n use_index.append(False)\n else:\n use_index.append(True)\n\n probes, names, exprs = [obj[use_index] for obj in [probes, names, exprs]]\n\n means = np.mean(exprs, axis=1)\n if mean_cut is None:\n mean_cut = 7\n\n cut_index = np.where(means > mean_cut)\n probes_c, names_c, exprs_c = [obj[cut_index] for obj in [probes, names, exprs]]\n exprs_c = np.apply_along_axis(normalize_by_mean, 1, exprs_c)\n\n exprs_split = np.array([exprs_c[:, 2 * i: 2 * i + 6] for i in range(25)])\n return probes_c, names_c, exprs_split\n else:\n return full_path", "def makepredictions(self):\n data, sampling_rate = librosa.load(self.file)\n mfccs = np.mean(librosa.feature.mfcc(y=data, sr=sampling_rate, n_mfcc=40).T, axis=0)\n x = np.expand_dims(mfccs, axis=1)\n x = np.expand_dims(x, axis=0)\n predictions = self.loaded_model.predict_classes(x)\n predict = self.convertclasstoemotion(predictions)\n print(\"Prediction is\", \" \", self.convertclasstoemotion(predictions))\n return predict", "def prepare_mirex_lastfmapi_dataset(which='mirex'):\n assert which in ['lastfmapi', 'mirex']\n\n raw_folder_path = f\"{raw_dataset_path}/dataset_similarity_oramas_et_al\"\n preprocessed_folder_path = f\"{preprocessed_dataset_path}/similarity/{which}\"\n os.makedirs(preprocessed_folder_path, exist_ok=True)\n musicbrainzngs_setup()\n\n # create items.json:\n # an entry for every artist present in mirex_gold.txt, including also those in the top-n most similar list.\n\n artists = []\n with open(f\"{raw_folder_path}/{which}_gold.txt\") as f:\n for line in f.read().split('\\n')[:-1]:\n\n # artists with a ground truth of less than 10 items should be 
excluded, according to the paper\n if len(line.split('\\t')[1].split(' ')) >= 10:\n artists += [line.split('\\t')[0]]+line.split('\\t')[1].split(' ')\n\n artists = list(set(artists))\n items = []\n for a in tqdm(artists):\n artist_name = musicbrainzngs.get_artist_by_id(a)['artist']['name']\n d = {\n 'id': a,\n 'seed': {\n 'artist_musicbrainz_id': a,\n 'artist_name': artist_name,\n }\n }\n items.append(d)\n\n with open(f\"{preprocessed_folder_path}/items.json\", 'w', encoding='utf-8') as f:\n json.dump(items, f, ensure_ascii=False, indent=4)\n\n # create similar_items_ground_truth.json\n\n ground_truth_validation = {}\n ground_truth_test = {}\n with open(f\"{raw_folder_path}/{which}_gold.txt\") as f:\n for line in f.read().split('\\n')[:-1]:\n\n # artists with a ground truth of less than 10 items should be excluded, according to the paper\n if len(line.split('\\t')[1].split(' ')) >= 10:\n held_out = line.split('\\t')[1].split(' ')\n\n sample = random.sample(held_out, len(held_out))\n sample_validation = sample[:len(sample)//2]\n sample_test = sample[len(sample)//2:]\n ground_truth_validation[line.split('\\t')[0]] = sample_validation\n ground_truth_test[line.split('\\t')[0]] = sample_test\n\n with open(f\"{preprocessed_folder_path}/similar_items_ground_truth_validation.json\", 'w', encoding='utf-8') as f:\n json.dump(ground_truth_validation, f, ensure_ascii=False, indent=4)\n\n with open(f\"{preprocessed_folder_path}/similar_items_ground_truth_test.json\", 'w', encoding='utf-8') as f:\n json.dump(ground_truth_test, f, ensure_ascii=False, indent=4)", "def read(self):\n import cclib\n from pybel import readfile\n\n molecule = next(readfile(\"orca\", self._file))\n\n atom_numbers = []\n coordinates = []\n\n for atom in molecule:\n atom_number = atom.atomicnum\n atom_numbers.append(atom_number)\n for coord in atom.coords:\n coordinates.append(coord)\n\n molecular_mass = molecule.exactmass\n\n cjson = {\n \"chemical json\": 0,\n \"atoms\": {\n \"elements\": {\"number\": atom_numbers},\n \"coords\": {\"3d\": coordinates},\n },\n \"properties\": {\"molecular mass\": molecular_mass},\n }\n\n data = cclib.io.ccread(self._file)\n\n # Add calculated properties\n if hasattr(data, \"scfenergies\"):\n if len(data.scfenergies) > 0:\n energy = data.scfenergies[-1] * EV_TO_J_MOL\n cjson.setdefault(\"properties\", {})[\"totalEnergy\"] = energy\n\n if hasattr(data, \"gbasis\"):\n basis = _cclib_to_cjson_basis(data.gbasis)\n cjson[\"basisSet\"] = basis\n\n if hasattr(data, \"vibfreqs\"):\n vibfreqs = list(data.vibfreqs)\n cjson.setdefault(\"vibrations\", {})[\"frequencies\"] = vibfreqs\n\n if hasattr(data, \"vibdisps\"):\n vibdisps = _cclib_to_cjson_vibdisps(data.vibdisps)\n cjson.setdefault(\"vibrations\", {})[\"eigenVectors\"] = vibdisps\n\n # Add a placeholder intensities array\n if \"vibrations\" in cjson and \"frequencies\" in cjson[\"vibrations\"]:\n if \"intensities\" not in cjson[\"vibrations\"]:\n cjson[\"vibrations\"][\"intensities\"] = [\n 1 for i in range(len(cjson[\"vibrations\"][\"frequencies\"]))\n ]\n if \"modes\" not in cjson[\"vibrations\"]:\n cjson[\"vibrations\"][\"modes\"] = [\n i + 1 for i in range(len(cjson[\"vibrations\"][\"frequencies\"]))\n ]\n\n return cjson", "def extract_meta_data(video_file_name, output_file=meta.txt, *args, **kwargs):", "def write_megam_file(train_toks, encoding, stream, bernoulli: bool = ..., explicit: bool = ...):\n ...", "def compute_melspecs(audio):\n return librosa.feature.melspectrogram(y=audio,\n sr=SR,\n n_mels=N_MELS,\n n_fft=N_FFT,\n 
hop_length=HOP_LENGTH,\n fmin=MEL_FMIN,\n fmax=MEL_FMAX)", "def measureDataComplexM_multiext(filename,sigma = 1.1,scale=0.27):\n hdu=pf.open(filename)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n sigma = sigma/scale\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n M20[i],M22[i],M31[i],M33[i]=complexMoments(data=hdui.data[i][4:].reshape(npix,npix),sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data)\n hp.mwrfits(filename[:-7]+'_complexMoments_gausswt_'+str(sigma*scale)+'.fit',data.T,colnames=colnames)\n return '---done !-----'", "def get_drms_files(self):\n import drms\n client = drms.Client(email=self.email,verbose=True)\n fmt = '%Y.%m.%d_%H:%M'\n self.t_qstr = self.series+'[{0}_TAI-{1}_TAI@{2}]'.format(self.start.strftime(fmt),self.end.strftime(fmt),self.cadence) \n\n\n #create wavelength query string\n self.w_qstr = '[' \n for i in self.wav: self.w_qstr = self.w_qstr+'{0},'.format(int(i.value))\n #remove last , and add bracket\n self.w_qstr = self.w_qstr[:-1]+']'\n \n #make the series string\n self.s_qstr = '{'+self.segment+'}'\n\n #the full query\n self.qstr = self.t_qstr+self.w_qstr+self.s_qstr\n\n #IF ERRORS WITH URL ERROR IT IS BECAUSE THE DOWNLOAD FILE SIZE IS TOO LARGE\n #export the data file list \n self.expt = client.export(self.qstr)\n#create an array of indexes to download\n index = np.arange(np.size(self.expt.urls.url))\n# get file from JSOC\n #set directory to current if no path set\n outf = self.expt.download(self.odir,index,fname_from_rec=True)", "def generate(self):\n if len(self.files) == 0:\n raise Exception('no files to process')\n music = []\n for filename in self.files:\n music.extend(self._process_file(filename))\n return self._extract_raw(sorted(music, key=lambda tup: (tup[0], tup[1])))", "def dump_pinball_music():\n\texport_sounds(song_labels, os.path.join(conf.path, 'music'), 'Music_')", "def extract_audio_features(self, _dir_='/Volumes/TOSHIBA EXT/audio_samples10M', load=False):\n\t\tself.movie_aud_features = extract_Afeatures(_dir_=_dir_)\n\t\treturn self.movie_aud_features", "def _mo_from_ccdata(self, mosyms, moenergies, mooccs, mocoeffs):\n\n lines = []\n\n spin = 'Alpha'\n for i in range(len(mooccs)):\n for j in range(len(mooccs[i])):\n restricted_spin_idx = i % len(mocoeffs)\n lines.append(' Sym= {}'.format(mosyms[restricted_spin_idx][j]))\n moenergy = utils.convertor(moenergies[restricted_spin_idx][j], 'eV', 'hartree')\n lines.append(' Ene= {:10.4f}'.format(moenergy))\n lines.append(' Spin= {}'.format(spin))\n lines.append(' Occup= {:10.6f}'.format(mooccs[i][j]))\n # Rearrange mocoeffs according to Molden's lexicographical order.\n mocoeffs[restricted_spin_idx][j] = self._rearrange_mocoeffs(mocoeffs[restricted_spin_idx][j])\n for k, mocoeff in enumerate(mocoeffs[restricted_spin_idx][j]):\n lines.append('{:4d} {:10.6f}'.format(k + 1, mocoeff))\n spin = 'Beta'\n return lines", "def extract_deltas_of_mfccs(data_list: List[AudioData]) -> List[AudioData]:\n result = []\n for audio_data in data_list:\n mfccs = audio_data.features.mfccs\n if mfccs is None:\n warnings.warn(\"MFCCS is None. 
Ignoring.\", RuntimeWarning)\n else:\n first_col = numpy.reshape(numpy.zeros_like(mfccs[:, 0]), (-1, 1))\n velocity = numpy.diff(mfccs, prepend=first_col)\n acceleration = numpy.diff(velocity, prepend=first_col)\n audio_data.features.velocity_of_mfccs = velocity\n audio_data.features.acceleration_of_mfccs = acceleration\n result.append(audio_data)\n\n return result", "def extract(self, fname, quality=0.5, decoder=None):\n fname = safe_unicode(fname)\n if not fname:\n print('UNICODE FAILED: %s' % fname)\n return {}\n\n filename, real_filename = fname, fname\n\n (f, ext) = os.path.splitext(fname)\n ext = ext.lower()[1:]\n\n # Create parser\n try:\n if decoder:\n tags = None\n tags = [ (\"id\", decoder), None ]\n else:\n tags = None\n parser = None\n parser = hachoir_parser.createParser(fname, real_filename=real_filename, tags=tags)\n except hachoir_core.stream.InputStreamError, err:\n print('Failed to create parser for %s' % fname)\n print(err)\n return False\n if not parser:\n print('No parser found for %s' % fname)\n return False\n\n # Extract metadata\n results = None\n try:\n results = hachoir_metadata.extractMetadata(parser, quality)\n except hachoir_core.error.HachoirError, err:\n print('Failed to extract metadata for %s' % fname)\n print(err)\n return False\n if not results:\n return False\n\n # Convert metadata to dictionary\n meta = None\n meta = {\n 'unknown': {}\n }\n\n prefix = ''\n\n default_cat = None\n stream_id = None\n\n for line in str(results).split('\\n'):\n line = line.strip()\n #print('LINE: \\'%s\\'' % line)\n\n if line[0] in string.ascii_letters:\n (default_cat, stream_id) = self.parse_category(line)\n\n if default_cat not in meta.keys():\n if default_cat in ['audio', 'video']:\n meta[default_cat] = [{'stream_id': stream_id}]\n else:\n meta[default_cat] = {}\n else:\n if default_cat in ['audio', 'video']:\n meta[default_cat][stream_id] = {'stream_id': stream_id}\n\n continue\n\n line = safe_unicode(line)[2:]\n if not ': ' in line:\n continue\n\n tokens = line.split(': ')\n key = tokens[0]\n value = ': '.join(tokens[1:])\n\n #print(\"K: %s; V: %s; DC: %s; ID: %s\" % (key, value, default_cat, stream_id))\n\n\n if key in self._ignored_keys:\n continue\n\n if key in self._key_remapper.keys():\n key = self._key_remapper[key]\n\n if default_cat is 'unknown' and key in self._key_categories.keys():\n if not self._key_categories[key] in meta.keys():\n meta[self._key_categories[key]] = {}\n default_cat = self._key_categories[key]\n\n if key in self._int_fields:\n value = self.parse_int(value)\n\n elif key in self._float_fields:\n value = self.parse_float(value)\n\n elif key in self._bitrate_fields:\n bitrate_meta = self.parse_bitrate(value)\n if not bitrate_meta:\n continue\n if 'vbr' in bitrate_meta.keys() and default_cat in ['audio', 'video']:\n meta[default_cat][stream_id]['vbr'] = True\n value = bitrate_meta['bitrate']\n\n elif key in self._duration_fields:\n value = self.parse_duration(value)\n\n elif key in self._endianness_fields:\n value = self.parse_endianness(value)\n\n elif key in self._samplerate_fields:\n value = self.parse_samplerate(value)\n\n elif key in self._channel_fields:\n value = self.parse_channel(value)\n\n if default_cat in ['audio', 'video']:\n meta[default_cat][stream_id][key] = value\n else:\n meta[default_cat][key] = value\n\n for category in ['unknown']:\n if len(meta[category]) == 0:\n del(meta[category])\n\n return meta", "def load_data(data_path):\n\n with open(data_path, \"r\") as fp:\n data = json.load(fp)\n\n # convert lists to numpy arrays\n 
X = np.array(data[\"mfcc\"])\n y = np.array(data[\"labels\"])\n\n print(\"Data succesfully loaded!\")\n\n return X, y", "def gen_metars(obs, filename, convids=False):\n mtime = datetime.datetime.utcnow().strftime(\"%d%H%M\")\n thres = datetime.datetime.utcnow() - datetime.timedelta(hours=3)\n thres = thres.replace(tzinfo=pytz.UTC)\n fp = open(filename, 'w')\n fp.write(\"\\001\\015\\015\\012001\\n\")\n fp.write(\"SAUS43 KDMX %s\\015\\015\\012METAR\\015\\015\\012\" % (mtime, ))\n for sid in obs:\n ob = obs[sid]\n if ob['valid'] < thres:\n continue\n if sid in [\"RIOI4\", \"ROSI4\", \"RSMI4\", 'RMCI4']:\n continue\n metarid = sid[:4]\n remoteid = NT.sts[sid]['remote_id']\n if convids:\n metarid = RWIS2METAR.get(\"%02i\" % (remoteid,), 'XXXX')\n temptxt = \"\"\n t_temptxt = \"\"\n windtxt = \"\"\n if ob.get('sknt') is not None and ob.get('drct') is not None:\n windtxt = METARwind(ob['sknt'], ob['drct'], ob.get('gust'))\n if obs.get('tmpf') is not None and obs.get('dwpf') is not None:\n m_tmpc, t_tmpc = METARtemp(temperature(ob['tmpf'], 'F').value('C'))\n m_dwpc, t_dwpc = METARtemp(temperature(ob['dwpf'], 'F').value('C'))\n temptxt = \"%s/%s\" % (m_tmpc, m_dwpc)\n t_temptxt = \"T%s%s \" % (t_tmpc, t_dwpc)\n fp.write((\"%s %s %s %s RMK AO2 %s%s\\015\\015\\012\"\n \"\") % (metarid, ob['valid'].strftime(\"%d%H%MZ\"),\n windtxt, temptxt, t_temptxt, \"=\"))\n\n fp.write(\"\\015\\015\\012\\003\")\n fp.close()", "def mscoco_to_csv(sentence_sets, output_file_name):\n mscoco_data = {}\n for i in range (0, 2):\n mscoco_data[\"sentence\" + str(i)] = [subset[i] for subset in sentence_sets]\n mscoco_df = pd.DataFrame(data=mscoco_data)\n mscoco_csv = mscoco_df.to_csv(output_file_name, index=False)\n return mscoco_csv", "def _write_flac(parameters):\n # Load data\n from ._common import flac\n\n data = deepcopy(flac)\n data.update(parameters[\"flac\"])\n\n # Reorder rocks\n if parameters[\"rocks_order\"]:\n order = parameters[\"rocks_order\"]\n for rock in parameters[\"rocks\"].keys():\n if rock not in order:\n order.append(rock)\n else:\n order = parameters[\"rocks\"].keys()\n\n # Formats\n fmt = block_to_format[\"FLAC\"]\n fmt1 = str2format(fmt[1])\n fmt2 = str2format(fmt[2])\n fmt3 = str2format(fmt[3])\n\n # Record 1\n values = [\n bool(data[\"creep\"]),\n data[\"porosity_model\"],\n data[\"version\"],\n ]\n out = write_record(values, fmt1)\n\n # Additional records\n for k in order:\n # Load data\n data = deepcopy(default)\n data.update(parameters[\"default\"])\n data.update(parameters[\"rocks\"][k])\n\n # Permeability model\n values = [data[\"permeability_model\"][\"id\"]]\n values += list(data[\"permeability_model\"][\"parameters\"])\n out += write_record(values, fmt2)\n\n # Equivalent pore pressure\n values = [data[\"equivalent_pore_pressure\"][\"id\"], None]\n values += list(data[\"equivalent_pore_pressure\"][\"parameters\"])\n out += write_record(values, fmt3)\n\n return out" ]
[ "0.74878", "0.6018327", "0.57007396", "0.56769806", "0.55784905", "0.55092484", "0.5501925", "0.54827094", "0.5445455", "0.5390397", "0.5368815", "0.53593296", "0.5321659", "0.5301206", "0.5285528", "0.52711874", "0.5266658", "0.52634954", "0.523695", "0.52124006", "0.5208634", "0.5196521", "0.5190206", "0.5188465", "0.5156657", "0.51522917", "0.51376885", "0.51114255", "0.5085951", "0.5082588", "0.5076185", "0.50714344", "0.5052381", "0.50294155", "0.5019302", "0.50181115", "0.4991683", "0.49912292", "0.49899185", "0.49766505", "0.4971085", "0.49710062", "0.4958334", "0.49522752", "0.4951056", "0.49380687", "0.49321473", "0.4921802", "0.4909582", "0.49074778", "0.49055037", "0.49055016", "0.49041137", "0.4901471", "0.48845503", "0.48786008", "0.48781556", "0.48663777", "0.4857131", "0.48556384", "0.48498988", "0.48427442", "0.48201352", "0.48075315", "0.47905302", "0.47803122", "0.47723702", "0.47683895", "0.476731", "0.4751543", "0.4749177", "0.47488728", "0.4745859", "0.474397", "0.47381577", "0.47333935", "0.47233334", "0.47224122", "0.47209802", "0.47188634", "0.4715986", "0.4713132", "0.4708233", "0.47071767", "0.47070304", "0.47056335", "0.47051582", "0.47020787", "0.47020355", "0.47005704", "0.46982798", "0.46904895", "0.46862757", "0.468242", "0.46696535", "0.46659684", "0.46613717", "0.46576408", "0.46558613", "0.4650533" ]
0.55278736
5
Loads training dataset from json file.
def load_data_from_fold(data_path):
    print("\nLoading data from json folder {}".format(data_path))

    SAMPLES_TO_CONSIDER = 22050

    data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)

    X = np.array(data["MFCCs"])
    y = np.array(data["labels"])
    print("Training sets loaded!")
    print("data size :", X.shape, "labels size: ", y.shape)
    print("release the 'data' for memories")
    del data

    return X, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data", "def load_training(self):\n path = \"./training/\" + self.training + \".json\"\n\n data = {}\n\n with open(path, \"r\") as infile:\n data = json.load(infile)\n\n self.states = data[\"states\"]\n self.transitions = data[\"transitions\"]\n self.matrix = data[\"matrix\"]", "def load_data_from_json(json_path):\r\n print(\"\\nLoading data from json file\")\r\n with open(json_path, \"r\") as fp:\r\n data = json.load(fp)\r\n \r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def load_file(filename):\n with open(filename, 'rt') as f:\n d = json.load(f)\n return pd.DataFrame.from_records(d['dataset'])", "def load_data(self, dataset, dataset_name):\n with open(dataset, \"r\", encoding=\"utf-8\") as f:\n self.data = json.load(f)\n self.dataset_name = dataset_name", "def load_training_data(file_path):\n return load_data(file_path)", "def set_data_from_json(self, filename):\n with open(filename, 'r') as f:\n self.data = json.load(f, object_pairs_hook=OrderedDict)", "def _load_training_data(self):\n self._save_training_data()", "def read_classification_json(fn):\n with open(fn) as f:\n classification_data = json.load(f)\n f.close()\n \n return classification_data", "def load_data(filename):\n # Load JSON lines\n with open(filename, encoding='utf-8') as f:\n examples = [json.loads(line) for line in f]\n\n return examples", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def load(data_dir=None, **kwargs):\n src_file = Path(__file__)\n if not data_dir:\n data_dir = src_file.with_suffix('')\n with open(Path(data_dir).joinpath('categories.json')) as fp:\n features = datasets.Features(\n {'id': datasets.Value('string'),\n 'text': datasets.Value('string'),\n 'label': datasets.features.ClassLabel(names=json.load(fp))}\n )\n return datasets.load_dataset(str(src_file.absolute()),\n data_dir=data_dir,\n features=features, **kwargs)", "def load(config_file: typing.TextIO) -> \"TrainingConfig\":\n return TrainingConfig.from_json(config_file.read())", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def json_data_loader(path):\n res = open(path, 'r').read()\n logging.info(\"Loading file using a pyspark.read.json\")\n data_rdd = Spark.instance.sc().parallelize([res])\n return Spark.instance.spark().read.json(data_rdd)", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", 
"def load_data(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n return data", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def loadData(infile,k):\n f = open(infile,'r')\n #f = f.read().split(\"\\n\")\n #raw = json.loads(f[1])\n f = f.read()\n raw = json.loads(f)\n data = np.array(raw)\n dataset = data[k]\n return dataset", "def from_disk(cls, path: Path, loader_func: Callable = read_jsonl):\n return Dataset(\n loader_func(path / 'train.jsonl'),\n loader_func(path / 'dev.jsonl'),\n test=loader_func(path / 'test.jsonl')\n )", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_file(path: Union[Path, str], name: str) -> Dataset:\n _path = path if isinstance(path, Path) else Path(path)\n with open(_path, \"r\", encoding=\"utf-8\") as yupi_fd:\n data = json.load(yupi_fd)\n return Dataset._from_json(name, data)", "def load_dataset(self, dataset_dir, json_path):\n # Add classes. We have only one class to add.\n self.add_class(\"glomerulus\", 1, \"glomerulus\")\n\n \n\n # Load annotations\n # VGG Image Annotator saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n json_path=\"../../data/hubmap/train/aaa6a05cc.json\"", "def load(self) -> None:\n # Load in centroids\n if (self._path_model / f\"{self}\").is_file():\n with open(self._path_model / str(self), 'r') as file:\n self._centroids = {k: np.asarray(v, dtype=np.float32) for k, v in json.load(file).items()}\n \n # Load in (validation) clusters\n if (self._path_data / f\"{self}-train\").is_file():\n with open(self._path_data / f\"{self}-train\", 'r') as file:\n self._clusters = json.load(file)\n if (self._path_data / f\"{self}-val\").is_file():\n with open(self._path_data / f\"{self}-val\", 'r') as file:\n self._clusters_val = json.load(file)", "def load_json(json_path):\n\n try:\n # Tries to read .txt file into a dataframe\n json = pd.read_json(json_path, orient='split')\n\n except FileNotFoundError as e:\n # If file is not found, handle the exception and exit\n logger.error(e)\n raise\n\n # Expand features nested column\n features = json['features'].apply(pd.Series)\n\n # Drop old features column\n json = json.drop('features', 1)\n\n # Concate both into a single dataframe\n json = pd.concat([json, features], axis=1)\n\n return json", "def load_reveiws_dataset(filename):\n review_DataFrame = pd.read_json(filename, lines=True)\n return review_DataFrame", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def loadData ( self ) :\n df = pd.read_json ( self.dataset )\n df = df[pd.notnull ( df[2] )]\n df[1] = df[1].apply ( self.clean_text )\n\n self.X = df[1]\n self.y = df[2]", "def import_local_json_database_file(filename):\n\n # open json dataset\n with open(filename, 'r') as f:\n jsnDataset = json.load(f)\n return jsnDataset", "def _load_json_data(filename):\n\n relative_path = join(\"data\", filename)\n absolute_path = join(dirname(__file__), 
relative_path)\n\n with open(absolute_path) as data_file:\n return json.loads(data_file.read())", "def _load(self):\n if os.path.exists(self.path):\n with open(self.path) as src:\n data = json.loads(src.read())\n else:\n data = {\n 'type': 'FeatureCollection',\n 'features': []}\n\n # Must be a FeatureCollection\n assert data['type'] == 'FeatureCollection'\n # All features must have ids, TODO must be unique strings\n assert all(f.get('id') for f in data['features'])\n\n return data", "def load_data_set_from_json(json_path, ratio=0.7):\n train_doc_list = []\n train_category_list = []\n\n test_doc_list = []\n test_category_list = []\n if os.path.exists(json_path):\n with open(json_path, \"r\") as f:\n category_map = json.load(f)\n categories = category_map.keys()\n\n for category in categories:\n all_doc_list = category_map.get(category)\n length = len(all_doc_list)\n train_set_length = int(length * ratio)\n\n for i in range(length):\n if i < train_set_length:\n train_doc_list.append(all_doc_list[i])\n train_category_list.append(category)\n else:\n test_doc_list.append(all_doc_list[i])\n test_category_list.append(category)\n\n else:\n print(\"File doesn't exist, please run load_file_to_json first\")\n\n return train_doc_list, train_category_list, test_doc_list, test_category_list", "def load_data_file(path):\n with open(path, encoding='utf-8') as f:\n return json.load(f)", "def _load(predictions, f):\n\n # with open(f) as json_file:\n data = json.load(f)\n for p in data['predictions']:\n prediction = Prediction(p)\n predictions[prediction.example_id] = prediction", "def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)", "def from_json(cls, filename):\n with open(filename, 'r') as f:\n loaded_data = json.load(f, object_pairs_hook=OrderedDict)\n return cls(data=loaded_data)", "def load_json(self, filename):\n with open(filename, 'r', encoding='utf-8') as f:\n data_dict = json.load(f)\n return data_dict", "def load_data(filepath):\n \n return pd.read_json(filepath)", "def load_json_data(filepath):\n with open(filepath,'r') as f:\n return json.load(f)", "def load(cls, path: str):\n with open(path, \"r\") as f:\n run_data = json.load(f)\n return Experiment.load_from_dict(run_data)", "def load_json_data(path, fraction=None, examples_per_class=None):\n with open(path, 'rb') as file:\n data = json.load(file)\n features = np.array(data[0]).astype(float)\n targets = np.array(data[1]).astype(int)\n\n return features, np.array([[]]), targets, np.array([])", "def load_data(name):\n with open(f\"tests/data/{name}.json\", \"r\") as json_file:\n return json.load(json_file)", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def load():\n try:\n with open('learn.json', 'r') as file:\n return json.load(file)\n except IOError:\n return []", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with 
open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def load_dataset(name):\n dataset, info = tfds.load(name=name,\n with_info=True,\n data_dir='data/external')\n train_dataset = dataset['train']\n train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE,\n reshuffle_each_iteration=False)\n\n return train_dataset", "def _load_dataset_with_labels(self, json_file):\n # Flatten JSON\n df = pd.json_normalize(\n json_file, self.JSON_RECORD_PATH, meta=[[\"data\", \"title\"]]\n )\n df_questions = pd.json_normalize(\n json_file, self.JSON_RECORD_PATH[:-1], meta=[[\"data\", \"title\"]]\n )\n df_contexts = pd.json_normalize(\n json_file, self.JSON_RECORD_PATH[:-2], meta=[[\"data\", \"title\"]]\n )\n\n # Build the flattened Pandas DataFrame\n contexts = np.repeat(df_contexts[\"context\"].values, df_contexts.qas.str.len())\n contexts = np.repeat(contexts, df_questions[\"answers\"].str.len())\n df[\"context\"] = contexts\n df[\"question_id\"] = np.repeat(\n df_questions[\"id\"].values, df_questions[\"answers\"].str.len()\n )\n df[\"question\"] = np.repeat(\n df_questions[\"question\"].values, df_questions[\"answers\"].str.len()\n )\n df[\"context_id\"] = df[\"context\"].factorize()[0]\n\n # Rename columns\n df.rename(columns={\"data.title\": \"title\", \"text\": \"answer\"}, inplace=True)\n\n # Add end index for answers\n df = self._add_end_index(df)\n\n # Remove duplicated answers\n df = df.drop_duplicates()\n\n return df", "def read(self, data_path: str = None, *args, **kwargs) -> Dict:\n\n with open(data_path) as f:\n content = f.readlines()\n\n dataset = dict()\n dataset[\"train\"] = [(line,) for line in content]\n dataset[\"valid\"] = []\n dataset[\"test\"] = []\n\n return dataset", "def from_json(cls, fname):\n d = read_json(fname)\n return cls.from_dict(d)", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. 
Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def load_data(self, filepath: str) -> pd.DataFrame:\n \n with open(filepath) as f:\n self.data = json.load(f)\n\n return self.data", "def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def load_dataset(path):\n with open(path) as f:\n data = json.load(f)['data']\n output = {'qids': [], 'questions': [], 'answers': [],\n 'contexts': [], 'qid2cid': []}\n for article in data:\n for paragraph in article['paragraphs']:\n output['contexts'].append(paragraph['context'])\n for qa in paragraph['qas']:\n output['qids'].append(qa['id'])\n output['questions'].append(qa['question'])\n output['qid2cid'].append(len(output['contexts']) - 1)\n if 'answers' in qa:\n output['answers'].append(qa['answers'])\n return output", "def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch", "def load_from_json(filename):\n\n with open(filename, 'r') as file:\n return json.load(file)", "def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def load_from_json_file(filename):\n with open(filename) as f:\n return json.load(f)", "def load_from_json(self, file_name: str) -> bool:\n try:\n with open(file_name, 'r') as f:\n data = json.loads(f.read())\n self.__g = DiGraph.from_dict(data)\n return True\n except:\n traceback.print_exc()\n return False", "def load_dataset(dataset_path: str, mode: str, vectorizer_path: str):\n if mode == \"celeb\":\n x_path = dataset_path + \"/celebrity-feeds.ndjson\"\n else:\n x_path = dataset_path + \"/follower-feeds.ndjson\"\n y_data = 
[json.loads(line) for line in open(dataset_path + \"/labels.ndjson\", \"r\")]\n\n if not Path(vectorizer_path).exists():\n logging.info(\"no stored vectorizer found, creating ...\")\n vec = TfidfVectorizer(preprocessor=_preprocess_feed, ngram_range=N_GRAM_RANGE,\n max_features=MAX_WORD_FEATURES, analyzer='word', min_df=3)\n vec.fit(_read_text_linewise(x_path, mode))\n joblib.dump(vec, vectorizer_path)\n else:\n logging.info(\"loading stored vectorizer\")\n vec = joblib.load(vectorizer_path)\n\n # load x data\n logging.info(\"transforming data ...\")\n x = vec.transform(_read_text_linewise(x_path, mode))\n\n # load Y data\n y_gender = [g_dict[l[\"gender\"]] for l in y_data]\n y_occ = [o_dict[l[\"occupation\"]] for l in y_data]\n y_age = [_get_age_class(l[\"birthyear\"]) for l in y_data]\n ids = [i[\"id\"] for i in y_data]\n return x, y_age, y_gender, y_occ, ids", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n obj = json.loads(f.read())\n return obj", "def __load(self, filename):\n values = {}\n training = []\n filecontent = loader.load('csv/' + filename + '.csv').generate().strip().split('\\n')\n i = 0\n for r in filecontent:\n if i < 1000: # limit size of datasets to 1,000 records\n training.append( r )\n i += 1\n else:\n break\n\n values['training'] = '\\n'.join(training)\n values['n_training'] = int(3. * len(training) / 4.)\n values['description'] = self.possible_values.get(filename, ('', 10))[0]\n values['n_trees'] = self.possible_values.get(filename, ('', 10))[1]\n self.cached_values[ filename ] = json.dumps(values)", "def load_data(fname):\n # load the json in gzip format\n with gzip.open(fname, 'r') as fin:\n data = json.loads(fin.read().decode('utf-8'))\n return data", "def from_json(self, fpath):\n import json\n with open(fpath, 'r') as fp:\n d = json.load(fp)\n return Grid.from_dict(d)", "def load_file(self):\n self._check_setup()\n json_str = self.get_json_file()\n if json_str is None:\n return\n\n if not self._is_json_str():\n with open(json_str, 'r') as f:\n jf = json.load(f)\n else:\n jf = json.loads(json_str)\n\n\n self.jf = jf\n\n target = jf['target']\n if isinstance(target, str):\n target = eval(target)\n\n goal = jf['goal']\n if isinstance(goal, str):\n goal = eval(goal)\n\n self.gen_target_pos = np.array(target)\n self.gen_goal_pos = np.array(goal)\n\n if 'place_walls' in jf:\n self.place_walls = jf['place_walls']\n\n if self.get_is_rnd():\n self.rnd_map = jf['rnd']\n self.env_jf = jf['env']", "def load_training_data(vocab, directory):\n top_level = os.listdir(directory)\n dataset = []\n for d in top_level:\n if d[-1] == '/':\n label = d[:-1]\n subdir = d\n else:\n label = d\n subdir = d+\"/\"\n files = os.listdir(directory+subdir)\n for f in files:\n bow = create_bow(vocab, directory+subdir+f)\n dataset.append({'label': label, 'bow': bow})\n return dataset", "def load_data(path, source='assistant', splits=(70, 10, 20)):\n if not len(splits) == 3:\n raise ValueError('splits expected to have three components: found {}'\n .format(len(splits)))\n train_size, test_size, val_size = np.array(splits) / np.sum(splits)\n\n data = json.load(open(path))\n if source == 'assistant':\n # Split into training set & (test + val) set.\n data_train, data_test = train_test_split(\n data, train_size=train_size\n )\n # Now, the (test + val) set into the test, and val sets\n data_test, data_val = train_test_split(\n data_test,\n test_size=(val_size / (val_size + test_size))\n )\n data = {\n 'train': data_train,\n 'test': data_test,\n 'val': data_val\n }\n elif source == 
'sentiment':\n text, label = data['text'], data['label']\n\n # Split into training set & (test + val) set.\n text_train, text_test, label_train, label_test = train_test_split(\n text, label,\n train_size=train_size\n )\n # Now, the (test + val) set into the test, and val sets\n text_test, text_val, label_test, label_val = train_test_split(\n text_test, label_test,\n test_size=(val_size / (val_size + test_size))\n )\n data = {\n 'train': (text_train, label_train),\n 'test': (text_test, label_test),\n 'val': (text_val, label_val)\n }\n else:\n raise ValueError('Invalid source: {}'.format(source))\n return data", "def from_json_file(cls, json_file):\n with open(json_file, \"r\") as reader:\n return cls.from_dict(json.load(reader))", "def load_from_json(db: Database, json_file: str):\n db.clear_db()\n with open(json_file, 'r') as f:\n source = json.load(f)\n\n for work in source['works']:\n history = [] if 'history' not in work else work['history']\n db.insert_work({'code': work['id'],\n 'name': work['name'],\n 'world': work['world'],\n 'series': work['series'],\n 'genre': work['genre'],\n 'form': work['form'],\n 'status': work['status'],\n 'word_count': work['word_count'],\n 'type': 'work',\n 'last_change': history[-1]['timestamp'] if len(history) > 0 else '',\n 'parent': None,\n 'aggregate': False},\n commit=False)\n for entry in history:\n db.set_history(work['id'], entry['timestamp'], 'word_count', entry['count'], commit=False)\n\n for name, classifier in source['classifiers'].items():\n classifier['name'] = name\n db.insert_classifier(classifier, commit=False)\n\n for language, contexts in source['i18n'].items():\n for context, string_set in contexts.items():\n for key, value in string_set.items():\n db.set_translation(language, context, key, value, commit=False)\n\n db.force_commit()", "def from_json(cls, jsonfile, **kwargs):\n jdict = ltu.loadjson(jsonfile)\n slf = cls.from_dict(jdict, **kwargs)\n # Return\n return slf", "def load(path: str) -> \"DataDescriptor\":\n\n\t\twith open(path, \"r\") as f:\n\t\t\tinfo_dict = json.load(f)\n\n\t\treturn DataDescriptor(\n\t\t\tn_gram_size=int(info_dict[\"n_gram_size\"]),\n\t\t\tcaseless=bool(info_dict[\"caseless\"]),\n\t\t\tignore_punctuation=bool(info_dict[\"ignore_punctuation\"]),\n\t\t\tadd_pos_tags=bool(info_dict[\"add_pos_tags\"]),\n\t\t\tuses_lemma=bool(info_dict[\"uses_lemma\"]),\n\t\t\tuses_sentences=bool(info_dict[\"uses_sentences\"])\n\t\t)", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def load_from_geojson(self, filename_or_url):", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def test_load_json_fobj():\n\n file_name = 'test_fooof_all'\n\n with open(os.path.join(TEST_DATA_PATH, file_name + '.json'), 'r') as f_obj:\n data = load_json(f_obj, '')\n\n assert data", "def load_data(data_path):\r\n\r\n with open(data_path, \"r\") as fp:\r\n data = json.load(fp)\r\n\r\n # Convert 
lists to numpy arrays.\r\n X = np.array(data[\"mel\"]) # The name in brackets is changed to \"mfccs\" if MFCC features are used to train.\r\n y = np.array(data[\"labels\"])\r\n return X, y", "def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary", "def ignore_test_load_local_data(self):\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert \"text\" not in train_data.training_examples[0].data\n assert \"label\" in train_data.training_examples[0].data", "def load_json(path):\n with open(path) as data_file:\n return json.load(data_file)", "def load_training_dataset(data_dir):\n\tball_images = load_ball_images_to_memory(data_dir)\n\tgen = functools.partial(data_generator, data_dir, ball_images)\n\treturn tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))", "def json_loader(filename):\n\n with open(filename, \"r\", encoding=\"UTF-8\") as source:\n data = json.load(source, object_hook=object_decode)\n return data", "def load_dataset(file_path):\n return Dataset.load(file_path)", "def load_data(path='mnist.npz'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(\n path,\n origin=origin_folder + 'mnist.npz',\n file_hash=\n '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')\n print('############################################' + path) \n with np.load(path, allow_pickle=True) as f: # pylint: disable=unexpected-keyword-arg\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\n return (x_train, y_train), (x_test, y_test)", "def load_config(filename):\n with open(filename, \"r\") as my_file:\n my_file = my_file.read()\n return K.models.model_from_json(my_file)", "def load_json(self):\n\n self.load_json_str(self.get_json_str())", "def _load_fixture(filename):\n\n # Read the binary data into text\n with open(filename, 'rb') as stream:\n content = stream.read().decode('utf-8')\n\n # Decode the data as JSON\n data = json.loads(content)\n\n # Instantiate a session.\n session = Session()\n\n # Iterate through the entries to add them one by one.\n for item in data:\n # Resolve model from the table reference.\n table = Base.metadata.tables[item['model'].split('.')[-1]]\n\n # Add the primary key.\n item['fields']['id'] = 
item['pk']\n\n # Add a new row.\n session.connection().execute(table.insert().values(**item['fields']))\n\n # Commit the session to the database.\n session.commit()", "def load_data(filepath=None):\r\n if filepath is None:\r\n filepath = LATEST_DATA_SET_PATH\r\n\r\n with open(filepath) as file:\r\n return json.load(file)", "def load_json(self, file):\n with open(file, 'r', encoding=\"utf8\") as f:\n self.extend(json.load(f))", "def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')", "def load_from_json(file):\n with open(file, 'r') as f:\n return json.load(f)", "def load_data(self, annotation_json, images_dir):\r\n # Load json from file\r\n json_file = open(annotation_json)\r\n coco_json = json.load(json_file)\r\n json_file.close()\r\n \r\n # Add the class names using the base method from utils.Dataset\r\n source_name = \"coco_like\"\r\n ids={}\r\n i=0\r\n for category in coco_json['categories']:\r\n i+=1\r\n class_id = category['id']\r\n ids[class_id]=i\r\n class_name = category['name']\r\n if class_id < 1:\r\n print('Error: Class id for \"{}\" cannot be less than one. (0 is reserved for the background)'.format(class_name))\r\n return\r\n \r\n self.add_class(source_name, class_id, class_name)\r\n for annotation in coco_json['annotations']:\r\n annotation[\"category_id\"]=ids[annotation[\"category_id\"]]\r\n \r\n # Get all annotations\r\n \r\n annotations = {}\r\n for annotation in coco_json['annotations']:\r\n image_id = annotation['image_id']\r\n if image_id not in annotations:\r\n annotations[image_id] = []\r\n annotations[image_id].append(annotation)\r\n \r\n # Get all images and add them to the dataset\r\n seen_images = {}\r\n for image in coco_json['images']:\r\n image_id = image['id']\r\n if image_id in seen_images:\r\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\r\n else:\r\n seen_images[image_id] = image\r\n try:\r\n image_file_name = image['file_name']\r\n image_width = image['width']\r\n image_height = image['height']\r\n except KeyError as key:\r\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\r\n \r\n image_path = os.path.abspath(os.path.join(images_dir, image_file_name))\r\n image_annotations = annotations[image_id]\r\n \r\n # Add the image using the base method from utils.Dataset\r\n self.add_image(\r\n source=source_name,\r\n image_id=image_id,\r\n path=image_path,\r\n width=image_width,\r\n height=image_height,\r\n annotations=image_annotations\r\n )", "def load_json(self, json_path=None):\n if json_path is None:\n json_path = self.json_path\n with open(json_path, encoding='utf-8', mode='r') as f:\n data = json.load(f)\n return data", "def loadJson (self, path):\n\n # get all lines in json, concatenate then into a big string then parse it\n with open(path, \"r\") as file_content:\n all_lines = file_content.readlines()\n all_content_str = \"\".join(all_lines)\n json_dict = json.loads(all_content_str)\n self.tile_reprs = list(json_dict['tiles']['structural-tiles'].keys())\n\n # remove this empty char\n self.tile_reprs.remove(\"-\")", "def from_json(cls, file_path):\n profile = cls()\n with open(file_path, 'r') as fd:\n profile._ngrams = json.load(fd)\n return profile", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def load_data(self, json_file, target_index='infinity'):\n # data = json.loads(json_file)\n with open(json_file) as f:\n data = json.load(f)\n self.elasticsearch.index(index=target_index, doc_type='image_vector', body=data)", "def 
load(self, path):\n with open(path, \"rt\") as open_file:\n data = json.load(open_file)\n return data" ]
[ "0.7791885", "0.7707302", "0.71105415", "0.70239383", "0.6984626", "0.6925664", "0.6893865", "0.6847589", "0.67815673", "0.6773288", "0.66926444", "0.6675152", "0.6651707", "0.6640259", "0.66312504", "0.6630534", "0.6623066", "0.66220605", "0.6606385", "0.65876716", "0.65810245", "0.65713644", "0.6505979", "0.64789426", "0.64755666", "0.64690053", "0.64564073", "0.64432675", "0.64190453", "0.64051354", "0.63528025", "0.6338239", "0.6329401", "0.63268745", "0.63077235", "0.62888277", "0.6286131", "0.6276163", "0.6261153", "0.62574524", "0.621978", "0.6212854", "0.6205364", "0.6189412", "0.6187334", "0.61744684", "0.617394", "0.61671996", "0.6165447", "0.61649555", "0.61483", "0.61388934", "0.6134972", "0.613142", "0.6124025", "0.6110298", "0.61006457", "0.60902065", "0.6079083", "0.60781497", "0.60781246", "0.60659367", "0.6058301", "0.6043083", "0.6042904", "0.6022686", "0.60220486", "0.60196406", "0.60171527", "0.60108596", "0.6005297", "0.6004082", "0.6003016", "0.5997511", "0.5997309", "0.5993864", "0.59911436", "0.59906906", "0.598785", "0.5985788", "0.598369", "0.598368", "0.59829897", "0.5982295", "0.59816647", "0.59773767", "0.5974526", "0.5954236", "0.5952561", "0.59378296", "0.5936877", "0.5932717", "0.59311426", "0.5925998", "0.5925396", "0.59240174", "0.59209645", "0.5918728", "0.5917995", "0.5916831" ]
0.61219925
55
Loads training dataset from json file.
def load_data_from_json(json_path):
    print("\nLoading data from json file")
    with open(json_path, "r") as fp:
        data = json.load(fp)

    X = np.array(data["MFCCs"])
    y = np.array(data["labels"])
    print("Training sets loaded!")
    print("data size :", X.shape, "labels size: ", y.shape)
    print("release the 'data' for memories")
    del data

    return X, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_training_data(\n self,\n train_data_file=\"datasets/train_data.json\",\n test_data_file=\"datasets/test_data.json\",\n ):\n train_data = pd.read_json(train_data_file)\n test_data = pd.read_json(test_data_file)\n return train_data, test_data", "def load_training(self):\n path = \"./training/\" + self.training + \".json\"\n\n data = {}\n\n with open(path, \"r\") as infile:\n data = json.load(infile)\n\n self.states = data[\"states\"]\n self.transitions = data[\"transitions\"]\n self.matrix = data[\"matrix\"]", "def load_file(filename):\n with open(filename, 'rt') as f:\n d = json.load(f)\n return pd.DataFrame.from_records(d['dataset'])", "def load_data(self, dataset, dataset_name):\n with open(dataset, \"r\", encoding=\"utf-8\") as f:\n self.data = json.load(f)\n self.dataset_name = dataset_name", "def load_training_data(file_path):\n return load_data(file_path)", "def set_data_from_json(self, filename):\n with open(filename, 'r') as f:\n self.data = json.load(f, object_pairs_hook=OrderedDict)", "def _load_training_data(self):\n self._save_training_data()", "def read_classification_json(fn):\n with open(fn) as f:\n classification_data = json.load(f)\n f.close()\n \n return classification_data", "def load_data(filename):\n # Load JSON lines\n with open(filename, encoding='utf-8') as f:\n examples = [json.loads(line) for line in f]\n\n return examples", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def load(data_dir=None, **kwargs):\n src_file = Path(__file__)\n if not data_dir:\n data_dir = src_file.with_suffix('')\n with open(Path(data_dir).joinpath('categories.json')) as fp:\n features = datasets.Features(\n {'id': datasets.Value('string'),\n 'text': datasets.Value('string'),\n 'label': datasets.features.ClassLabel(names=json.load(fp))}\n )\n return datasets.load_dataset(str(src_file.absolute()),\n data_dir=data_dir,\n features=features, **kwargs)", "def load(config_file: typing.TextIO) -> \"TrainingConfig\":\n return TrainingConfig.from_json(config_file.read())", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def json_data_loader(path):\n res = open(path, 'r').read()\n logging.info(\"Loading file using a pyspark.read.json\")\n data_rdd = Spark.instance.sc().parallelize([res])\n return Spark.instance.spark().read.json(data_rdd)", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def load_data(filename):\n with open(filename) as json_file:\n data = json.load(json_file)\n return data", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def loadData(infile,k):\n f = open(infile,'r')\n #f = f.read().split(\"\\n\")\n #raw = json.loads(f[1])\n f = f.read()\n raw = json.loads(f)\n data = np.array(raw)\n dataset = 
data[k]\n return dataset", "def from_disk(cls, path: Path, loader_func: Callable = read_jsonl):\n return Dataset(\n loader_func(path / 'train.jsonl'),\n loader_func(path / 'dev.jsonl'),\n test=loader_func(path / 'test.jsonl')\n )", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))", "def from_file(path: Union[Path, str], name: str) -> Dataset:\n _path = path if isinstance(path, Path) else Path(path)\n with open(_path, \"r\", encoding=\"utf-8\") as yupi_fd:\n data = json.load(yupi_fd)\n return Dataset._from_json(name, data)", "def load_dataset(self, dataset_dir, json_path):\n # Add classes. We have only one class to add.\n self.add_class(\"glomerulus\", 1, \"glomerulus\")\n\n \n\n # Load annotations\n # VGG Image Annotator saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n json_path=\"../../data/hubmap/train/aaa6a05cc.json\"", "def load(self) -> None:\n # Load in centroids\n if (self._path_model / f\"{self}\").is_file():\n with open(self._path_model / str(self), 'r') as file:\n self._centroids = {k: np.asarray(v, dtype=np.float32) for k, v in json.load(file).items()}\n \n # Load in (validation) clusters\n if (self._path_data / f\"{self}-train\").is_file():\n with open(self._path_data / f\"{self}-train\", 'r') as file:\n self._clusters = json.load(file)\n if (self._path_data / f\"{self}-val\").is_file():\n with open(self._path_data / f\"{self}-val\", 'r') as file:\n self._clusters_val = json.load(file)", "def load_json(json_path):\n\n try:\n # Tries to read .txt file into a dataframe\n json = pd.read_json(json_path, orient='split')\n\n except FileNotFoundError as e:\n # If file is not found, handle the exception and exit\n logger.error(e)\n raise\n\n # Expand features nested column\n features = json['features'].apply(pd.Series)\n\n # Drop old features column\n json = json.drop('features', 1)\n\n # Concate both into a single dataframe\n json = pd.concat([json, features], axis=1)\n\n return json", "def load_reveiws_dataset(filename):\n review_DataFrame = pd.read_json(filename, lines=True)\n return review_DataFrame", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def loadData ( self ) :\n df = pd.read_json ( self.dataset )\n df = df[pd.notnull ( df[2] )]\n df[1] = df[1].apply ( self.clean_text )\n\n self.X = df[1]\n self.y = df[2]", "def import_local_json_database_file(filename):\n\n # open json dataset\n with open(filename, 'r') as f:\n jsnDataset = json.load(f)\n return jsnDataset", "def _load_json_data(filename):\n\n relative_path = join(\"data\", filename)\n absolute_path = join(dirname(__file__), relative_path)\n\n with open(absolute_path) as data_file:\n return json.loads(data_file.read())", "def _load(self):\n if os.path.exists(self.path):\n with open(self.path) as src:\n data = json.loads(src.read())\n else:\n data = {\n 'type': 'FeatureCollection',\n 'features': []}\n\n # Must be a FeatureCollection\n assert data['type'] == 'FeatureCollection'\n # All features must have ids, TODO must be unique 
strings\n assert all(f.get('id') for f in data['features'])\n\n return data", "def load_data_set_from_json(json_path, ratio=0.7):\n train_doc_list = []\n train_category_list = []\n\n test_doc_list = []\n test_category_list = []\n if os.path.exists(json_path):\n with open(json_path, \"r\") as f:\n category_map = json.load(f)\n categories = category_map.keys()\n\n for category in categories:\n all_doc_list = category_map.get(category)\n length = len(all_doc_list)\n train_set_length = int(length * ratio)\n\n for i in range(length):\n if i < train_set_length:\n train_doc_list.append(all_doc_list[i])\n train_category_list.append(category)\n else:\n test_doc_list.append(all_doc_list[i])\n test_category_list.append(category)\n\n else:\n print(\"File doesn't exist, please run load_file_to_json first\")\n\n return train_doc_list, train_category_list, test_doc_list, test_category_list", "def load_data_file(path):\n with open(path, encoding='utf-8') as f:\n return json.load(f)", "def _load(predictions, f):\n\n # with open(f) as json_file:\n data = json.load(f)\n for p in data['predictions']:\n prediction = Prediction(p)\n predictions[prediction.example_id] = prediction", "def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)", "def from_json(cls, filename):\n with open(filename, 'r') as f:\n loaded_data = json.load(f, object_pairs_hook=OrderedDict)\n return cls(data=loaded_data)", "def load_json(self, filename):\n with open(filename, 'r', encoding='utf-8') as f:\n data_dict = json.load(f)\n return data_dict", "def load_data(filepath):\n \n return pd.read_json(filepath)", "def load_json_data(filepath):\n with open(filepath,'r') as f:\n return json.load(f)", "def load(cls, path: str):\n with open(path, \"r\") as f:\n run_data = json.load(f)\n return Experiment.load_from_dict(run_data)", "def load_json_data(path, fraction=None, examples_per_class=None):\n with open(path, 'rb') as file:\n data = json.load(file)\n features = np.array(data[0]).astype(float)\n targets = np.array(data[1]).astype(int)\n\n return features, np.array([[]]), targets, np.array([])", "def load_data(name):\n with open(f\"tests/data/{name}.json\", \"r\") as json_file:\n return json.load(json_file)", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def load():\n try:\n with open('learn.json', 'r') as file:\n return json.load(file)\n except IOError:\n return []", "def _load_processed_data(self):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'r') as f:\n train_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'r') as f:\n dev_examples = json.load(f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'r') as f:\n word_vocab = Vocab.from_json(json.load(f))\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'r') as f:\n char_vocab = Vocab.from_json(json.load(f))\n\n return train_examples, dev_examples, word_vocab, char_vocab", "def load_dataset(name):\n dataset, info = tfds.load(name=name,\n with_info=True,\n 
data_dir='data/external')\n train_dataset = dataset['train']\n train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE,\n reshuffle_each_iteration=False)\n\n return train_dataset", "def _load_dataset_with_labels(self, json_file):\n # Flatten JSON\n df = pd.json_normalize(\n json_file, self.JSON_RECORD_PATH, meta=[[\"data\", \"title\"]]\n )\n df_questions = pd.json_normalize(\n json_file, self.JSON_RECORD_PATH[:-1], meta=[[\"data\", \"title\"]]\n )\n df_contexts = pd.json_normalize(\n json_file, self.JSON_RECORD_PATH[:-2], meta=[[\"data\", \"title\"]]\n )\n\n # Build the flattened Pandas DataFrame\n contexts = np.repeat(df_contexts[\"context\"].values, df_contexts.qas.str.len())\n contexts = np.repeat(contexts, df_questions[\"answers\"].str.len())\n df[\"context\"] = contexts\n df[\"question_id\"] = np.repeat(\n df_questions[\"id\"].values, df_questions[\"answers\"].str.len()\n )\n df[\"question\"] = np.repeat(\n df_questions[\"question\"].values, df_questions[\"answers\"].str.len()\n )\n df[\"context_id\"] = df[\"context\"].factorize()[0]\n\n # Rename columns\n df.rename(columns={\"data.title\": \"title\", \"text\": \"answer\"}, inplace=True)\n\n # Add end index for answers\n df = self._add_end_index(df)\n\n # Remove duplicated answers\n df = df.drop_duplicates()\n\n return df", "def read(self, data_path: str = None, *args, **kwargs) -> Dict:\n\n with open(data_path) as f:\n content = f.readlines()\n\n dataset = dict()\n dataset[\"train\"] = [(line,) for line in content]\n dataset[\"valid\"] = []\n dataset[\"test\"] = []\n\n return dataset", "def from_json(cls, fname):\n d = read_json(fname)\n return cls.from_dict(d)", "def load_data(label_name='Species'):\n\n # Create a local copy of the training set.\n train_path = tf.keras.utils.get_file(fname=TRAIN_URL.split('/')[-1],\n origin=TRAIN_URL)\n # train_path now holds the pathname: (训练集和测试集路径) ~/.keras/datasets/iris_training.csv\n\n # Parse the local CSV file.(解析)\n train = pd.read_csv(filepath_or_buffer=train_path,\n names=CSV_COLUMN_NAMES, # list of column names\n header=0 # ignore the first row of the CSV file.\n )\n # train now holds a pandas DataFrame, which is data structure\n # analogous to a table.\n\n # 1. Assign the DataFrame's labels (the right-most column) to train_label.\n # 2. Delete (pop) the labels from the DataFrame.\n # 3. 
Assign the remainder of the DataFrame to train_features\n print(\"-\")\n train_features, train_label = train, train.pop(label_name)\n\n # Apply the preceding logic to the test set.\n test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_features, test_label = test, test.pop(label_name)\n\n # Return four DataFrames.\n return (train_features, train_label), (test_features, test_label)", "def load_data(self, filepath: str) -> pd.DataFrame:\n \n with open(filepath) as f:\n self.data = json.load(f)\n\n return self.data", "def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))", "def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)", "def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y", "def load_dataset(path):\n with open(path) as f:\n data = json.load(f)['data']\n output = {'qids': [], 'questions': [], 'answers': [],\n 'contexts': [], 'qid2cid': []}\n for article in data:\n for paragraph in article['paragraphs']:\n output['contexts'].append(paragraph['context'])\n for qa in paragraph['qas']:\n output['qids'].append(qa['id'])\n output['questions'].append(qa['question'])\n output['qid2cid'].append(len(output['contexts']) - 1)\n if 'answers' in qa:\n output['answers'].append(qa['answers'])\n return output", "def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch", "def load_from_json(filename):\n\n with open(filename, 'r') as file:\n return json.load(file)", "def load_dataset(path):\n training_data = []\n with open(path) as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\")\n next(reader)\n for row in reader:\n training_data.append(row[1])\n return training_data", "def load_from_json_file(filename):\n with open(filename) as f:\n return 
json.load(f)", "def load_from_json(self, file_name: str) -> bool:\n try:\n with open(file_name, 'r') as f:\n data = json.loads(f.read())\n self.__g = DiGraph.from_dict(data)\n return True\n except:\n traceback.print_exc()\n return False", "def load_dataset(dataset_path: str, mode: str, vectorizer_path: str):\n if mode == \"celeb\":\n x_path = dataset_path + \"/celebrity-feeds.ndjson\"\n else:\n x_path = dataset_path + \"/follower-feeds.ndjson\"\n y_data = [json.loads(line) for line in open(dataset_path + \"/labels.ndjson\", \"r\")]\n\n if not Path(vectorizer_path).exists():\n logging.info(\"no stored vectorizer found, creating ...\")\n vec = TfidfVectorizer(preprocessor=_preprocess_feed, ngram_range=N_GRAM_RANGE,\n max_features=MAX_WORD_FEATURES, analyzer='word', min_df=3)\n vec.fit(_read_text_linewise(x_path, mode))\n joblib.dump(vec, vectorizer_path)\n else:\n logging.info(\"loading stored vectorizer\")\n vec = joblib.load(vectorizer_path)\n\n # load x data\n logging.info(\"transforming data ...\")\n x = vec.transform(_read_text_linewise(x_path, mode))\n\n # load Y data\n y_gender = [g_dict[l[\"gender\"]] for l in y_data]\n y_occ = [o_dict[l[\"occupation\"]] for l in y_data]\n y_age = [_get_age_class(l[\"birthyear\"]) for l in y_data]\n ids = [i[\"id\"] for i in y_data]\n return x, y_age, y_gender, y_occ, ids", "def load_from_json_file(filename):\n with open(filename, 'r') as f:\n obj = json.loads(f.read())\n return obj", "def __load(self, filename):\n values = {}\n training = []\n filecontent = loader.load('csv/' + filename + '.csv').generate().strip().split('\\n')\n i = 0\n for r in filecontent:\n if i < 1000: # limit size of datasets to 1,000 records\n training.append( r )\n i += 1\n else:\n break\n\n values['training'] = '\\n'.join(training)\n values['n_training'] = int(3. 
* len(training) / 4.)\n values['description'] = self.possible_values.get(filename, ('', 10))[0]\n values['n_trees'] = self.possible_values.get(filename, ('', 10))[1]\n self.cached_values[ filename ] = json.dumps(values)", "def load_data(fname):\n # load the json in gzip format\n with gzip.open(fname, 'r') as fin:\n data = json.loads(fin.read().decode('utf-8'))\n return data", "def from_json(self, fpath):\n import json\n with open(fpath, 'r') as fp:\n d = json.load(fp)\n return Grid.from_dict(d)", "def load_file(self):\n self._check_setup()\n json_str = self.get_json_file()\n if json_str is None:\n return\n\n if not self._is_json_str():\n with open(json_str, 'r') as f:\n jf = json.load(f)\n else:\n jf = json.loads(json_str)\n\n\n self.jf = jf\n\n target = jf['target']\n if isinstance(target, str):\n target = eval(target)\n\n goal = jf['goal']\n if isinstance(goal, str):\n goal = eval(goal)\n\n self.gen_target_pos = np.array(target)\n self.gen_goal_pos = np.array(goal)\n\n if 'place_walls' in jf:\n self.place_walls = jf['place_walls']\n\n if self.get_is_rnd():\n self.rnd_map = jf['rnd']\n self.env_jf = jf['env']", "def load_training_data(vocab, directory):\n top_level = os.listdir(directory)\n dataset = []\n for d in top_level:\n if d[-1] == '/':\n label = d[:-1]\n subdir = d\n else:\n label = d\n subdir = d+\"/\"\n files = os.listdir(directory+subdir)\n for f in files:\n bow = create_bow(vocab, directory+subdir+f)\n dataset.append({'label': label, 'bow': bow})\n return dataset", "def load_data(path, source='assistant', splits=(70, 10, 20)):\n if not len(splits) == 3:\n raise ValueError('splits expected to have three components: found {}'\n .format(len(splits)))\n train_size, test_size, val_size = np.array(splits) / np.sum(splits)\n\n data = json.load(open(path))\n if source == 'assistant':\n # Split into training set & (test + val) set.\n data_train, data_test = train_test_split(\n data, train_size=train_size\n )\n # Now, the (test + val) set into the test, and val sets\n data_test, data_val = train_test_split(\n data_test,\n test_size=(val_size / (val_size + test_size))\n )\n data = {\n 'train': data_train,\n 'test': data_test,\n 'val': data_val\n }\n elif source == 'sentiment':\n text, label = data['text'], data['label']\n\n # Split into training set & (test + val) set.\n text_train, text_test, label_train, label_test = train_test_split(\n text, label,\n train_size=train_size\n )\n # Now, the (test + val) set into the test, and val sets\n text_test, text_val, label_test, label_val = train_test_split(\n text_test, label_test,\n test_size=(val_size / (val_size + test_size))\n )\n data = {\n 'train': (text_train, label_train),\n 'test': (text_test, label_test),\n 'val': (text_val, label_val)\n }\n else:\n raise ValueError('Invalid source: {}'.format(source))\n return data", "def from_json_file(cls, json_file):\n with open(json_file, \"r\") as reader:\n return cls.from_dict(json.load(reader))", "def load_from_json(db: Database, json_file: str):\n db.clear_db()\n with open(json_file, 'r') as f:\n source = json.load(f)\n\n for work in source['works']:\n history = [] if 'history' not in work else work['history']\n db.insert_work({'code': work['id'],\n 'name': work['name'],\n 'world': work['world'],\n 'series': work['series'],\n 'genre': work['genre'],\n 'form': work['form'],\n 'status': work['status'],\n 'word_count': work['word_count'],\n 'type': 'work',\n 'last_change': history[-1]['timestamp'] if len(history) > 0 else '',\n 'parent': None,\n 'aggregate': False},\n commit=False)\n for entry in 
history:\n db.set_history(work['id'], entry['timestamp'], 'word_count', entry['count'], commit=False)\n\n for name, classifier in source['classifiers'].items():\n classifier['name'] = name\n db.insert_classifier(classifier, commit=False)\n\n for language, contexts in source['i18n'].items():\n for context, string_set in contexts.items():\n for key, value in string_set.items():\n db.set_translation(language, context, key, value, commit=False)\n\n db.force_commit()", "def from_json(cls, jsonfile, **kwargs):\n jdict = ltu.loadjson(jsonfile)\n slf = cls.from_dict(jdict, **kwargs)\n # Return\n return slf", "def load(path: str) -> \"DataDescriptor\":\n\n\t\twith open(path, \"r\") as f:\n\t\t\tinfo_dict = json.load(f)\n\n\t\treturn DataDescriptor(\n\t\t\tn_gram_size=int(info_dict[\"n_gram_size\"]),\n\t\t\tcaseless=bool(info_dict[\"caseless\"]),\n\t\t\tignore_punctuation=bool(info_dict[\"ignore_punctuation\"]),\n\t\t\tadd_pos_tags=bool(info_dict[\"add_pos_tags\"]),\n\t\t\tuses_lemma=bool(info_dict[\"uses_lemma\"]),\n\t\t\tuses_sentences=bool(info_dict[\"uses_sentences\"])\n\t\t)", "def _read_train_datas(self):\r\n with open(self.train_label_path, 'r') as fb:\r\n lines = fb.readlines()\r\n return self._parse_raw_labels(lines)", "def load_from_geojson(self, filename_or_url):", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def test_load_json_fobj():\n\n file_name = 'test_fooof_all'\n\n with open(os.path.join(TEST_DATA_PATH, file_name + '.json'), 'r') as f_obj:\n data = load_json(f_obj, '')\n\n assert data", "def load_data(data_path):\r\n\r\n with open(data_path, \"r\") as fp:\r\n data = json.load(fp)\r\n\r\n # Convert lists to numpy arrays.\r\n X = np.array(data[\"mel\"]) # The name in brackets is changed to \"mfccs\" if MFCC features are used to train.\r\n y = np.array(data[\"labels\"])\r\n return X, y", "def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = 
current_alphabet_dictionary", "def ignore_test_load_local_data(self):\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n assert train_data is not None\n assert len(train_data.training_examples) == 1000\n assert \"text\" not in train_data.training_examples[0].data\n assert \"label\" in train_data.training_examples[0].data", "def load_json(path):\n with open(path) as data_file:\n return json.load(data_file)", "def load_training_dataset(data_dir):\n\tball_images = load_ball_images_to_memory(data_dir)\n\tgen = functools.partial(data_generator, data_dir, ball_images)\n\treturn tf.data.Dataset.from_generator(gen, (tf.float32, tf.float32))", "def json_loader(filename):\n\n with open(filename, \"r\", encoding=\"UTF-8\") as source:\n data = json.load(source, object_hook=object_decode)\n return data", "def load_dataset(file_path):\n return Dataset.load(file_path)", "def load_data(path='mnist.npz'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(\n path,\n origin=origin_folder + 'mnist.npz',\n file_hash=\n '731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1')\n print('############################################' + path) \n with np.load(path, allow_pickle=True) as f: # pylint: disable=unexpected-keyword-arg\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n\n return (x_train, y_train), (x_test, y_test)", "def load_config(filename):\n with open(filename, \"r\") as my_file:\n my_file = my_file.read()\n return K.models.model_from_json(my_file)", "def load_json(self):\n\n self.load_json_str(self.get_json_str())", "def _load_fixture(filename):\n\n # Read the binary data into text\n with open(filename, 'rb') as stream:\n content = stream.read().decode('utf-8')\n\n # Decode the data as JSON\n data = json.loads(content)\n\n # Instantiate a session.\n session = Session()\n\n # Iterate through the entries to add them one by one.\n for item in data:\n # Resolve model from the table reference.\n table = Base.metadata.tables[item['model'].split('.')[-1]]\n\n # Add the primary key.\n item['fields']['id'] = item['pk']\n\n # Add a new row.\n session.connection().execute(table.insert().values(**item['fields']))\n\n # Commit the session to the database.\n session.commit()", "def load_data(filepath=None):\r\n if filepath is None:\r\n filepath = LATEST_DATA_SET_PATH\r\n\r\n with open(filepath) as file:\r\n return json.load(file)", "def load_json(self, file):\n with open(file, 'r', encoding=\"utf8\") as f:\n self.extend(json.load(f))", "def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')", "def load_from_json(file):\n with open(file, 'r') as f:\n return json.load(f)", "def load_data(self, annotation_json, images_dir):\r\n # Load json from file\r\n json_file = open(annotation_json)\r\n coco_json = json.load(json_file)\r\n json_file.close()\r\n \r\n # Add the class names using the base method from utils.Dataset\r\n source_name = \"coco_like\"\r\n ids={}\r\n i=0\r\n for category in coco_json['categories']:\r\n i+=1\r\n class_id = category['id']\r\n ids[class_id]=i\r\n class_name = category['name']\r\n if class_id < 1:\r\n print('Error: Class id for \"{}\" cannot be less than one. 
(0 is reserved for the background)'.format(class_name))\r\n return\r\n \r\n self.add_class(source_name, class_id, class_name)\r\n for annotation in coco_json['annotations']:\r\n annotation[\"category_id\"]=ids[annotation[\"category_id\"]]\r\n \r\n # Get all annotations\r\n \r\n annotations = {}\r\n for annotation in coco_json['annotations']:\r\n image_id = annotation['image_id']\r\n if image_id not in annotations:\r\n annotations[image_id] = []\r\n annotations[image_id].append(annotation)\r\n \r\n # Get all images and add them to the dataset\r\n seen_images = {}\r\n for image in coco_json['images']:\r\n image_id = image['id']\r\n if image_id in seen_images:\r\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\r\n else:\r\n seen_images[image_id] = image\r\n try:\r\n image_file_name = image['file_name']\r\n image_width = image['width']\r\n image_height = image['height']\r\n except KeyError as key:\r\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\r\n \r\n image_path = os.path.abspath(os.path.join(images_dir, image_file_name))\r\n image_annotations = annotations[image_id]\r\n \r\n # Add the image using the base method from utils.Dataset\r\n self.add_image(\r\n source=source_name,\r\n image_id=image_id,\r\n path=image_path,\r\n width=image_width,\r\n height=image_height,\r\n annotations=image_annotations\r\n )", "def load_json(self, json_path=None):\n if json_path is None:\n json_path = self.json_path\n with open(json_path, encoding='utf-8', mode='r') as f:\n data = json.load(f)\n return data", "def loadJson (self, path):\n\n # get all lines in json, concatenate then into a big string then parse it\n with open(path, \"r\") as file_content:\n all_lines = file_content.readlines()\n all_content_str = \"\".join(all_lines)\n json_dict = json.loads(all_content_str)\n self.tile_reprs = list(json_dict['tiles']['structural-tiles'].keys())\n\n # remove this empty char\n self.tile_reprs.remove(\"-\")", "def from_json(cls, file_path):\n profile = cls()\n with open(file_path, 'r') as fd:\n profile._ngrams = json.load(fd)\n return profile", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def load_data(self, json_file, target_index='infinity'):\n # data = json.loads(json_file)\n with open(json_file) as f:\n data = json.load(f)\n self.elasticsearch.index(index=target_index, doc_type='image_vector', body=data)", "def load(self, path):\n with open(path, \"rt\") as open_file:\n data = json.load(open_file)\n return data" ]
[ "0.7791885", "0.7707302", "0.70239383", "0.6984626", "0.6925664", "0.6893865", "0.6847589", "0.67815673", "0.6773288", "0.66926444", "0.6675152", "0.6651707", "0.6640259", "0.66312504", "0.6630534", "0.6623066", "0.66220605", "0.6606385", "0.65876716", "0.65810245", "0.65713644", "0.6505979", "0.64789426", "0.64755666", "0.64690053", "0.64564073", "0.64432675", "0.64190453", "0.64051354", "0.63528025", "0.6338239", "0.6329401", "0.63268745", "0.63077235", "0.62888277", "0.6286131", "0.6276163", "0.6261153", "0.62574524", "0.621978", "0.6212854", "0.6205364", "0.6189412", "0.6187334", "0.61744684", "0.617394", "0.61671996", "0.6165447", "0.61649555", "0.61483", "0.61388934", "0.6134972", "0.613142", "0.6124025", "0.61219925", "0.6110298", "0.61006457", "0.60902065", "0.6079083", "0.60781497", "0.60781246", "0.60659367", "0.6058301", "0.6043083", "0.6042904", "0.6022686", "0.60220486", "0.60196406", "0.60171527", "0.60108596", "0.6005297", "0.6004082", "0.6003016", "0.5997511", "0.5997309", "0.5993864", "0.59911436", "0.59906906", "0.598785", "0.5985788", "0.598369", "0.598368", "0.59829897", "0.5982295", "0.59816647", "0.59773767", "0.5974526", "0.5954236", "0.5952561", "0.59378296", "0.5936877", "0.5932717", "0.59311426", "0.5925998", "0.5925396", "0.59240174", "0.59209645", "0.5918728", "0.5917995", "0.5916831" ]
0.71105415
2
Creates train, validation and test sets.
def prepare_dataset(data_path, test_size=0.2, validation_size=0.2):

    # load dataset
    if data_path.endswith('json'):
        X, y = load_data_from_json(data_path)
    else:
        X, y = load_data_from_fold(data_path)

    # create train, validation, test split
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
    X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=validation_size)

    # add an axis to nd array
    X_train = X_train[..., np.newaxis]
    X_test = X_test[..., np.newaxis]
    X_validation = X_validation[..., np.newaxis]

    return X_train, y_train, X_validation, y_validation, X_test, y_test
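# Usage sketch (illustrative only, not from the source repo): assumes numpy as np,
# sklearn.model_selection.train_test_split, and the loaders referenced above
# (load_data_from_json / load_data_from_fold) are importable in this module;
# the file path below is a hypothetical example.
if __name__ == "__main__":
    X_train, y_train, X_val, y_val, X_test, y_test = prepare_dataset(
        "data/features.json", test_size=0.2, validation_size=0.2)
    # each X split carries the trailing channel axis added via np.newaxis
    print(X_train.shape, X_val.shape, X_test.shape)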
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_train_valid_set(self):\n\n if not self.eq_train:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level, self.train_weights, self.y_train,\n train_size=0.7, test_size=0.3\n )\n else:\n X_train_high_level, X_valid_high_level, X_train_low_level, X_valid_low_level, train_w, valid_w, w_train_eq, w_valid_eq, y_train, y_valid = train_test_split(self.X_train_high_level, self.X_train_low_level,\n self.train_weights, self.train_weights_eq, self.y_train,\n train_size=0.7, test_size=0.3\n )\n self.train_weights_eq = w_train_eq\n\n #NOTE: might need to re-equalise weights in each folds as sumW_sig != sumW_bkg anymroe!\n self.train_weights = train_w\n self.valid_weights = valid_w #validation weights should never be equalised weights!\n\n print 'creating validation dataset'\n self.X_train_high_level = X_train_high_level\n self.X_train_low_level = self.join_objects(X_train_low_level)\n\n self.X_valid_high_level = X_valid_high_level\n self.X_valid_low_level = self.join_objects(X_valid_low_level)\n print 'finished creating validation dataset'\n\n self.y_train = y_train\n self.y_valid = y_valid", "def createTrainTestSets():\n tweets = open(noDuplicatesFilename, 'r').read().splitlines()\n name_mapping = loadNameMapping()\n holdoutLocations = [u'Frederiksberg, Danmark', u'T\\xe5rnby, Danmark', u'Kolding, Danmark', u'T\\xe4by, Sverige', u'Kungsbacka, Sverige', u'Kristianstad, Sverige', u'Bod\\xf8, Norge', u'Kvinnherad, Norge', u'Ullensaker, Norge']\n testSetLocation = []\n rest = []\n for tweet in tweets:\n if stringToTweet(tweet).getFullName() in holdoutLocations:\n testSetLocation.append(tweet)\n else:\n rest.append(tweet)\n tweets = rest\n testIndex = int(round(len(tweets) * (1 - test_set_ratio)))\n random.seed(1)\n random.shuffle(tweets)\n trainSet = tweets[:testIndex]\n testSet = tweets[testIndex:]\n open(trainSetFilename, 'w').write('\\n'.join(trainSet))\n open(testSetNormalFilename, 'w').write('\\n'.join(testSet))\n open(testSetLocationFilename, 'w').write('\\n'.join(testSetLocation))\n print \"Wrote %d tweets to train set\" % len(trainSet)\n print \"Wrote %d tweets to normal test set\" % len(testSet)\n print \"Wrote %d tweets to location test set\" % len(testSetLocation)", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n 
degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset", "def initSets(self):\n data_frame = pd.read_csv(self.train_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n if self.test_file is None:\n self.train_feat, self.test_feat, self.train_labl, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n else:\n data_frame = pd.read_csv(self.test_file, header=None)\n data_frame = data_frame.drop(columns=self.drop_cols)\n self.train_feat, _, self.train_labl, _ = train_test_split(features, labels, test_size=self.test_size)\n features = data_frame.iloc[:, :-1].values\n labels = data_frame.iloc[:, -1].values\n _, self.test_feat, _, self.test_labl = train_test_split(features, labels, test_size=self.test_size)\n # kfold = KFold(n_splits=3)\n # self.train_index, self.test_index = kfold.split(features,labels)", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def generateTrainAndValidateset(trainSets, validateSets, validatePercentage=20):\n\tvalidateFiles = []\n\ttrainFiles = []\n\n\tfor validateSet in validateSets:\n\t\tif \".\" in validateSet:\n\t\t\tvalidateSet, percentage = validateSet.split(\".\")\n\n\t\t\tif percentage == \"all\":\n\t\t\t\t#overwrite any further checks and security measures, just append all files:\n\t\t\t\tvalidateFiles += getAllFiles([validateSet])\n\t\t\t\tcontinue\n\n\t\t\tpercentage = int(percentage)\n\t\telse:\n\t\t\tpercentage = validatePercentage\n\n\t\tif validateSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid validate set: \" + validateSet)\n\n\t\tallFiles = sorted(filter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[validateSet])))\n\t\tallFiles = list(map(lambda x: _dataSets[validateSet] + x, allFiles))\n\t\trandom.seed(42) #make sure all lists are randomized equally each time\n\t\trandom.shuffle(allFiles)\n\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\tvalidateFiles += allAroused[len(allAroused) - int(percentage * len(allFiles) / 100 / 2):]\n\t\tvalidateFiles += allNonAroused[len(allNonAroused) - int(percentage * len(allFiles) / 100 / 2):]\n\n\n\tfor trainSet in trainSets:\n\t\tif \".\" in trainSet:\n\t\t\ttrainSet, percentage = trainSet.split(\".\", 1)\n\n\t\t\tif percentage == \"all\":\n\t\t\t\t#overwrite any further checks and security measures, just append all files:\n\t\t\t\ttrainFiles += getAllFiles([trainSet])\n\t\t\t\tcontinue\n\n\t\t\tpercentage = int(percentage)\n\t\telse:\n\t\t\tpercentage = 100 - validatePercentage\n\t\t\tvalidatePercentage = validatePercentage\n\n\t\tif trainSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid train set: \" + trainSet)\n\n\t\tallFiles = sorted(filter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[trainSet])))\n\t\tallFiles = list(map(lambda x: _dataSets[trainSet] + x, allFiles))\n\t\trandom.seed(42) #make sure all lists 
are randomized equally each time\n\t\trandom.shuffle(allFiles)\n\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\ttrainFiles += filter(lambda x: x not in validateFiles, allAroused[:int(percentage * len(allFiles) / 100 / 2)])\n\t\ttrainFiles += filter(lambda x: x not in validateFiles, allNonAroused[:int(percentage * len(allFiles) / 100 / 2)])\n\n\tif not any(map(lambda x: x.endswith(\".all\"), list(trainSets) + list(validateSets))):\n\t\t#assert no validatefiles are also trainfiles\n\t\tassert(set(trainFiles) - set(validateFiles) == set(trainFiles))\n\t\t#assert an equal amount of aroused and non-aroused validatefiles\n\t\tassert(len(list(filter(isAroused, validateFiles))) == len(validateFiles) / 2)\n\n\treturn trainFiles, validateFiles", "def create_train_sets(self, proportion_val):\n l_path = os.listdir(self.image_folder_path)\n lr_path = random.sample(l_path, len(l_path))\n val_files = lr_path[: round(proportion_val * len(lr_path))]\n train_files = lr_path[round(proportion_val * len(lr_path)) :]\n delete_files(self.root_name, \"/VOC2021/ImageSets/Main\")\n write_txt(\"train.txt\", self.txt_path, train_files)\n write_txt(\"val.txt\", self.txt_path, val_files)", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 
'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def create_sets(test, data, test_size=0.2, write=False):\n y_test = test['y_old']\n X_test = test.drop('y_old', 1)\n y_data = data['y_old']\n X_data = data.drop('y_old', 1)\n X_train, X_val, y_train, y_val = train_test_split(X_data, y_data, test_size=test_size, random_state=123)\n if write:\n pickle.dump((X_train, X_val, y_train, y_val), open(obj_save_path+'train_val_df.p', 'wb'))\n #X_train, X_val, y_train, y_val = pickle.load(open(obj_save_path+'train_val_df.p', 'rb'))\n return X_train, y_train, X_val, y_val, X_test, y_test", "def make_generators():\n \n # All images will be rescaled by 1./255\n train_datagen = ImageDataGenerator(rescale=1./255)\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n \n train_generator = train_datagen.flow_from_directory(\n TRAIN_DATA_PATH,\n target_size= (150, 150),\n batch_size= 20,\n class_mode= 'sparse')\n\n validation_generator = test_datagen.flow_from_directory(\n VAL_DATA_PATH,\n target_size= (150, 150),\n batch_size= 20,\n class_mode= 'sparse')\n\n return train_generator, validation_generator", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. 
Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def setup():\n for dir_path in [train_dir, output_dir]:\n Path(dir_path).mkdir(exist_ok=True)\n\n # create the training and test data files that we will use\n create_jsonlines_feature_files(train_dir)", "def create_train_test_sets(self,x,y,lenTest):\n \n nbInd = x.shape[0]\n shuffler = np.random.permutation(nbInd)\n x_train = x[shuffler][0:(nbInd-lenTest),]\n y_train = y[shuffler][0:(nbInd-lenTest),]\n\n x_test = x[shuffler][(nbInd-lenTest):nbInd,]\n y_test = y[shuffler][(nbInd-lenTest):nbInd,]\n\n return x_train,y_train,x_test,y_test", "def test_training(self):\n\t\tpass", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def create_train_validate_test_sets(self,X,Y):\n\n\t\tprint \"Size of the original images\"\n\n\t\tX = np.asarray(X, dtype=theano.config.floatX)\n\t\t\n\t\ttrain_length = int(round(len(X) * 0.60))\n\t\tvalid_length = int(round(len(X) * 0.20))\n\t\ttest_length = int(round(len(X) * 0.20))\n\n\t\tX_train = X[0:train_length]\n\t\tX_valid = X[train_length: (train_length + valid_length)]\n\t\tX_test = X[-test_length:]\n\n\t\t# sample = X_train[0].reshape(64,64)\n\n\t\t# X_train = X_train.transpose(0, 3, 1, 2)\n\t\t# X_valid = X_valid.transpose(0, 3, 1, 2)\n\t\t# X_test = X_test.transpose(0, 3, 1, 2)\n\n\t\t# X = X.transpose(0, 3, 1, 2)\n\n\t\tX_train = map(self.flaten_aux, X_train)\n\t\tX_valid = map(self.flaten_aux, X_valid)\n\t\tX_test = map(self.flaten_aux, X_test)\n\n\t\t# X = map(self.flaten_aux, X)\n\n\t\t#print X_train.shape\n\t\t#X = X.transpose(0, 3, 1, 2)\n\t\t# X = np.asarray(X, dtype=theano.config.floatX)\n\t\t# X = X.reshape((21, 3, 64, 64))\n\t\t# print X.shape\n\t\t# #X_train = X_train.transpose(0, 3, 1, 2)\n\t\t# #print X[0].\n\t\t# im = Image.fromarray(X[0],mode=\"RGB\")\n\t\t# im.show()\n\t\t#self.reconstructImage(X[0]).show()\n\t\t# sample = X_train[0].reshape(64,64)\n\t\t# Image.fromarray(sample,mode=\"L\").show()\n\n\t\t#X = map(self.flaten_aux, X)\n\n\t\t# X_train = X[0:train_length]\n\t\t# X_valid = X[train_length: (train_length + valid_length)]\n\t\t# X_test = X[-test_length:]\n\n\t\tY_train = Y[0:train_length]\n\t\tY_valid = Y[train_length:(train_length + valid_length)]\n\t\tY_test = Y[-test_length:]\n\n\t\t#pkl_file = open( '../data/lb.pkl', 'rb')\n\t\t#lb = cPickle.load(pkl_file)\n\n\t\t#arr = np.array(np.round((X_train[0] * 256).reshape((64,64))),dtype=np.uint8)\n\t\t# Image.fromarray(arr,mode=\"L\").show()\n\t\t# print lb.classes_\n\t\t# print Y_train[0]\n\n\t\ttrain_set = [X_train,Y_train]\n\t\tvalid_set = [X_valid,Y_valid]\n\t\ttest_set = [X_test,Y_test]\n\t\tinput = [X,Y]\n\n\t\tif self.verbose:\n\t\t\tprint \"X_train {} X_validation {} X_test {}\".format(len(X_train),len(X_valid),len(X_test))\n\t\t\tprint \"Y_train {} Y_validation {} Y_test {}\".format(len(Y_train),len(Y_valid),len(Y_test))\n\n\t\toutput = open(self.data_path + 
'train_set.pkl', 'wb')\n\t\tcPickle.dump(train_set, output,protocol=-1)\n\t\toutput.close()\n\n\t\toutput = open(self.data_path + 'valid_set.pkl', 'wb')\n\t\tcPickle.dump(valid_set, output,protocol=-1)\n\t\toutput.close()\n\n\t\toutput = open(self.data_path + 'test_set.pkl', 'wb')\n\t\tcPickle.dump(test_set, output,protocol=-1)\n\t\toutput.close()\n\t\t\n\t\treturn train_set,valid_set,test_set", "def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def setup(self, stage: Optional[str] = None):\n if stage in (None, 'fit'):\n # Get a 20% of the train data for validation in a stratified way.\n _x = [i[1] for i in self.splits['train']]\n _y = [i[0] for i in self.splits['train']]\n\n _train_x, _val_x, _train_y, _val_y = train_test_split(_x, _y, test_size=0.2,\n stratify=_y)\n #print(np.unique(_train_y, return_counts=True))\n #print(np.unique(_val_y, return_counts=True))\n\n self.splits['train'] = [[i, j] for i,j in zip(_train_y, _train_x)]\n self.splits['valid'] = [[i, j] for i,j in zip(_val_y, _val_x)]\n\n self.datasets['train'] = FewShotDataset(self.splits['train'], self.ops)\n self.datasets['valid'] = FewShotDataset(self.splits['valid'], self.ops)\n\n if stage in (None, 'test'):\n self.datasets['test'] = FewShotDataset(self.splits['test'], self.ops)", "def create_train_test(option, transform, params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_im_dir, matting_dir)\r\n \r\n #separate data in training and test data (20/80)\r\n train_df, test_df = train_test_split(data_df, test_size=split)\r\n \r\n #search right Dataset class\r\n package_dir = Path(src.dataset.__file__).resolve().parent\r\n\r\n for (_, module_name, _) in iter_modules([package_dir]):\r\n # print(module_name, self.ComType)\r\n if option.dataset.lower() == module_name.lower() :\r\n modelModule = importlib.import_module(\".\"+module_name)\r\n break\r\n \r\n # train data\r\n training_set = modelModule(train_df, clip_im_dir, matting_dir, transform, transform)\r\n train_loader = DataLoader(training_set, **params)\r\n \r\n \r\n #test data\r\n testing_set = 
modelModule(test_df, clip_im_dir, matting_dir, transform, transform)\r\n test_loader = DataLoader(testing_set, **params)\r\n \r\n return train_loader, test_loader", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n path = os.getcwd() # reads the current path\n x_train = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_train = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n x_test = np.load(path + '/files/tinyX_test.npy', 'r') # reads the input file\n x_train, y_train = shuffle(x_train, y_train)\n\n return x_train, y_train, x_test", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def read_and_split_sets():\n gen_train_test_sets(\"Data_Sent_Embds/en_sent.pkl\", \"Data_Sent_Embd_Splitted/en_train.pkl\",\n \"Data_Sent_Embd_Splitted/en_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/es_sent.pkl\", \"Data_Sent_Embd_Splitted/es_train.pkl\",\n \"Data_Sent_Embd_Splitted/es_test.pkl\")\n gen_train_test_sets(\"Data_Sent_Embds/pr_sent.pkl\", \"Data_Sent_Embd_Splitted/pr_train.pkl\",\n \"Data_Sent_Embd_Splitted/pr_test.pkl\")", "def load_training_set():\n global training_set\n f = 
gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)", "def create_sets():\n global train_x, train_y, val_x, val_y\n\n print('Creating sets')\n\n dataframe = pd.read_csv('LoggerBot.log', names=NAMES).sample(frac=1)\n inputs = dataframe.values[:,:-1].astype(np.float32)\n outputs = dataframe.values[:,-1].astype(np.int32)\n\n train_set_size = int(len(dataframe) * 0.7)\n train_x, train_y = inputs[:train_set_size], outputs[:train_set_size]\n val_x, val_y = inputs[train_set_size:], outputs[train_set_size:]", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def create_models(outdir, train_features=None, train_groundtruths=None, train_file=None, train_dir=None, separator=\" \", classifiers=None):\n\n utils.print_success(\"Creating models\")\n\n outdir = utils.abs_path_dir(outdir) + 
\"/\"\n\n if train_file is not None:\n features, groundtruths = read_train_file(train_file)\n elif train_dir is not None:\n features, groundtruths = read_train_files(train_dir, separator=separator)\n else:\n utils.print_warning(\"TODO Manage train feat and gts\")\n\n if classifiers is None:\n classifiers = {\n \"RandomForest\": RandomForestClassifier(),\n \"LogisticRegression\":LogisticRegression(),\n \"KNeighbors\":KNeighborsClassifier(),\n \"DecisionTree\":DecisionTreeClassifier(),\n \"AdaBoost\":AdaBoostClassifier(),\n \"GradientBoosting\":GradientBoostingClassifier(),\n \"ExtraTrees\":ExtraTreesClassifier(),\n \"SVM\":SVC(kernel=\"linear\", C=0.025, probability=True)\n\n # \"GaussianProcess\":GaussianProcessClassifier(),\n # \"MLP\":MLPClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n else:\n if \"RandomForest\" in classifiers:\n clf_name = \"RandomForest\"\n begin = int(round(time.time() * 1000))\n utils.print_success(\"Starting \" + clf_name)\n clf_dir = outdir + clf_name + \"/\"\n utils.create_dir(clf_dir)\n clf = RandomForestClassifier(n_jobs=-1)\n # clf = RandomForestClassifier(verbose=100)\n clf.fit(features, groundtruths)\n joblib.dump(clf, clf_dir + clf_name + \".pkl\")\n utils.print_info(clf_name + \" done in \" + str(int(round(time.time() * 1000)) - begin) + \"ms\")\n\n # # Parallel computing\n # clf = []\n # for key in classifiers:\n # clf.append(key)\n # partial_create_model = partial(create_model, features=features, groundtruths=groundtruths, outdir=outdir, classifiers=classifiers)\n # # pool = multiprocessing.Pool(4)\n # pool = multiprocessing.Pool(len(classifiers))\n # pool.map(partial_create_model, clf) #make our results with a map call\n # pool.close() #we are not adding any more processes\n # pool.join() #tell it to wait until all threads are done before going on", "def split_train_test_dev(self):\n for dir_name in (self.config.train_dir, self.config.dev_dir,\n self.config.test_dir):\n create_dir(dir_name)\n\n self.split_helper(self.config.parsed_train_file_pos, 'pos')\n self.split_helper(self.config.parsed_train_file_neg, 'neg')", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def set_train(self):\n for m in self.models.values():\n m.train()", "def make_train_validation_test_sets(path_to_json, out_dir, path_to_images,\n train_fraction=0.6,\n validation_fraction=0.2,\n test_fraction=0.2,\n do_print=False):\n assert train_fraction + validation_fraction + test_fraction == 1, 'Sum of subsets 
fractions must be 1'\n df = pd.read_json(path_to_json)\n # one-hot encode labels\n df['Class'] = df['Class'].replace(to_replace=[3, 4, 5, 7, 8, 10],\n value=['Unilamellar', 'Multilamellar', 'Uncertain', 'Empty', 'Full', 'Uncertain'])\n\n\n # present class captions as one hot encoding\n df = pd.concat([df, pd.get_dummies(df['Class'], prefix='Label')], axis=1)\n\n # Check that all images in dataframe have corresponding file on the disk\n for index, row in df.iterrows():\n if not os.path.isfile(path_to_images + row['Image']):\n print '{} image was not found. This example will be deleted'.format(row['Image'])\n df.drop(index, inplace=True)\n\n # prepare new dataframes\n df_train = pd.DataFrame()\n df_validation = pd.DataFrame()\n df_test = pd.DataFrame()\n\n if do_print:\n print '----------\\nEntire set:\\n', df['Class'].value_counts()\n\n class_counts = df['Class'].value_counts().to_dict()\n for label, count in class_counts.iteritems():\n df_test = pd.concat([df_test, df[df['Class'] == label].sample(frac=test_fraction)])\n df = df[~df.index.isin(df_test.index)]\n\n validation_fraction_adjusted = validation_fraction / (1 - test_fraction)\n df_validation = pd.concat([df_validation, df[df['Class'] == label].sample(frac=validation_fraction_adjusted)])\n df = df[~df.index.isin(df_validation.index)]\n\n df_train = pd.concat([df_train, df[df['Class'] == label]])\n df = df[~df.index.isin(df_train.index)]\n\n if do_print:\n print '----------\\nTrain set:\\n', df_train['Class'].value_counts()\n print '----------\\nValidation set:\\n', df_validation['Class'].value_counts()\n print '----------\\nTest set:\\n', df_test['Class'].value_counts()\n\n # remove out_file if it exists\n filenames = ['train_set.json', 'test_set.json', 'validation_set']\n for f in filenames:\n try:\n os.remove(out_dir + f)\n except OSError:\n pass\n except IOError:\n pass\n\n df_train.to_json(out_dir + 'train_set.json')\n df_validation.to_json(out_dir + 'validation_set.json')\n df_test.to_json(out_dir + 'test_set.json')", "def read_data_sets(data_path, fake_data=False, one_hot=False,\n validation_size=5000, source_url={},\n augment=False,\n percentage_train=100.,\n unbalance=False, unbalance_dict={\"percentage\": 20, \"label1\": 0, \"label2\": 8},\n ):\n\n class DataSets(object):\n pass\n\n data_sets = DataSets()\n\n if fake_data:\n data_sets.train = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.validation = DataSet([], [], fake_data=True, one_hot=True)\n data_sets.test = DataSet([], [], fake_data=True, one_hot=True)\n return data_sets\n\n if not source_url: # empty string check\n if 'fashion' in data_path:\n source_url = DEFAULT_SOURCE_URL_FASHION\n else:\n source_url = DEFAULT_SOURCE_URL_MNIST\n\n if 'fashion' in data_path or 'mnist' in data_path: # mnist or fashion\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_mnist(data_path, validation_size, source_url, one_hot)\n reshape = True\n else:\n train_images, train_labels, val_images, val_labels, test_images, test_labels = \\\n load_medical_data(data_path)\n reshape = False\n\n # add random permutation to train & validation\n np.random.seed(42)\n\n n_train = train_images.shape[0]\n perm = np.random.permutation(n_train)\n train_images = train_images[perm]\n train_labels = train_labels[perm]\n\n n_val = val_images.shape[0]\n perm = np.random.permutation(n_val)\n val_images = val_images[perm]\n val_labels = val_labels[perm]\n\n # For experiments with data-augmentation\n if augment:\n if 'fashion' in data_path: # rotations +-10 and 
horizontal flips\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=True)\n elif 'mnist' in data_path: # rotations +-10\n augmented_images, augmented_labels = augment_data(train_images, train_labels, hflip=False)\n train_images = np.concatenate([train_images, np.expand_dims(augmented_images, 3)])\n train_labels = np.concatenate([train_labels, augmented_labels])\n # for the medical datasets, you can use the \"augment\" argument while doing patch extraction\n\n # For experiments with limited amount of data\n if percentage_train != 100.:\n train_size = int(0.01*percentage_train*train_images.shape[0])\n Xtrain_images, Xval_images, ytrain, yval = train_test_split(train_images, train_labels, train_size=train_size)\n train_images = Xtrain_images\n train_labels = ytrain\n\n # For experiments with class-imbalance distribution\n if unbalance:\n n_classes = len(np.unique(np.argmax(train_labels, 1)))\n reduceto = 0.01*unbalance_dict['percentage']\n label1 = unbalance_dict['label1']\n label2 = unbalance_dict['label2']\n\n pick_ids = []\n newsize = 0\n all_classes = np.arange(0, n_classes)\n all_classes = np.delete(all_classes, np.where(all_classes == label1)[0])\n all_classes = np.delete(all_classes, np.where(all_classes == label2)[0])\n\n for lab in [label1, label2]:\n allids = np.where(np.argmax(train_labels, 1) == lab)[0]\n selectedids = np.random.choice(allids, int(reduceto * allids.shape[0]), replace=False)\n pick_ids.append(selectedids)\n newsize += len(selectedids)\n\n new_ids = convert_list_to_array(pick_ids, newsize)\n\n other_ids = []\n othersize = 0\n for lab in all_classes.tolist():\n selectedids = np.where(np.argmax(train_labels, 1) == lab)[0]\n other_ids.append(selectedids)\n othersize += len(selectedids)\n\n keep_ids = convert_list_to_array(other_ids, othersize)\n\n # new_ids: contains the indices of the reduced (imbalance) classes\n # keep_ids: contains the indices of the rest (keep the same class distribution)\n resulting_ids = np.concatenate((new_ids, keep_ids))\n np.random.shuffle(resulting_ids)\n\n train_images = train_images[resulting_ids, ...]\n train_labels = train_labels[resulting_ids, ...]\n\n data_sets.train = DataSet(train_images, train_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.validation = DataSet(val_images, val_labels, fake_data=True, one_hot=True, reshape=reshape)\n data_sets.test = DataSet(test_images, test_labels, fake_data=True, one_hot=True, reshape=reshape)\n\n return data_sets", "def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def create_train_test_sets(conform_shape=True, indi_proportion=0.50, incl_group_imgs=True):\r\n X_train_indi, y_train_indi = build_dataframe('Individual_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_indi, y_test_indi = build_dataframe('Individual_Test_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n \r\n X_train_group, y_train_group = build_dataframe('Group_Training_Images',\r\n img_input_shape, conform_shape=conform_shape)\r\n X_test_group, y_test_group = build_dataframe('Group_Test_Images',\r\n img_input_shape, 
conform_shape=conform_shape)\r\n \r\n X_train_indi, y_train_indi = subsample_dataframe(X_train_indi, y_train_indi,indi_proportion)\r\n \r\n if incl_group_imgs:\r\n X_train = np.concatenate([X_train_indi,X_train_group])\r\n y_train = np.concatenate([y_train_indi,y_train_group])\r\n else: \r\n X_train = X_train_indi.copy()\r\n y_train = y_train_indi.copy()\r\n\r\n return X_train, y_train, X_test_indi, y_test_indi, X_test_group, y_test_group", "def train(\n train_sets: tuple,\n test_sets: tuple,\n input_shape: tuple = (1, 128, 128, 1),\n model_version=\"1.0.0\",\n epochs: int = 100,\n classes: int = 2,\n batch_size: int = 1,\n verbose=1,\n out_dir: str = \"saved_models\"):\n (x_train, y_train), (x_test, y_test) = train_sets, test_sets\n y_train = keras.utils.to_categorical(y_train, classes)\n y_test = keras.utils.to_categorical(y_test, classes)\n m = get_model(model_version)\n if not m:\n return\n model = m.build_model(input_shape)\n model.compile(\n loss=BinaryCrossentropy(),\n optimizer=RMSprop(learning_rate=0.0001),\n metrics=['accuracy']\n )\n saver = ModelSaver(out_dir)\n csv_logger = CSVLogger(\n \"%s/%s/log.csv\" %\n (out_dir, datetime.datetime.now().date().strftime(\"%Y_%m_%d\")),\n append=True,\n separator=','\n )\n history = model.fit(\n x_train,\n y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n validation_data=(x_test, y_test),\n callbacks=[saver, csv_logger]\n )\n model.save(\"%s/%s/final.hd5\" %\n (out_dir, datetime.datetime.now().date().strftime(\"%Y_%m_%d\")))\n print(\"Model saved in %s as final.hd5\" % out_dir)\n plot_results(\n history,\n epochs,\n out_dir\n )", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\t \n\t \n\t# might be useful in your code later...\n\t# this is a list of all features in the training set.\n\tself.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n\t\n\tif (self.automaticTuning):\n\t\tkgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n\telse:\n\t\tkgrid = [self.k]\n\t\t\n\tself.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def train_test_split(self):\n random.seed(self.args.seed)\n nodes = [node for node in range(self.ncount)]\n random.shuffle(nodes)\n self.train_nodes = torch.LongTensor(nodes[0:self.args.training_size])\n self.validation_nodes = torch.LongTensor(nodes[self.args.training_size:self.args.training_size+self.args.validation_size])\n self.test_nodes = torch.LongTensor(nodes[self.args.training_size+self.args.validation_size:])", "def run_training_save_tests():\n test_training_save()\n test_distributed_training_save()\n test_multimodel_training_save()\n test_distributed_multimodel_training_save()", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def build_all_datasets(\n cfg, tokenizer, 
train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset", "def prepare_train_validation(self) -> Tuple:\n Xt, Xv, Yt, Yv = self.dataset.train_test_split_representations()\n\n Xt = self.dataset.prepare_input_samples(Xt)\n Yt = self.dataset.prepare_output_samples(Yt)\n traindataset = tf.data.Dataset.from_tensor_slices((Xt, Yt))\n traindataset = traindataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n Xv = self.dataset.prepare_input_samples(Xv)\n Yv = self.dataset.prepare_output_samples(Yv)\n validdataset = tf.data.Dataset.from_tensor_slices((Xv, Yv))\n validdataset = validdataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n return traindataset, validdataset", "def test_init(self, epochs):\n i = -1\n for p in self.P:\n for subband in self.SUBBANDS:\n i += 1\n\n # --- load model ----\n pref = self.model_dir + \"/\" + self.name % (subband, p)\n model = copy.deepcopy(self.model)\n model.model.load_weights(pref + \"_epochs_%d\" % epochs[i])\n self.NET.append(model)\n # --- end load model ----\n\n # --- load permutation ----\n self.permutation.append(\n np.load(self.model_dir + \"/permutation_\" + self.name %\n (subband, p) + \".npy\"))\n # --- end load permutation ----", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], 
dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def construct_data(paths=DEFAULT_PATHS, use_saved=True):\n if not verify_paths(paths):\n raise FileNotFoundError('Some of the required data files could not be '\n 'found. 
Before running the project, run '\n '`setup.sh` to create/download them.')\n\n # Paths to save or load the constructed datasets from\n saved_train = os.path.join(paths['dir_output'], 'train.pk')\n saved_test = os.path.join(paths['dir_output'], 'test.pk')\n\n # Load the data if possible\n if (os.path.exists(saved_train) and os.path.exists(saved_test)\n and use_saved):\n print('Found existing saved dataset; loading it...')\n with open(saved_train, mode='rb') as train_file:\n train = pickle.load(train_file)\n with open(saved_test, mode='rb') as test_file:\n test = pickle.load(test_file)\n return train, test\n\n print('Constructing dataset...')\n\n # Read in the .csv files and create DataFrames for train, test observations\n depths = pd.read_csv(paths['df_depths'], index_col='id')\n train = pd.read_csv(paths['df_train'], index_col='id', usecols=[0])\n train = train.join(depths)\n test = depths[~depths.index.isin(train.index)].copy()\n\n # (Training images)\n print('Reading training images...')\n path = paths['dir_train_images'] + '{}.png'\n train['image'] = [read_image(path.format(img))\n for img in tqdm(train.index)]\n\n # (Training masks)\n print('Reading training masks...')\n path = paths['dir_train_masks'] + '{}.png'\n train['mask'] = [read_image(path.format(img)) for img in tqdm(train.index)]\n\n # (Testing images)\n print('Reading test images...')\n path = paths['dir_test_images'] + '{}.png'\n test['image'] = [read_image(path.format(img)) for img in tqdm(test.index)]\n\n # Calculate the coverage for the training images\n # Then, bin the images into discrete classes corresponding to coverage\n train['coverage'] = train['mask'].map(np.sum) / pow(101, 2)\n train['cov_class'] = train['coverage'].map(\n lambda cov: np.int(np.ceil(cov * 10)))\n\n # Write to file\n print('Saving the constructed dataset...')\n try:\n with open(saved_train, mode='wb') as train_file:\n pickle.dump(train, train_file)\n with open(saved_test, mode='wb') as test_file:\n pickle.dump(test, test_file)\n except OSError:\n print('Could not save the data due to an occasional Python bug on '\n 'some systems. 
:( If this is happening on macOS, try running on '\n 'Linux instead.')\n\n return train, test", "def _train(args, pretrain_args):\n start_time = time.time()\n print('Training', ', '.join(args.speakers), '...')\n\n # randomly sample validation set monte_carlo_cv_num times\n for num in range(args.monte_carlo_cv_num):\n # get seed used to sub-sample validation dataset (use 42 for 1st run)\n seed = utils.get_seed(num)\n\n # get train/valid/test data and convert to sequences\n train_data, valid_data, test_data, id_to_word = data_reader.get_data(\n args, seed=seed)\n # set configurations/hyperparameters for model\n config, test_config = utils.set_config(args, id_to_word)\n\n # initialize word embeddings\n init_embed = utils.init_embedding(id_to_word, dim=args.embed_size,\n init_scale=args.init_scale,\n embed_path=args.embed_path)\n\n with tf.Graph().as_default():\n # initializer used to initialize TensorFlow variables\n initializer = tf.random_uniform_initializer(-config['init_scale'],\n config['init_scale'])\n # create Train model\n with tf.name_scope('Train'):\n with tf.variable_scope('Model', reuse=None,\n initializer=initializer):\n m_train = model.Model(args, is_training=True, config=config,\n init_embed=init_embed, name='Train')\n m_train.build_graph()\n\n # create Valid model\n with tf.name_scope('Valid'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_valid = model.Model(args, is_training=False, config=config,\n init_embed=init_embed, name='Valid')\n m_valid.build_graph()\n\n # create Test model\n with tf.name_scope('Test'):\n with tf.variable_scope('Model', reuse=True,\n initializer=initializer):\n m_test = model.Model(args, is_training=False, config=test_config,\n init_embed=init_embed, name='Test')\n m_test.build_graph()\n\n # create summaries to be viewed in TensorBoard\n tb_summaries = utils.TensorBoardSummaries()\n tb_summaries.create_ops()\n\n init = tf.global_variables_initializer()\n\n # if pretrained, must create dict to initialize TF Saver\n if bool(pretrain_args):\n # get trainable variables and convert to dict for Saver\n reuse_vars = tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES)\n reuse_vars_dict = dict(\n [(var.op.name, var) for var in reuse_vars])\n # create saver for TF session (see function for addl details)\n saver = utils.create_tf_saver(args, pretrain_args,\n reuse_vars_dict)\n else:\n saver = tf.train.Saver()\n\n # ppls dict has perplexities that are stored in results database\n ppls = {}\n ppls, _ = _update_ppls(ppls, initialize=True)\n\n with tf.Session() as sess:\n sess.run(init)\n\n if args.load_path != '':\n print('Restoring model...')\n saver.restore(sess, args.load_path)\n\n for epoch in range(config['max_epoch']):\n print('Epoch: {0} Learning rate: {1:.3f}\\n'.format(\n epoch + 1, sess.run(m_train.lr)))\n for i, speaker in enumerate(args.speakers):\n print('Training {0} ...'.format(speaker))\n\n # run epoch on training data\n train_perplexity = _run_epoch(sess, m_train, args, train_data,\n i, tb_summaries, id_to_word,\n train_op=m_train.train_op,\n verbose=True)\n print('Epoch: {0} Train Perplexity: {1:.3f}'.format(\n epoch + 1, train_perplexity))\n ppls, _ = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=train_perplexity,\n dataset='train')\n\n print('Validating...')\n # run epoch on validation data\n valid_perplexity = _run_epoch(sess, m_valid, args,\n valid_data, i, tb_summaries,\n id_to_word, verbose=True)\n print('Epoch: {0} Valid Perplexity: {1:.3f}'.format(\n epoch + 1, valid_perplexity))\n ppls, 
improved = _update_ppls(ppls, epoch=epoch+1,\n speaker=speaker,\n ppl=valid_perplexity,\n dataset='valid')\n\n if improved:\n # save model if valid ppl is lower than current\n # best valid ppl\n if args.save_path != '':\n print('Saving model to {0}.'.format(\n args.save_path))\n saver.save(sess, args.save_path)\n\n for i, speaker in enumerate(args.speakers):\n print('Testing {0} ...'.format(speaker))\n print('Restoring best model for testing...')\n saver.restore(sess, args.save_path)\n # run model on test data\n test_perplexity = _run_epoch(sess, m_test, args, test_data, i)\n ppls['test_ppl_' + speaker] = test_perplexity\n print('Test Perplexity: {0:.3f}'.format(test_perplexity))\n\n if args.insert_db == 'True':\n # write params/config/results to sql database\n results_db.insert_results(args, config, start_time, ppls)", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def pickle_dataset(train_set, test_set, validation_set, path):\n\n train_set_filename = open(os.path.join(path, TRAIN_DATA_SET), 'wb')\n # Pickle classes_count\n cPickle.dump(train_set, train_set_filename, protocol=cPickle.HIGHEST_PROTOCOL)\n # Close the file\n train_set_filename.close()\n # Save hierarchy_mapping file\n test_set_filename = open(os.path.join(path, TEST_DATA_SET), 'wb')\n # Pickle hierarchy_mapping\n cPickle.dump(test_set, test_set_filename, protocol=cPickle.HIGHEST_PROTOCOL)\n # Close the file\n test_set_filename.close()\n # Save entities list\n validation_set_filename = open(os.path.join(path, VALIDATION_DATA_SET), 'wb')\n # Pickle entities\n cPickle.dump(validation_set, validation_set_filename, protocol=cPickle.HIGHEST_PROTOCOL)\n # Close the file\n validation_set_filename.close()\n\n print(\"Debug printing- the number of train samples: {0}, the number of test samples: {1}, \"\n \"the number of validation samples: {2}\".format(len(train_set), len(test_set), len(validation_set)))", "def copy_files(self,train_dir,test_dir):\n\n def _copy_files(src_paths,dst_dir,class_numbers):\n\n \"\"\"\n Creo una lista di directory per ogni classe\n (knify-spoony/test/class1)\n (knify-spoony/test/class2)\n \n \n :param src_paths: \n :param dst_dir: \n :param class_numbers: \n :return: \n \"\"\"\n\n class_dirs=[os.path.join(dst_dir,class_name+\"/\")for class_name in self.class_names]\n\n for dir in class_dirs:\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n for src,cls in zip(src_paths,class_numbers):\n shutil.copy(src=src,dst=class_dirs[cls])\n\n # Copy the files for the training-set.\n #self.get_paths ritorna i path delle immagini da copiare nel training set test=False\n _copy_files(src_paths=self.get_paths(test=False),\n dst_dir=train_dir,\n class_numbers=self.class_numbers)\n\n print(\"- Copied training-set to:\", train_dir)\n\n # Copy the files for the test-set.\n #test=True ritorna tutte le path del test\n _copy_files(src_paths=self.get_paths(test=True),\n dst_dir=test_dir,\n class_numbers=self.class_numbers_test)\n\n print(\"- Copied test-set to:\", test_dir)", "def load_datasets(self):\n if self.processed_extension == '.npz':\n logger.info(f'Loading sets from npz:')\n \n logger.info(f'train: 
{self.train_path}')\n self.train_data = sparse.load_npz(self.train_path)\n\n logger.info(f'val: {self.val_path}')\n self.val_data = sparse.load_npz(self.val_path)\n\n logger.info(f'test: {self.test_path}')\n self.test_data = sparse.load_npz(self.test_path)\n \n # Split x and y\n self.train_data = [sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,-1])]\n \n self.val_data = [sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,-1])]\n \n self.test_data = [sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,-1])]\n \n elif self.processed_extension == '.csv':\n logger.info(f'Loading sets from csv:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = pd.read_csv(self.train_path)\n train_cols = self.train_data.columns\n self.train_data = [self.train_data[train_cols.difference(['TARGET'])],\n self.train_data['TARGET']]\n \n logger.info(f'val: {self.val_path}')\n self.val_data = pd.read_csv(self.val_path)\n self.val_data = [self.val_data[train_cols.difference(['TARGET'])],\n self.val_data['TARGET']]\n \n logger.info(f'test: {self.test_path}')\n self.test_data = pd.read_csv(self.test_path)\n self.test_data = [self.test_data[train_cols.difference(['TARGET'])],\n self.test_data['TARGET']]\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n self.n_train = self.train_data[0].shape[0]\n self.n_val = self.val_data[0].shape[0]\n self.n_test = self.test_data[0].shape[0]\n self.input_size = self.train_data[0].shape[1]\n self.n_examples = self.n_train + self.n_val + self.n_test\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def train(self, trainingData, trainingLabels, validationData, validationLabels):\n self.trainingData = trainingData\n self.trainingLabels = trainingLabels", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test\".format(\n workspace=workspace_dir\n )", "def train_and_test(self, train_fn, test_fn):\n logging.info(\"Training..\")\n self.train(train_fn)\n logging.info(\"Testing..\")\n return self.test(test_fn)\n logging.info(\"Done!\")", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test 
setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test_earlystop\".format(\n workspace=workspace_dir\n )", "def setup(self, stage=None):\n self.data_train, self.data_val, self.data_test = [None] * 3", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n start_training(config, profile_step=1)", "def create_directories(train_path, test_path):\n train_path.joinpath(\"images\").mkdir(parents=True)\n test_path.joinpath(\"images\").mkdir(parents=True)", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 15\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 
'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 15\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints = keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def load_data(self):\n # make sure preprocessing is same as preprocessing as the network\n # reduce mean, and divide by a value to do scaling\n self.train_datagen = ImageDataGenerator(\n rescale=1./ 255,\n shear_range=0.05,\n rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range=[0.9, 1.1], # Randomly zoom image\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n brightness_range=[0.8, 1.2],\n fill_mode='reflect',\n validation_split=0.2)\n\n self.test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n self.train_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"training\")\n\n self.validation_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"validation\")\n\n self.test_generator = self.test_datagen.flow_from_directory(\n self.test_dir,\n target_size=(224, 224),\n shuffle=False,\n batch_size=1,\n class_mode='categorical')", "def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)", "def load_train_test_transactions(train_size=0.7):\n X, y = features_target_split()\n X_train, X_test, y_train, y_test = train_test_split(X,y,train_size=train_size, random_state=7)\n print('\\nTraining and testing data creation successful\\n')\n return X_train, X_test, y_train,y_test", "def setup():\n # change working directory to\n os.chdir(ROOT_DIR)\n # move to dataFiles\n with misc.cd('dataFiles'):\n print('Now in:', os.getcwd())\n # Load in data\n model_test = models.MlModel('rf', 'water-energy.csv', 'expt')\n # Get feature. 
I use rdkit2d as it is fast to generate\n df, num_feat, feat_time = features.featurize(model_test.data, model_test.algorithm, [0])\n # Split the data\n train_features, test_features, train_target, test_target, feature_list = features.targets_features(df, 'expt')\n return train_features, test_features, train_target, test_target", "def train(train_set, test_set, train_label, test_label, data_name, test_filenames, dimension_reduce=False,\n distribute_training=False):\n train_set = np.array(train_set)\n test_set = np.array(test_set)\n\n print(\"The shape of training set before dimension reduction is {0}\".format(train_set.shape))\n print(\"The shape of test set before dimension reduction is {0}\".format(test_set.shape))\n print('Use distribute training ? >> {0}'.format(distribute_training))\n reg = linear_model.BayesianRidge()\n\n if dimension_reduce:\n pca = PCA(n_components=128)\n train_set = pca.fit_transform(train_set)\n test_set = pca.fit_transform(test_set)\n\n print(\"The shape of training set after dimension reduction is {0}\".format(train_set.shape))\n print(\"The shape of test set after dimension reduction is {0}\".format(test_set.shape))\n\n if not distribute_training:\n reg.fit(train_set, train_label)\n else:\n train_set, test_set, train_label, test_label = da.array(train_set), da.array(test_set), da.array(\n train_label), da.array(test_label)\n reg.fit(train_set, train_label)\n\n predicted_label = reg.predict(test_set)\n mae_lr = round(mean_absolute_error(test_label, predicted_label), 4)\n rmse_lr = round(math.sqrt(mean_squared_error(test_label, predicted_label)), 4)\n pc = round(np.corrcoef(test_label, predicted_label)[0, 1], 4)\n print('===============The Mean Absolute Error of Model is {0}===================='.format(mae_lr))\n print('===============The Root Mean Square Error of Model is {0}===================='.format(rmse_lr))\n print('===============The Pearson Correlation of Model is {0}===================='.format(pc))\n\n mkdirs_if_not_exist('./model')\n joblib.dump(reg, './model/BayesRidge_%s.pkl' % data_name)\n print('The regression model has been persisted...')\n\n mkdirs_if_not_exist('./result')\n\n out_result(test_filenames, predicted_label, test_label, None, path='./result/Pred_GT_{0}.csv'.format(data_name))\n\n df = pd.DataFrame([mae_lr, rmse_lr, pc])\n df.to_csv('./result/%s.csv' % data_name, index=False)\n print('The result csv file has been generated...')", "def __init__(self, input, output, options, local=False):\n super().__init__(\n \"create_training_set\",\n None,\n input,\n output,\n local,\n \"multi_training_set.snakefile\",\n )\n self.options = options", "def train(self, trainingData, trainingLabels, testData, testLabels, validate): \n\t\t \n\t\tself.features = trainingData[0].keys() # this could be useful for your code later...\n\n\t\tif (self.automaticTuning):\n\t\t\tCgrid = [0.001, 0.002, 0.003, 0.004, 0.005]\n\t\telse:\n\t\t\tCgrid = [self.C]\n\t\t\t\n\t\treturn self.trainAndTune(trainingData, trainingLabels, testData, testLabels, Cgrid, validate)", "def test_trainGenerator():\n\n # check type\n assert isinstance(trainset, surprise.trainset.Trainset)\n\n # the number of users in trainset should be equal to the user from database plus 1\n assert len(trainset.all_users()) == len(svd.song_df.user_id.unique())+1", "def PrepareSets(args, tokenizer, train_set, dev_set, test_set, first_label=False):\n\n # filter out al instances where the emotion is neutral\n train_set = train_set.filter(lambda example: not 27 in example['labels'])\n dev_set = 
dev_set.filter(lambda example: not 27 in example['labels'])\n test_set = test_set.filter(lambda example: not 27 in example['labels'])\n\n # remove unnecessary columns\n train_set = train_set.remove_columns(['text', 'id'])\n dev_set = dev_set.remove_columns(['text', 'id'])\n test_set = test_set.remove_columns(['text', 'id'])\n\n # function that creates new instances for all labels\n def handle_multiple_labels(batch):\n new_batch = {'attention_mask': [],\n 'input_ids': [],\n 'labels': [],\n 'token_type_ids': [],\n }\n for instance_idx, instance in enumerate(batch['labels']):\n for label in instance:\n new_batch['attention_mask'].append(batch['attention_mask'][instance_idx])\n new_batch['input_ids'].append(batch['input_ids'][instance_idx])\n new_batch['labels'].append(label)\n new_batch['token_type_ids'].append(batch['token_type_ids'][instance_idx])\n return new_batch\n\n # function that takes the first label\n def handle_first_label(batch):\n batch['labels'] = batch['labels'][0]\n return batch\n\n # check which label function to use\n if first_label:\n label_fn = handle_first_label\n batched = False\n else:\n label_fn = handle_multiple_labels\n batched = True\n\n # filter the labels\n train_set = train_set.map(label_fn, batched=batched)\n dev_set = dev_set.map(label_fn, batched=batched)\n test_set = test_set.map(label_fn, batched=batched)\n\n # return the prepared datasets\n return train_set, dev_set, test_set", "def get_loaders(train_dataset, val_dataset, test_dataset, batch_size=128):\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=8,\n shuffle=True)\n\n val_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n test_loader = DataLoader(test_dataset, batch_size=batch_size, num_workers=8,\n shuffle=False)\n\n return train_loader, val_loader, test_loader", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def train_and_test(self, data):\n\n np.random.shuffle(data)\n datalist = self.unpack_data(data)\n\n logger.info('[*] 75-25 partition of datasets ...')\n\n markline1 = math.floor(0.75*(len(datalist['features'])))\n markline2 = math.floor(0.75*len(datalist['labels']))\n\n train_features = datalist['features'][:(markline1)]\n test_features = datalist['features'][(markline1):]\n \n train_labels = datalist['labels'][:(markline2)]\n test_labels = datalist['labels'][(markline2):]\n\n logger.info('[*] Training started with 75% Dataset ...')\n\n self.knn_model.fit(train_features, train_labels)\n\n logger.info('[*] Testing started with 25% Dataset ...')\n print('\\n/---------------Accuracy----------------/') \n \n accuracy = self.knn_model.score(train_features, train_labels)\n print('Test set accuracy {:.2f} %'.format(accuracy*100))\n\n if accuracy < 0.40:\n logger.warning('[-.-!] 
Thanks for tryin\\' but this machine ain\\'t learning.')\n\n return True", "def setUp(self):\n self.X_train, self.y_train = load_data(\"../data/traindata.mat.tar.gz\")\n self.nn = NN_hwr([len(self.X_train[0]), 50, 10])", "def prepare_learning(self):\n print 'Separating inputs and outputs...'\n self.inputs, self.outputs = extract_samples(self.matches,\n self.input_features,\n self.output_feature)\n\n print 'Normalizing data...'\n self.normalizer, self.inputs = normalize(self.inputs)\n\n print 'Separating train and test sets...'\n self.train_inputs, self.train_outputs, self.test_inputs, self.test_outputs = split_samples(self.inputs, self.outputs)\n\n print 'Building neural network...'\n self.network = buildNetwork(len(self.input_features),\n 2 * len(self.input_features),\n 1,\n outclass=SigmoidLayer,\n bias=True)\n\n print 'Building and filling pybrain train set object...'\n self.train_set = ClassificationDataSet(len(self.input_features))\n\n for i, input_line in enumerate(self.train_inputs):\n self.train_set.addSample(self.train_inputs[i],\n [self.train_outputs[i] - 1])\n\n self.trainer = BackpropTrainer(self.network, dataset=self.train_set,\n momentum=0.5, weightdecay=0.0)\n\n self.train_set.assignClasses()", "def create_train_test_val_dirs(root_dir=os.getcwd()):\n try:\n os.makedirs(root_dir + '/train')\n os.makedirs(root_dir + '/train/CoregisteredImages')\n os.makedirs(root_dir + '/train/BlurryImages')\n os.makedirs(root_dir + '/val')\n os.makedirs(root_dir + '/val/CoregisteredImages')\n os.makedirs(root_dir + '/val/BlurryImages')\n os.makedirs(root_dir + '/test')\n os.makedirs(root_dir + '/test/CoregisteredImages')\n os.makedirs(root_dir + '/test/BlurryImages')\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise", "def learning_set_builders(self):\n # Transform in a dataframe:\n original_train_x = pd.DataFrame(self.original_train_x, columns=self.original_x_header)\n original_train_y = pd.DataFrame(self.original_train_y, columns=self.original_y_header)\n original_test_x = pd.DataFrame(self.original_test_x, columns=self.original_x_header)\n original_test_y = pd.DataFrame(self.original_test_y, columns=self.original_y_header)\n original_validation_x = pd.DataFrame(self.original_validation_x, columns=self.original_x_header)\n original_validation_y = pd.DataFrame(self.original_validation_y, columns=self.original_y_header)\n\n # Training set\n x, y = self.convertor(original_train_x, original_train_y)\n self.pairs_train_x = x.to_numpy()\n self.pairs_train_y = y.to_numpy()\n\n # Headers\n self.pairs_x_header = x.columns\n self.pairs_y_header = y.columns\n\n # Testing set\n x, y = self.convertor(original_test_x, original_test_y)\n self.pairs_test_x = x.to_numpy()\n self.pairs_test_y = y.to_numpy()\n\n # Validation set\n x, y = self.convertor(original_validation_x, original_validation_y)\n self.pairs_validation_x = x.to_numpy()\n self.pairs_validation_y = y.to_numpy()", "def setUp(self):\n self.batch_size = 8\n num_keypoints = 16\n self.data_batch = []\n self.data_samples = []\n\n for i in range(self.batch_size):\n gt_instances = InstanceData()\n keypoints = np.zeros((1, num_keypoints, 2))\n keypoints[0, i] = [0.5 * i, 0.5 * i]\n gt_instances.keypoints = keypoints + 1.0\n gt_instances.keypoints_visible = np.ones(\n (1, num_keypoints, 1)).astype(bool)\n gt_instances.keypoints_visible[0, (2 * i) % 8, 0] = False\n gt_instances.bboxes = np.random.random((1, 4)) * 20 * i\n gt_instances.head_size = np.random.random((1, 1)) * 10 * i\n\n pred_instances = InstanceData()\n pred_instances.keypoints 
= keypoints\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict(),\n }\n\n self.data_batch.append(data)\n self.data_samples.append(data_sample)", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def __init__(self, data_set):\r\n self.name = data_set\r\n\r\n # The training and test labels\r\n self.labels = {'train': None, 'test': None}\r\n\r\n # The training and test examples\r\n self.examples = {'train': None, 'test': None}\r\n\r\n # Load all the data for this data set\r\n for data in ['train', 'test']:\r\n self.load_file(data)\r\n\r\n # The shape of the training and test data matrices\r\n self.num_train = self.examples['train'].shape[0]\r\n self.num_test = self.examples['test'].shape[0]\r\n self.dim = self.examples['train'].shape[1]", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def init_datasets(self, display_samples = False):\n print(\"==> Loading images from \", self.img_dir)\n self.image_data_gen = ImageDataGenerator(\n rescale=1./255,\n #rotation_range=30,\n #shear_range=30,\n #width_shift_range=.15,\n #height_shift_range=.15,\n #zoom_range=0.5,\n validation_split=0.2)\n\n self.train_dataset = self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='training')\n\n self.val_dataset = self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='validation')\n\n if display_samples:\n self.display_sample_images()", "def create_tf_datasets(self):\n images = []\n labels 
= []\n\n images = self.dataframe_labeled_samples.index.values\n\n labels.append(\n tuple(self.dataframe_labeled_samples['Intersection'].values.astype('uint8')))\n\n images = [\n os.path.join(\n os.path.dirname(\n self.summary_manager.current_labelgui_summary_filepath),\n img_name) for img_name in images]\n labels = list(chain.from_iterable(labels))\n\n\n if self.validation_split == 0:\n images = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images)])\n images = tf.data.Dataset.from_tensor_slices(images)\n labels = tf.data.Dataset.from_tensor_slices(labels)\n dataset = tf.data.Dataset.zip((images, labels))\n return dataset, None\n\n images, images_val, labels, labels_val = train_test_split(\n images, labels, test_size=self.validation_split, random_state=0)\n\n train_split_filename = ((\n f'{self.save_checkpoint_filepath or self.checkpoint_filepath}'\n f'_train_split.txt'\n ))\n print(f\"Saving train split files to: {train_split_filename}\")\n with open(train_split_filename, 'w+')\\\n as train_split_file:\n for img in images:\n train_split_file.write(img + '\\n')\n \n val_split_filename = ((\n f'{self.save_checkpoint_filepath or self.checkpoint_filepath}'\n f'_val_split.txt'\n ))\n print(f\"Saving train split files to: {val_split_filename}\")\n with open(val_split_filename, 'w+')\\\n as val_split_file:\n for img in images_val:\n val_split_file.write(img + '\\n')\n\n print(f\"Loading validation image paths ({len(images)}) with preprocessor\")\n images = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images)])\n images = tf.data.Dataset.from_tensor_slices(images)\n\n print(f\"Loading labels into tf tensor\")\n labels = tf.data.Dataset.from_tensor_slices(labels)\n print(f\"Creating zipped dataset with images and labels\")\n dataset = tf.data.Dataset.zip((images, labels))\n\n print(f\"Loading validation image paths ({len(images_val)}) with preprocessor\")\n images_val = np.array([\n self.image_preprocessor(\n imageio.imread(f)) for f in tqdm(images_val)])\n #images_val = np.array([self.image_preprocessor(f) for f in tqdm(images_val)])\n images_val = tf.data.Dataset.from_tensor_slices(images_val)\n #images_val = tf.data.Dataset.list_files(images_val)\n #images_val = images_val.map(tf.io.read_file)\n print(f\"Loading validation labels into tf tensor\")\n labels_val = tf.data.Dataset.from_tensor_slices(labels_val)\n print(f\"Creating validation zipped dataset with images and labels\")\n dataset_val = tf.data.Dataset.zip((images_val, labels_val))\n\n return dataset, dataset_val", "def Init(train,test,VarList):\n ROOT.TMVA.Tools.Instance()\n ROOT.TMVA.PyMethodBase.PyInitialize()\n\n output = ROOT.TFile.Open('~/Data/NNOutput.root', 'RECREATE')\n factory = ROOT.TMVA.Factory('TMVAClassification', output,'!V:!Silent:Color:DrawProgressBar:AnalysisType=Classification')\n dataloader = ROOT.TMVA.DataLoader('dataset')\n\n for Var in VarList:\n dataloader.AddVariable(Var)\n\n add_classification_events(dataloader,train.Events,train.OutTrue,weights=train.Weights,signal_label=1)\n add_classification_events(dataloader,test.Events,test.OutTrue,weights=test.Weights,signal_label=1,test=True)\n\n dataloader.PrepareTrainingAndTestTree(ROOT.TCut(''),'SplitSeed=100') #:NormMode=None\n #CrossCheck(dataloader)\n\n return dataloader, factory , output", "def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = 
f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data", "def make_suite():\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes():\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n return suite", "def make_suite():\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes():\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n return suite", "def main():\n X_train, Y_train, y_train = load_batch(\"data_batch_1\")\n X_test, Y_test, y_test = load_batch(\"test_batch\")\n X_val, Y_val, y_val = load_batch((\"data_batch_2\"))\n\n X_train, X_train_mean, X_train_std = normalize(X_train)\n X_test = normalize_mean_std(X_test, X_train_mean, X_train_std)\n X_val = normalize_mean_std(X_val, X_train_mean, X_train_std)\n\n data = {\n \"X_train\": X_train,\n \"Y_train\": Y_train,\n \"y_train\": y_train,\n \"X_test\": X_test,\n \"Y_test\": Y_test,\n \"y_test\": y_test,\n \"X_val\": X_val,\n \"Y_val\": Y_val,\n \"y_val\": y_val,\n }\n\n network = Network(data)", "def test__load_training_set():\n classifier = classifier_module.Classifier(None)\n set = classifier._load_training_set('test')\n for i in range(0, 5):\n signal_list = set[i]\n assert signal_list[0].get_x() == 1.0 + i * 0.028\n assert signal_list[0].get_y() == 1.00 - i * i * 0.20 * 0.30\n\n assert signal_list[1].get_x() == 2.0 - i * 0.011\n assert signal_list[1].get_y() == 2.00 - i * 0.020", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None" ]
[ "0.76895845", "0.715086", "0.7096839", "0.7073061", "0.70712376", "0.7044257", "0.70330745", "0.69933313", "0.69472665", "0.6942843", "0.69403505", "0.6925283", "0.6864578", "0.6856286", "0.6808348", "0.67936116", "0.6744948", "0.67354846", "0.67186093", "0.6708335", "0.6669742", "0.6665276", "0.6652924", "0.6629961", "0.66268766", "0.6596545", "0.65658647", "0.65576863", "0.65436727", "0.65393496", "0.65058607", "0.6470278", "0.6467769", "0.64198333", "0.64152324", "0.6409898", "0.6397107", "0.63920945", "0.63894945", "0.63884985", "0.6386047", "0.63781", "0.636016", "0.6346979", "0.6342632", "0.63374513", "0.6336209", "0.6334179", "0.63166773", "0.6287126", "0.62842965", "0.6282553", "0.62803274", "0.62750214", "0.6258623", "0.6252814", "0.6244642", "0.623918", "0.62316346", "0.62277883", "0.622673", "0.6225163", "0.62139684", "0.620423", "0.6203865", "0.6201518", "0.6196973", "0.6195169", "0.61929375", "0.61929375", "0.6186748", "0.6184946", "0.61828107", "0.6182578", "0.6177484", "0.6175356", "0.6167106", "0.6166338", "0.61653465", "0.6164087", "0.615909", "0.61566836", "0.61509335", "0.6150609", "0.6144155", "0.6139352", "0.61383915", "0.6132033", "0.61283976", "0.612592", "0.6123834", "0.6114809", "0.6113012", "0.61123013", "0.6108879", "0.61064345", "0.60984105", "0.60984105", "0.60914904", "0.6088533", "0.60866386" ]
0.0
-1
Build a neural network using Keras.
def build_model(input_shape, loss="sparse_categorical_crossentropy", learning_rate=0.0001):
    # build network architecture using convolutional layers
    model = tf.keras.models.Sequential()

    # 1st conv layer
    model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=input_shape,
                                     kernel_regularizer=tf.keras.regularizers.l2(0.001)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same'))

    # 2nd conv layer
    model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                                     kernel_regularizer=tf.keras.regularizers.l2(0.001)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same'))

    # 3rd conv layer
    model.add(tf.keras.layers.Conv2D(32, (2, 2), activation='relu',
                                     kernel_regularizer=tf.keras.regularizers.l2(0.001)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.MaxPooling2D((2, 2), strides=(2, 2), padding='same'))

    # flatten output and feed into dense layer
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(64, activation='relu'))
    # wrap Dropout in model.add() so the layer is actually part of the network
    model.add(tf.keras.layers.Dropout(0.3))

    # softmax output layer
    model.add(tf.keras.layers.Dense(10, activation='softmax'))

    optimiser = tf.optimizers.Adam(learning_rate=learning_rate)

    # compile model
    model.compile(optimizer=optimiser, loss=loss, metrics=["accuracy"])

    # print model parameters on console
    model.summary()

    return model
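A minimal usage sketch for the build_model function above. The input shape (130, 13, 1), the 10-class dummy labels, and the batch size are illustrative assumptions, not part of the original snippet; any (height, width, channels) input compatible with the pooling strides would work.

import numpy as np
import tensorflow as tf  # build_model above assumes tf is already imported

# Assumed input shape, e.g. 130 time frames x 13 MFCC coefficients x 1 channel.
input_shape = (130, 13, 1)
model = build_model(input_shape)

# Random placeholder data, only to demonstrate the call signature;
# sparse_categorical_crossentropy expects integer class labels (0-9 here).
X = np.random.rand(32, *input_shape).astype("float32")
y = np.random.randint(0, 10, size=(32,))
model.fit(X, y, epochs=1, batch_size=8)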
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def build_model(input_dim, hidden_neurons, output_dim):\n\tmodel = Sequential([\n \tDense(hidden_neurons, input_dim=input_dim),\n Activation('relu'),\n Dropout(0.2),\n Dense(hidden_neurons),\n Activation('relu'),\n Dropout(0.2),\n Dense(output_dim, activation='softmax')\n ])\n\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model", "def neural_network():\n model = Sequential()\n model.add(Conv2D(64, kernel_size=3, activation=\"relu\", input_shape=(28, 28, 1)))\n model.add(Conv2D(64, kernel_size=3, activation=\"relu\"))\n model.add(Flatten())\n model.add(Dense(10, activation=\"softmax\"))\n model.compile(optimizer='adam', loss='categorical_crossentropy')\n\n return model", "def construct_model():\n # model = Sequential()\n # model.add(Dense(units=64, activation='relu', input_dim=100))\n # model.add(Dense(units=10, activation='softmax'))\n # model.compile(loss='categorical_crossentropy',\n # optimizer='sgd',\n # metrics=['accuracy'])\n # return model\n\n model = Sequential()\n # Input Layer\n model.add(Conv2D(64, 3, data_format='channels_last', activation='relu', padding='same',\n input_shape=(img_width, img_height, 3)))\n model.add(MaxPool2D(pool_size=2, strides=2))\n # Hidden Layer 1\n model.add(Conv2D(64, 3, activation='relu', padding='same'))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 2\n model.add(Conv2D(128, 3, activation='relu', padding='same'))\n model.add(Conv2D(128, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 3\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n\n # Fully Connected Layer\n model.add(Flatten())\n # 512 Neuron Layer\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n # Output Layer\n model.add(Dense(num_of_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "def compile_model(self, nb_classes, input_shape):\n\n # Get our network parameters.\n number_of_layers = self.network['number_of_layers']\n neurons_per_layer = self.network['neurons_per_layer']\n dropout_per_layer = self.network['dropout_per_layer']\n activation = self.network['activation']\n optimizer = self.network['optimizer']\n\n model = Sequential()\n\n if type(neurons_per_layer) is int:\n self.network['neurons_per_layer'] = [neurons_per_layer]\n\n if type(dropout_per_layer) is float:\n self.network['dropout_per_layer'] = [dropout_per_layer]\n\n\n if number_of_layers != len(self.network['neurons_per_layer']):\n self.network['neurons_per_layer'] = correct_neurons_per_layer(self.network)\n\n if number_of_layers != len(self.network['dropout_per_layer']):\n 
self.network['dropout_per_layer'] = correct_dropout_per_layer(self.network)\n\n print(\"number of layers: \" + str(number_of_layers))\n print(\"neurons: \" + str(self.network['neurons_per_layer']) + \" with \" + activation)\n print(\"dropout: \" + str(self.network['dropout_per_layer']))\n\n # Add each layer.\n for i in range(len(self.network['neurons_per_layer'])):\n # Need input shape for first layer.\n if i == 0:\n model.add(Dense(self.network['neurons_per_layer'][i], activation=activation, input_shape=input_shape))\n else:\n model.add(Dense(self.network['neurons_per_layer'][i], activation=activation))\n\n model.add(Dropout(self.network['dropout_per_layer'][i]))\n\n # Output layer.\n model.add(Dense(nb_classes, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer=optimizer,\n metrics=['accuracy'])\n\n return model", "def create_neural_network():\n model = Sequential()\n model.add(LSTM(32, input_shape=(4, 45))) # 4 time-steps and 45 features\n model.add(Dense(64))\n model.add(Activation('tanh'))\n model.add(Dense(units=45)) # 45 is the number of class\n model.add(Activation('softmax')) # Output the density of probability\n\n model.compile(optimizer=adam(lr=0.001, decay=1e-6),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n\n model.summary()\n print(\"Creation of the Neural Network is finished.\")\n return model", "def compile_model(network, nb_classes, input_shape):\r\n nb_layers = network['nb_layers']\r\n layer = network['layer']\r\n nb_neurons = network['nb_neurons']\r\n activation = network['activation']\r\n optimizer = network['optimizer']\r\n\r\n model = Sequential()\r\n\r\n for i in range(nb_layers):\r\n if i == 0:\r\n model.add(Conv2D(nb_neurons, activation=activation, input_shape=input_shape))\r\n else:\r\n model.add(layer(nb_neurons, activation=activation))\r\n \r\n model.add(Dropout(0.2))\r\n\r\n model.add(Dense(nb_classes, activation='softmax'))\r\n\r\n model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\r\n\r\n return model", "def build_model(input_classes,output_classes):\n dimensions = 20\n inputs = []\n embedded_outputs = []\n for i in input_classes:\n input_layer = Input((1,))\n inputs.append(input_layer)\n embedder = Embedding(input_dim=i,output_dim=dimensions,input_length=1,embeddings_constraint=UnitNorm(axis=0))\n embedded_layer = embedder(input_layer)\n embedded_outputs.append(embedded_layer)\n\n embedded_concats = Concatenate()(embedded_outputs)\n flatten_layer = Flatten()\n\n dense_layer = Dense(output_classes)\n\n flattened_output = flatten_layer(embedded_concats)\n dense_output = dense_layer(flattened_output)\n\n # dense_output = dense_layer(embedded_concats)\n\n model = Model(inputs,dense_output)\n print(model.summary())\n model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')\n\n return model", "def modelbuilder():\n model = Sequential()\n # Add a convolution layer with with a sigmoid activation function\n model.add(layers.Conv2D(1, (2, 2), strides=(1, 1), activation='sigmoid', padding='same', input_shape=(256, 256, 3)))\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\n model.summary()\n return model", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def build(width, height, depth, classes):\n model = Sequential()\n inputShape = 
(height, width, depth)\n # Variable chanDim is set to -1 if the order of the inputShape is (height, width, depth)\n # meaning the depth of the channel comes last in the triple\n chanDim = -1\n\n if K.image_data_format == \"channel_first\":\n inputShape = (depth, height, width)\n # if the channel is first in the triple (depth, height, width) we set chanDim to 1\n # Batch normalization layers use the channel dimension in the process, that is why we specficy the order\n chanDim = 1\n\n # The first set of CONV -> RELU where after each we apply BN layers to avoid overfitting\n # and a POOL -> DO that also help in reducing overfitting and increase the classification accuracy\n # First set of CONV -> RELU -> BN use 32 filters each with 3x3 shape\n # The consecutive CONV -> RELU -> BN layers allow the network to learn more rich features, which\n # is a common practice when training deeper CNNs, before applying POOL layer to reduce the spatial dimensions\n # of the input image\n # Then we apply POOL layer with a size of 2x2, and since we do not provide explicitly stride, keras asumes 2x2 S\n # Finally, a DROPOUT layer with a probabliy of 25%\n model.add(Conv2D(32, (3, 3), padding=\"same\", input_shape=inputShape))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(32, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # The second set of CONV -> RELU -> BN layers now learn 64 filters with 3x3 shape\n # It is common to increase the number of filters as the spatial input size decreases deeper in the network.\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # We add flatten layer to flatten the output of the previous layer\n # Then we add the only FC layer (512 nodes) with a RELU activation and a BN\n # Further applying a DO layer with p = 0.5\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n # Finally a softmax classifier\n model.add(Dense(classes))\n model.add(Activation(\"softmax\"))\n\n return model", "def _build_network(self,\n input_dim,\n dense_layers,\n nodes_per_layer=None,\n hidden_act='relu',\n output_act='sigmoid',\n dropout_layers=None):\n\n if nodes_per_layer is None:\n nodes = [10] * dense_layers\n else:\n nodes = nodes_per_layer\n\n if dropout_layers is None:\n do_layers = [0] * dense_layers\n else:\n do_layers = dropout_layers\n\n self.model.add(Dense(nodes[0], input_dim=input_dim,\n activation=hidden_act))\n\n if dense_layers > 1:\n for l in range(1, dense_layers - 1):\n if do_layers[l - 1] != 0:\n self.model.add(Dropout(do_layers[l - 1]))\n\n self.model.add(Dense(nodes[l], activation=hidden_act))\n\n self.model.add(Dense(1, activation=output_act))", "def build(data_shape_1, data_shape_2):\n # create NN model \n # design network\n \n inputs = keras.Input(shape=(data_shape_1, data_shape_2), name='inp')\n cnn1 = layers.Conv1D(16, 5, activation='relu')(inputs)\n cnn2 = layers.Conv1D(32, 3, activation='relu')(cnn1)\n cnn3 = layers.Conv1D(64, 3, activation='relu')(cnn2)\n cnn3 = layers.Flatten()(cnn3)\n lstm = 
layers.LSTM(100,return_sequences = True, activation='relu')(inputs)\n lstm = layers.Flatten()(lstm)\n x = layers.concatenate([cnn3,lstm])\n x = layers.Dense(100, activation='sigmoid')(x)\n outputs = layers.Dense(24)(x)\n\n model = keras.Model(inputs=inputs, outputs=outputs, name='mnist_model')\n \n return model", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 5, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)(x)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def build_model():\n model_weights = np.load(WEIGHTS_PATH, encoding='latin1').item()\n model = Sequential()\n model.add(InputLayer(batch_input_shape=(1, None, 1)))\n\n filter_parameters = [\n {'name': 'conv1', 'num_filters': 16, 'padding': 32,\n 'kernel_size': 64, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv2', 'num_filters': 32, 'padding': 16,\n 'kernel_size': 32, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv3', 'num_filters': 64, 'padding': 8,\n 'kernel_size': 16, 'conv_strides': 2},\n\n {'name': 'conv4', 'num_filters': 128, 'padding': 4,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv5', 'num_filters': 256, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2,\n 'pool_size': 4, 'pool_strides': 4},\n\n {'name': 'conv6', 'num_filters': 512, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv7', 'num_filters': 1024, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n ]\n\n for x in filter_parameters:\n model.add(ZeroPadding1D(padding=x['padding']))\n model.add(Conv1D(x['num_filters'],\n kernel_size=x['kernel_size'],\n strides=x['conv_strides'],\n padding='valid'))\n weights = model_weights[x['name']]['weights'].reshape(model.layers[-1].get_weights()[0].shape)\n biases = model_weights[x['name']]['biases']\n\n model.layers[-1].set_weights([weights, biases])\n\n if 'conv8' not in x['name']:\n gamma = model_weights[x['name']]['gamma']\n beta = 
model_weights[x['name']]['beta']\n mean = model_weights[x['name']]['mean']\n var = model_weights[x['name']]['var']\n\n model.add(BatchNormalization())\n model.layers[-1].set_weights([gamma, beta, mean, var])\n model.add(Activation('relu'))\n if 'pool_size' in x:\n model.add(MaxPooling1D(pool_size=x['pool_size'],\n strides=x['pool_strides'],\n padding='valid'))\n\n #\n return Model(inputs=model.input, outputs=model.get_layer('activation_7').output)", "def build_mlp(input_data, output_data, n_neurons=[512, 256, 128]):\n input_layer = keras.layers.Input([input_data.shape[-1]], name='input-layer')\n for i, n_unit in enumerate(n_neurons):\n if i == 0:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(input_layer)\n else:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(x)\n \n output_layer = keras.layers.Dense(units=output_data.shape[-1],activation='softmax' , name='output-layer')(x)\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n return model", "def build(width, height, depth, classes):\n model = Sequential()\n inputShape = (width, height, depth)\n\n # defining 1st layer : conv => relu\n model.add(Conv2D(32, (3, 3), padding='same', input_shape=inputShape))\n model.add(Activation('relu'))\n\n # softmax classifier\n model.add(Flatten())\n model.add(Dense(classes, activation='softmax'))\n return model", "def build_model():\n model = keras.Sequential()\n\n model.add(Conv2D(32, (5, 5), activation='relu', input_shape=(32, 32, 1)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.1))\n\n model.add(Conv2D(64, (5, 5), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(43, activation='softmax'))\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "def build_model(self):\n self.model = models.Sequential()\n self.model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='mse', metrics=['mae'])\n self.model.add(layers.Flatten())\n self.model.add(layers.Dense(64, activation='relu'))\n self.model.add(layers.Dense(10, activation='softmax'))\n self.model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])", "def trainNet():", "def build_model(self):\n self.model = Sequential()\n # print self.layers[0].identifier\n # print self.layers[0].parameters\n for layer in self.layers:\n # print layer.identifier\n # print layer.parameters\n self.model.add(layer.toKerasFn())\n\n\n # super(SequentialModelWrapper, self).compile(optimizer=self.optimizer.toKerasFn(),\n # loss=self.loss,\n # metrics=self.metrics)\n self.model.compile(optimizer=self.optimizer.toKerasFn(),\n loss=self.loss,\n metrics=self.metrics)", "def build_cnn(self):\n model = Sequential()\n model.add(Conv2D(24, (1, 3), activation = 'relu', input_shape = (1, grid_size*grid_size+2, 1)))\n model.add(Conv2D(24, (1, 3), activation = 'relu', input_shape = (1, grid_size*grid_size+2, 1)))\n model.add(Flatten())\n model.add(Dense(len(ACTIONS), activation = 'linear'))\n 
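# Q-value head: one linear output per action; compiled below with an MSE loss, the usual DQN-style regression objective\n 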
model.compile(loss = 'mse', optimizer = Adam(lr = alpha))\n\n return model", "def build_model(nx, layers, activations, lambtha, keep_prob):\n model = K.Sequential()\n for i in range(len(layers)):\n model.add(K.layers.Dense(layers[i],\n activation=activations[i],\n input_shape=(nx,),\n kernel_regularizer=K.regularizers.l2(lambtha)))\n if i + 1 < len(layers):\n model.add(K.layers.Dropout(1 - keep_prob))\n return model", "def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. - 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl", "def create_base_networks(in_dims):\n model = Sequential()\n model.add(Dense(300, input_dim=in_dims))\n model.add(Activation(\"tanh\"))\n model.add(Dropout(0.1))\n model.add(Dense(300, kernel_initializer='normal', activation='tanh'))\n model.add(Dropout(0.1))\n model.add(Dense(300, kernel_initializer='normal', activation='tanh'))\n model.add(Dropout(0.1))\n model.add(Dense(10, kernel_initializer='normal', activation='tanh'))\n model.add(Dropout(0.1))\n model.add(Activation(\"sigmoid\"))\n # model.add(Dense(600))\n\n return model", "def make_model():\n model = Sequential()\n model.add(Dense(1000, input_shape=(INPUT_SIZE,), activation='relu'))\n model.add(Dense(1000, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', optimizer='adam', metrics=['accuracy']) # the required optimizer argument was missing; 'adam' is an assumed choice\n return model", "def build_model(self):\n # Define input layer (states)\n states = K.layers.Input(shape=(self.state_size,), name='states')\n net = states\n # Add the hidden layers\n for layer_count in range(len(self.layer_sizes)):\n net = K.layers.Dense(units=self.layer_sizes[layer_count])(net)\n net = K.layers.Activation('relu')(net)\n if self.batch_norm_options[layer_count]:\n net = K.layers.BatchNormalization()(net)\n net = K.layers.Dropout(self.dropout_options[layer_count])(net)\n\n # Add final output layer with linear activation\n actions = K.layers.Dense(units=self.action_size, activation='linear',\n name='raw_actions')(net)\n\n # Create Keras model\n self.model = K.models.Model(inputs=states, outputs=actions)\n\n # Print the created model summary\n self.logger.debug(\"Model Summary:\")\n self.model.summary(print_fn=self.logger.debug)\n\n # Define optimizer and training function\n self.optimizer = K.optimizers.Adam(lr=self.learning_rate)\n self.model.compile(loss='mse', optimizer=self.optimizer)", "def __build_model(self) -> Sequential:\n self.__name = 'Training model'\n input_dim, *hidden_dims, output_dim = 
parameters.ANET_DIMENSIONS\n\n model = Sequential()\n model.add(Input(shape=(input_dim,)))\n\n for dimension in hidden_dims:\n model.add(Dense(dimension, activation=self.__activation_function))\n\n model.add(Dense(output_dim, activation=softmax))\n\n model.compile(\n optimizer=(self.__optimizer(learning_rate=self.__learning_rate) if self.__learning_rate is not None else self.__optimizer()),\n loss=self.__loss_function\n )\n model.summary()\n return model", "def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2,num_hidden_units_3, num_code_units, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_in,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_hidden_units_2,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=num_hidden_units_3,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_3,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.softmax,\n )\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units_3,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_4 = lasagne.layers.DenseLayer(\n l_hidden_3,\n num_units=num_hidden_units_2,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_hidden_5 = lasagne.layers.DenseLayer(\n l_hidden_4,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n l_out = lasagne.layers.DenseLayer(\n l_hidden_5,\n num_units=output_dim,\n nonlinearity=None,\n )\n\n return l_out", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def neural_network(X, Y, X_test, Y_test, num_layers, activation):\n \n X_n = (X - np.mean(X, axis = 0)) / np.std(X, axis = 0)\n Y_n = (Y - np.mean(Y, axis = 0)) / np.std(Y, axis = 0)\n \n if num_layers == 1:\n num_neurons = 1000\n elif num_layers == 2:\n num_neurons = 200\n elif num_layers == 3:\n num_neurons = 141\n elif num_layers == 4:\n num_neurons = 115\n\n if activation == \"ReLU\":\n activation = ReLUActivation\n elif activation == \"tanh\":\n activation = TanhActivation\n else:\n raise ValueError('Unsupported activation: ' + str(activation)) # fail fast instead of calling an undefined activation below\n\n model = Model(X_n.shape[1])\n model.addLayer(DenseLayer(num_neurons,activation()))\n if num_layers >= 2:\n model.addLayer(DenseLayer(num_neurons,activation()))\n if num_layers >= 3:\n model.addLayer(DenseLayer(num_neurons,activation()))\n\n model.addLayer(DenseLayer(Y.shape[1],LinearActivation()))\n model.initialize(QuadraticCost())\n \n Y_pred = model.predict((X_test - np.mean(X, axis = 0)) / np.std(X, axis = 0)) \n Y_pred = Y_pred * np.std(Y, axis = 0) + np.mean(Y, axis = 0)\n mse = np.mean(np.sqrt(np.sum((Y_pred - Y_test) ** 2, axis = 1))) \n return mse", "def build_model(input_shape):\n model = keras.Sequential(\n [\n 
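# a single bias-free sigmoid unit over the raw features, i.e. logistic regression through the origin\n 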
layers.Dense(\n 1, use_bias=False, activation=\"sigmoid\", input_shape=[input_shape]\n ),\n ]\n )\n\n model.compile(\n loss=\"binary_crossentropy\",\n optimizer=tf.train.AdamOptimizer(),\n metrics=[\"accuracy\"],\n )\n\n return model", "def _build_model(self):\n \n #convolutional part\n conv_inputs = keras.Input(shape = self._state_shape[0])\n c1 = layers.Conv2D(filters = 4, kernel_size = 2, strides = (2,2), padding = \"same\", activation = 'relu')(conv_inputs)\n c2 = layers.Conv2D(filters = 8, kernel_size = 2, strides = (1,1), padding = \"same\", activation = 'relu')(c1)\n flat = layers.Flatten()(c2)\n\n\n #current green phase layer\n # phase_inputs = keras.Input(shape = (self._state_shape[1],))\n \n #elapsed green time layer\n elapsed_time_inputs = keras.Input(shape = (self._state_shape[2],))\n \n \n #combine elapsed time and green time layer\n # combined_green = layers.concatenate([phase_inputs, elapsed_time_inputs])\n # green_dense = layers.Dense(10, activation='relu')(elapsed_time_inputs)\n \n #combine green layer with conv layer\n all_combined = layers.concatenate([elapsed_time_inputs, flat])\n dense = layers.Dense(32, activation='relu')(all_combined)\n dense = layers.Dense(16, activation='relu')(dense)\n outputs = layers.Dense(self._output_dim, activation='linear')(dense)\n \n model = keras.Model(inputs = [conv_inputs, elapsed_time_inputs], outputs = outputs, name='simple_CNN') \n model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=self._learning_rate))\n \n return model", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def build_simple_model():\n model = keras.Sequential()\n\n model.add(Flatten(input_shape=(32, 32, 1)))\n model.add(Dense(128, activation='relu'))\n model.add(Dense(256, activation='relu'))\n model.add(Dense(43, activation='softmax'))\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "def gennet(self, dims, learning_rate, opt='SGD', loss='MeanSquaredError()', activation=\"relu\", last_activation=\"sigmoid\"):\n model = keras.models.Sequential()\n opt = eval('keras.optimizers.' + opt)\n loss = eval('tf.keras.losses.' 
+ loss)\n model.add(keras.layers.Dense(input_shape=(dims[0],), # Determines shape after first input of a board state\n units=dims[0], activation=activation))\n for layer in range(1, len(dims)-1):\n model.add(keras.layers.Dense(\n units=dims[layer], activation=activation))\n model.add(keras.layers.Dense(\n units=dims[-1], activation=last_activation))\n model.compile(optimizer=opt(learning_rate=learning_rate), loss=loss)\n return model", "def build_model_cnn(x_train, n=32, d=0.25, k=5):\n model = Sequential()\n model.add(Dropout(rate = d, input_shape = (x_train.shape[1], 1)))\n model.add(Conv1D(filters=n, kernel_size=(5), strides=1, activation = 'relu', kernel_constraint=max_norm(4)))\n model.add(Dropout(rate = d))\n model.add(MaxPooling1D(pool_size=k))\n model.add(Conv1D(filters=n, kernel_size=(5), strides=1, activation = 'relu', kernel_constraint=max_norm(4)))\n model.add(Dropout(rate = d))\n model.add(MaxPooling1D(pool_size=k))\n model.add(Flatten())\n model.add(Dense(n, activation ='relu', kernel_constraint=max_norm(4)))\n model.add(Dense(4, activation = 'softmax')) # eliminated Q label\n model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['categorical_accuracy'])\n return model", "def build(imageWidth, imageHeight, imageDepth, classesNumber, finalAct=\"sigmoid\"):\n\n # initialize the model as a sequential stack\n model = Sequential()\n inputShape = (imageHeight, imageWidth, imageDepth)\n chanDim = -1\n\n # First block: Conv2D, ReLU, Normalization, MaxPool\n # use 32 filters of size 3x3\n model.add(Conv2D(filters=32, kernel_size=(3, 3), padding=\"same\", input_shape=inputShape))\n # with a Rectified Linear Unit activation\n model.add(Activation(\"relu\"))\n # apply batch normalization\n model.add(BatchNormalization(axis=chanDim))\n # a 3x3 max pooling\n model.add(MaxPooling2D(pool_size=(3, 3)))\n # and 25% dropout to reduce overfitting\n model.add(Dropout(0.25))\n\n # Second block\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(64, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # Third block\n model.add(Conv2D(128, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(Conv2D(128, (3, 3), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(BatchNormalization(axis=chanDim))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n # Move on to the fully connected layers\n # flatten the feature maps into a vector\n model.add(Flatten())\n model.add(Dense(1024))\n model.add(Activation(\"sigmoid\"))\n model.add(BatchNormalization())\n model.add(Dropout(0.5))\n\n # Finally apply the output activation of the network\n model.add(Dense(classesNumber))\n model.add(Activation(finalAct))\n\n return model", "def build_model(num_classes=43):\n model = models.Sequential()\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n\n model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n 
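# note: input_shape is only required on the first layer; Keras ignores it on the later ones\n 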
model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n \n\n model.add(layers.Flatten())\n model.add(layers.Dense(num_classes, activation='softmax'))\n model.summary()\n\n return model", "def build_model(train_generator, test_generator, epochs, shape):\n model = Sequential()\n\n # Convolutional layer\n model = add_conv_blocks(model, 4, 6, initial_input_shape=shape)\n\n # Feature aggregation across time\n model.add(Lambda(lambda x: K.mean(x, axis=1)))\n\n model.add(Flatten())\n\n # Linear classifier\n model.add(Dense(4, activation=K.softmax))\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=['accuracy', k_f1_score])\n\n model.fit_generator(train_generator,\n validation_data=test_generator,\n steps_per_epoch=shape[0] / GENERATOR_BATCH_SIZE,\n epochs=epochs,\n verbose=1,\n validation_steps=5)\n return model", "def create_neural_network(X, y, epochs=8):\n model = Sequential()\n model.add(layers.Dense(500, input_dim=X.shape[1]))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(128, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(32, activation='relu'))\n model.add(layers.Dense(5,activation='softmax'))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])\n print(model.summary())\n model.fit(X, y, epochs=epochs, batch_size=500)\n return model", "def compile_network(model, optimizer):\n compile_network_model(model, optimizer, categorical_crossentropy)", "def build(self, hp):\n\n model = Sequential()\n model.add(Conv2D(filters=hp.Choice('num_filters_0', values=[8, 16, 32, 64]),\n kernel_size=hp.Choice('kernel_size_0', values=[3, 4, 5]),\n activation=hp.Choice('activation_0', values=['relu', 'tanh']),\n input_shape=self.input_shape))\n\n for i in range(hp.Int('num_layers', 1, 3)):\n model.add(Conv2D(filters=hp.Choice('num_filters_%d' % (i + 1), values=[8, 16, 32, 64]),\n kernel_size=hp.Choice('kernel_size_%d' % (i + 1), values=[3, 4, 5]),\n activation=hp.Choice('activation_%d' % (i + 1), values=['relu', 'tanh'])))\n model.add(Flatten())\n model.add(Dense(N_zern))\n model.summary()\n\n model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-3, 5e-4, 1e-4])),\n loss='mean_squared_error')\n return model", "def build_network(self, inputs, targets, training=False):\n raise NotImplementedError", "def gennet(self, dims, learning_rate, opt='SGD', loss='MeanSquaredError()', activation=\"relu\", last_activation=\"sigmoid\"):\n model = keras.models.Sequential()\n opt = eval('keras.optimizers.' + opt)\n loss = eval('tf.keras.losses.' 
+ loss)\n model.add(keras.layers.Dense(input_shape=(dims[0],), # Determines shape after first input of a board state\n units=dims[0], activation=activation))\n for layer in range(1, len(dims)-1):\n model.add(keras.layers.Dense(\n units=dims[layer], activation=activation))\n model.add(keras.layers.Dense(\n units=dims[-1], activation=last_activation))\n model.compile(optimizer=opt(lr=learning_rate), loss=loss)\n return model", "def build_model(nx, layers, activations, lambtha, keep_prob):\n λ = lambtha\n\n # create model\n a_model = K.Sequential()\n n_layers = len(layers)\n regularizer = K.regularizers.l2(λ)\n\n for i in range(n_layers):\n # Adds a densely-connected layer with layer[i] units to the model:\n a_model.add(K.layers.Dense(\n units=layers[i],\n input_dim=nx,\n kernel_regularizer=regularizer,\n activation=activations[i],\n )\n )\n # To avoid creation of:\n # Layer (type) Output Shape Param #\n # dropout_2 (Dropout) (None, 10) 0\n if i < n_layers - 1:\n a_model.add(K.layers.Dropout(1 - keep_prob))\n return a_model", "def buildNet(inputShape, numUniqueClasses):\n layers = InputLayer((None,) + inputShape[1:4])\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1), stride= (5,1))\n layers = ResidualLayer(layers, 8, \n filter_size = (3,1))\n layers = ResidualLayer(layers, 1, \n filter_size = (3,1), stride= (3,1))\n layers = NonlinearityLayer(layers, nonlinearity = nonlinearity)\n layers = DropoutLayer(layers,p=.3) \n layers = batch_norm(NNHelpers.LocallyConnected2DLayer(layers,1,(5,1),\n W=He('relu'),\n nonlinearity=nonlinearity)) \n layers = DenseLayer(layers,num_units=numUniqueClasses,\n nonlinearity=linear) \n layers = NonlinearityLayer(layers, nonlinearity=softmax) \n return layers", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def MLP_model(self):\n print(\"Building model..\")\n self.model = Sequential()\n\n # first hidden layer (0)\n self.model.add(Dense(self.h_nodes0, input_dim=self.input_size, use_bias=True))\n self.model.add(Activation(self.activation0))\n self.model.add(Dropout(self.dropout0))\n\n # second 
hidden layer (1)\n if self.h_nodes1 != None:\n self.model.add(Dense(self.h_nodes1, use_bias=True))\n self.model.add(Activation(self.activation1))\n self.model.add(Dropout(self.dropout1))\n\n # third hidden layer (2)\n if self.h_nodes2 != None:\n self.model.add(Dense(self.h_nodes2, use_bias=True))\n self.model.add(Activation(self.activation2))\n self.model.add(Dropout(self.dropout2))\n\n #output layer\n self.model.add(Dense(self.output_size))\n self.model.add(Activation(self.activation_out))\n\n #compile model\n self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[R_squared])\n\n return self.model", "def create_neural_network(NumberOfFeatures, NumberOfClasses, optimizer_type, lr, moment, lr_decay):\n model = create_base_network(NumberOfFeatures, NumberOfClasses)\n if optimizer_type == 'sgd':\n opt = optimizers.SGD(lr=lr, momentum=moment, decay=lr_decay)\n else:\n opt = optimizer_type\n\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n print(model.summary())\n return model", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset = dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def __init__(self, num_filters, filter_dimensions, input_shape, pooling_shape, dense_shape, num_categories):\n\t\tself.model = Sequential()\n\t\tself.model.add(Convolution2D(num_filters, filter_dimensions[0], \\\n\t\t\t\tfilter_dimensions[1], activation='relu', input_shape=input_shape))\n\t\tself.model.add(MaxPooling2D(pool_size=pooling_shape))\n\t\tself.model.add(Dropout(0.2))\n\t\tself.model.add(Flatten())\n\t\tself.model.add(Dense(dense_shape, activation='relu'))\n\t\tself.model.add(Dropout(0.2))\n\t\tself.model.add(Dense(dense_shape, activation='relu'))\n\t\tself.model.add(Dropout(0.2))\n\t\tself.model.add(Dense(num_categories, 
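# classification head: one softmax output per category\n\t\t\t\t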
activation='softmax'))\n\t\t\n\t\tself.model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])", "def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2, num_code_units, filter_size, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n print(\"Input shape: \",lasagne.layers.get_output_shape(l_in))\n\n # print(shaped_units)\n # shaped_units = shaped_units[0]\n shaped_units = 2800\n\n # print(shape)\n\n l_conv2D_1 = lasagne.layers.Conv2DLayer(\n l_in, \n num_filters=8,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n print(\"Conv 2D shape: \",lasagne.layers.get_output_shape(l_conv2D_1))\n\n l_reshape_1 = lasagne.layers.ReshapeLayer(\n l_conv2D_1,\n shape=(([0], -1))\n )\n\n print(\"Reshape 1 shape: \", lasagne.layers.get_output_shape(l_reshape_1))\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_reshape_1,\n num_units= num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 1 shape: \", lasagne.layers.get_output_shape(l_hidden_1))\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Code layer shape: \",lasagne.layers.get_output_shape(l_code_layer))\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 2 shape: \",lasagne.layers.get_output_shape(l_hidden_2))\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=shaped_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 3 shape: \",lasagne.layers.get_output_shape(l_hidden_3))\n\n l_reshape_2 = lasagne.layers.ReshapeLayer(\n l_hidden_3,\n shape=(([0],8,7,50))\n )\n\n print(\"Reshape 2 shape: \",lasagne.layers.get_output_shape(l_reshape_2))\n\n l_out = lasagne.layers.Conv2DLayer(\n l_reshape_2, \n num_filters=1,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n # print(\"Deconv shape: \",lasagne.layers.get_output_shape(l_deconv2D_1))\n\n print(\"Output shape: \",lasagne.layers.get_output_shape(l_out))\n\n return l_out", "def build_model():\n\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 3)\n\n model = Sequential()\n model.add(Conv2D(32, (3, 3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n\n # FC layer\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])\n\n return model", "def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. 
Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')", "def create_base_network(image_input_shape, embedding_size):\n input_image = Input(shape=image_input_shape)\n x = input_image \n #x = Flatten()(input_image)\n x = Dense(128, activation='relu')(x)\n x = Dropout(0.1)(x)\n x = Dense(128, activation='relu')(x)\n x = Dropout(0.1)(x)\n x = Dense(embedding_size)(x)\n\n base_network = Model(inputs=input_image, outputs=x)\n #plot_model(base_network, to_file='base_network.png', show_shapes=True, show_layer_names=True)\n return base_network", "def create_model(input_shape=None):\n\n model = Sequential()\n #n,height,width,channel = input_shape\n height = 146\n width = 243\n channel = 3\n\n model.add(Conv2D(filters=4, input_shape=(width, height, channel), kernel_size=(3, 3), padding='same'))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=4,kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=4, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=16, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=16, kernel_size=(5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.25))\n model.add(Dense(32))\n model.add(Activation('relu'))\n model.add(Dropout(0.25))\n model.add(Dense(8))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.87, nesterov=True)\n model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=[\"accuracy\"])\n return model", "def build(input_shape, classes):\n model = Sequential()\n model.add(Conv2D(20, kernel_size=9, padding=\"same\", input_shape=input_shape,\n 
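# 9x9 stem convolution; Glorot-normal initialization keeps the first layer's weights well-scaled\n 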
kernel_initializer='glorot_normal'))\n\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2), data_format=\"channels_last\")) # data_format replaces the deprecated Keras 1 dim_ordering=\"tf\" argument\n model.add(Dropout(0.3))\n\n model.add(Conv2D(40, kernel_size=5, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2), data_format=\"channels_last\"))\n model.add(Dropout(0.3))\n\n model.add(Conv2D(40, kernel_size=3, padding=\"same\"))\n model.add(Activation(\"relu\"))\n\n model.add(Conv2D(50, kernel_size=3, padding=\"same\"))\n model.add(Activation(\"relu\"))\n\n model.add(Conv2D(50, kernel_size=3, padding=\"same\"))\n model.add(Activation(\"relu\"))\n\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2), data_format=\"channels_last\"))\n model.add(Dropout(0.3))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.3))\n\n model.add(Dense(512))\n model.add(Activation(\"relu\"))\n\n model.add(Dense(classes))\n model.add(Activation(\"softmax\"))\n\n return model", "def build(self):\n\n self.W = self.init([self.n_atom_input_feat, self.n_output])\n self.b = model_ops.zeros(shape=[\n self.n_output,\n ])\n\n self.trainable_weights = [self.W, self.b] # gather the parameters as a list rather than summing the tensors", "def build_model(hyperparameters):\r\n model = keras.Sequential()\r\n\r\n model.add(layers.BatchNormalization(input_shape=[hyperparameters['input_size']]))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='relu'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='sigmoid'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='relu'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(hyperparameters['nodes'], activation='sigmoid'))\r\n model.add(layers.BatchNormalization())\r\n model.add(layers.Dropout(hyperparameters['dropout_value']))\r\n model.add(layers.Dense(5, activation='softmax'))\r\n\r\n model.compile(optimizer=keras.optimizers.Adam(learning_rate=hyperparameters['learning_rate']),\r\n loss='categorical_crossentropy',\r\n metrics=['categorical_accuracy'])\r\n\r\n return model", "def NN(train_df, val_df, test_df, sub_path):\n logging.info('Neural Network preprocessing')\n \n if train_df is not None: \n y_train = train_df['is_attributed'].values\n train_df = train_df.drop('is_attributed', axis = 1)\n train_df = train_df.drop('attributed_time', axis = 1) \n #train_df = train_df.drop('click_time', axis = 1) #only if no preprocessing\n gc.collect()\n if val_df is not None:\n y_val = val_df['is_attributed'].values \n val_df = val_df.drop(['is_attributed'], axis = 1)\n val_df = get_keras_data(val_df)\n \n list_variables = get_values(train_df)\n print(list_variables)\n \n logging.info('Model is creating...') \n \n max_var = []\n if test_df is not None:\n for i, var in enumerate(list_variables):\n max_var.append(np.max([train_df[var].max(), test_df[var].max()])+1) \n train_df = get_keras_data(train_df)\n else:\n for i, var in enumerate(list_variables):\n max_var.append(train_df[var].max()+1) \n train_df = get_keras_data(train_df)\n \n emb_n = 50\n dense_n = 1000\n \n in_var = []\n emb_var = [] \n for i, var in enumerate(list_variables):\n in_var.append(Input(shape=[1], name = var))\n emb_var.append(Embedding(max_var[i], emb_n)(in_var[i]))\n \n fe = concatenate([emb for emb in 
emb_var])\n s_dout = SpatialDropout1D(0.2)(fe)\n fl1 = Flatten()(s_dout)\n #conv = Conv1D(100, kernel_size=4, strides=1, padding='same')(s_dout)\n dl = Dense(100)(s_dout)\n fl2 = Flatten()(dl)\n concat = concatenate([(fl1), (fl2)])\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(concat))\n x = Dropout(0.2)(Dense(dense_n,activation='relu')(x))\n outp = Dense(1,activation='sigmoid')(x)\n \n model = Model(inputs=[var for var in in_var], outputs=outp)\n \n logging.info('Model is compiling...')\n \n batch_size = 50000\n epochs = 2 #12 for sample_train\n exp_decay = lambda init, fin, steps: (init/fin)**(1/(steps-1)) - 1\n steps = int(len(list(train_df)[0]) / batch_size) * epochs\n lr_init, lr_fin = 0.002, 0.0002\n lr_decay = exp_decay(lr_init, lr_fin, steps)\n optimizer_adam = Adam(lr=lr_init, decay=lr_decay)\n \n model.compile(loss='binary_crossentropy',optimizer=optimizer_adam,metrics=['accuracy'])\n model.summary()\n \n logging.info('Model is training...')\n \n model.fit(train_df, y_train, batch_size=batch_size, epochs=epochs, shuffle=True, verbose=2, validation_split=0.1)\n del train_df, y_train; gc.collect()\n \n if val_df is not None:\n logging.info('Prediction on validation set')\n predictions_NN_prob = model.predict(val_df, batch_size=batch_size, verbose=2)\n del val_df; gc.collect()\n predictions_NN_prob = predictions_NN_prob[:,0]\n \n predictions_NN = np.where(predictions_NN_prob > 0.5, 1, 0)\n acc_NN = accuracy_score(y_val, predictions_NN)\n print('Overall accuracy of Neural Network model:', acc_NN)\n \n if test_df is not None:\n logging.info('Prediction on test set')\n sub = pd.DataFrame()\n sub['click_id'] = test_df['click_id'].astype('int')\n test_df = test_df.drop(['click_id'], axis=1)\n test_df = get_keras_data(test_df)\n \n sub['is_attributed'] = model.predict(test_df, batch_size=batch_size, verbose=2)\n del test_df; gc.collect()\n logging.info(\"Writing....\")\n with file_io.FileIO(sub_path, mode='wb') as fout:\n sub.to_csv(fout,index=False)\n logging.info(\"Done...\")\n logging.info(sub.info())", "def build_model(nx, layers, activations, lambtha, keep_prob):\n model = K.Sequential()\n reg = K.regularizers.l2\n model.add(K.layers.Dense(layers[0], input_shape=(nx,),\n activation=activations[0],\n kernel_regularizer=reg(lambtha)))\n\n for layer, act in zip(layers[1:], activations[1:]):\n model.add(K.layers.Dropout(1 - keep_prob))\n model.add(K.layers.Dense(layer, activation=act,\n kernel_regularizer=reg(lambtha)))\n\n return model", "def _keras_build_fn(architecture=None, prediction_periods=1):\n \n # List of optimizers that can be specified in the architecture\n optimizers = ['Adadelta', 'Adagrad', 'Adam', 'Adamax', 'Nadam', 'RMSprop', 'SGD']\n \n # The model definition should contain at least one layer and the compilation parameters\n if architecture is None or len(architecture) < 2:\n err = \"Invalid Keras architecture. Expected at least one layer and compilation parameters.\"\n raise Exception(err)\n # The last row of the model definition should contain compilation parameters\n elif not architecture.iloc[-1,0].capitalize() in ['Compile', 'Compilation']:\n err = \"Invalid Keras architecture. 
The last row of the model definition should provide 'Compile' parameters.\"\n raise Exception(err)\n \n # sys.stdout.write(\"Architecture Data Frame in _keras_build_fn:\\n{}\\n\\n\".format(architecture.to_string()))\n\n neural_net = keras.models.Sequential()\n\n for i in architecture.index:\n # Name items in the row for easy access\n name, args, kwargs = architecture.iloc[i,0], architecture.iloc[i,1], architecture.iloc[i,2]\n\n # The last row of the DataFrame should provide compilation keyword arguments\n if i == max(architecture.index):\n # Check if an optimizer with custom parameters has been defined\n try:\n kwargs = kwargs.copy() # Copy so that we don't modify the architecture dataframe\n kwargs['optimizer'] = opt\n except UnboundLocalError:\n pass\n \n # Compile the model\n neural_net.compile(**kwargs)\n # Watch out for a row providing optimizer parameters\n elif name in optimizers:\n opt = getattr(keras.optimizers, name)(**kwargs) \n # All other rows of the DataFrame define the model architecture\n else:\n # Check if the name includes a layer wrapper e.g. TimeDistributed Dense\n names = name.split(' ')\n if len(names) == 2:\n wrapper = names[0]\n name = names[1]\n \n # Get wrapper kwargs\n wrapper_kwargs = dict()\n if 'merge_mode' in kwargs:\n wrapper_kwargs['merge_mode'] = kwargs.pop('merge_mode')\n else:\n wrapper = None\n\n # Create a keras layer of the required type with the provided positional and keyword arguments\n layer = getattr(keras.layers, name)(*args, **kwargs)\n\n if wrapper:\n # Create the layer wrapper\n wrapper = getattr(keras.layers, wrapper)(layer, **wrapper_kwargs)\n # Add the layer wrapper to the model\n neural_net.add(wrapper) \n else:\n # Add the layer to the model\n neural_net.add(layer)\n \n # Get the number of nodes for the final layer\n output_features = neural_net.layers[-1].get_config()['units']\n assert prediction_periods == output_features, \"The number of nodes in the final layer of the network must match the prediction_periods execution argument. 
Expected {} nodes but got {}.\".format(prediction_periods, output_features)\n \n return neural_net", "def create_model(learning_rate, dense_layers, nodes, activation, dropout_rate=0.1):\n\tmodel = Sequential()\n\tglobal train_samples\n\t## Input-shape must be a tuple without the batch size.\n\tinput_shape = (1,) + train_samples.shape\n\tmodel.add(InputLayer(input_shape=(len(train_samples[0]),)))\n\t## Needed only in the case of convolutional layers.\n\t# model.add(Reshape(img_shape_full))\n\tfor i in range(dense_layers):\n\t\t## Name each layer, because Keras should give them unique names.\n\t\tname = 'layer_dense_{0}'.format(i+1)\n\t\t## Add these fully-connected layers to the model.\n\t\tmodel.add(Dense(nodes, activation=activation, name=name))\n\t\tmodel.add(Dropout(dropout_rate))\n\n\t## Last output layer with softmax-activation.\n\t## Used heavily for classification.\n\tmodel.add(Dense(1, activation='sigmoid'))\n\n\toptimizer = Adam(lr=learning_rate)\n\t## Compile the model\n\tmodel.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n\n\treturn model", "def create_model(training, layers, activation='relu',\n loss='mean_absolute_error', optimizer='adam'):\n\n model = Sequential()\n\n # Add the first layer with the input dimension\n model.add(Dense(layers[0], activation=activation, input_dim=1))\n\n for number_of_neurons in layers[1:]:\n model.add(Dense(number_of_neurons, activation=activation))\n\n # Specify that there is only one returned value\n model.add(Dense(1, activation=activation))\n\n model.compile(loss=loss, optimizer=optimizer)\n\n return model", "def build(input_shape, num_outputs, repetitions):\n if len(input_shape) != 2:\n raise Exception(\"Input shape should be a tuple (nb_sequence_size, nb_channels)\")\n inputs = Input(shape=input_shape)\n conv1 = Conv1D(filters=64, kernel_size=2, strides=1, padding='causal',\n kernel_initializer=\"he_normal\", kernel_regularizer=l2(1.e-4))(inputs) \n block = conv1\n filters = 64\n for i, r in enumerate(repetitions):\n block = _residual_block(bottleneck, filters=filters, repetitions=r)(block)\n filters *= 2\n \n # last activation\n block = _bn_relu(block)\n \n # Classifier block\n block_shape = K.int_shape(block)\n pool = AveragePooling1D(pool_size=block_shape[1], strides=1)(block)\n flatten = Flatten()(pool)\n if num_outputs == 2:\n dense = Dense(units=1, kernel_initializer=\"he_normal\",\n activation=\"sigmoid\")(flatten)\n else:\n dense = Dense(units=num_outputs, kernel_initializer=\"he_normal\",\n activation=\"softmax\")(flatten)\n model = Model(inputs=inputs, outputs=dense)\n return model", "def create_model():\n model = Sequential()\n\n model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n # Now we are going to add some Convolution Layers identical to the paper\n\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n 
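# taper the fully connected head down toward the single regression output\n 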
model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # 
plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def build_model(self):\n # Define input layer (states)\n states = Input(shape=self.state_size)\n \n # Add hidden layers\n net = Dense(units=self.fc1_units, activation='relu', kernel_initializer='glorot_uniform')(states)\n net = Dense(units=self.fc2_units, activation='relu', kernel_initializer='glorot_uniform')(net)\n\n # Add final output layer with linear activation\n Q_values = Dense(units=self.action_size, activation='linear', kernel_initializer='glorot_uniform')(net)\n\n # Create Keras model\n self.model = Model(inputs=states, outputs=Q_values, name=self.name)", "def build_model(self, input_size=(9, 1), k_reg=keras.regularizers.l2(1e-8), a_reg=keras.regularizers.l1(1e-8)):\n data_input = keras.Input(shape=input_size)\n normed_data = keras.layers.BatchNormalization()(data_input)\n flat_normed_data = keras.layers.Flatten()(normed_data)\n\n out = keras.layers.Dense(1, activation=\"sigmoid\")(flat_normed_data)\n\n self.model = keras.Model(inputs=data_input, outputs=out)\n self.model.summary()", "def build_model(self,nn1=32,nn2=64,lr=0.01,dp=0.1,decay=1e-4,dn1=50,dn2=100):\n\n opt = Adam(lr=self.lr, beta_1=0.9, beta_2=0.999, decay=self.decay)\n model = models.Sequential()\n model.add(Conv1D(filters=self.nn1, kernel_size=3, padding=\"same\", input_shape=(self.n_stp, self.n_feats)))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n 
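# second, wider Conv1D block; PReLU slopes are initialized at 0.20\n 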
model.add(Conv1D(filters=self.nn2, kernel_size=2, padding=\"same\"))\n model.add(MaxPool1D(pool_size=1))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n\n model.add(Dropout(self.dp))\n model.add(Flatten())\n model.add(Dense(self.dn1))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n model.add(Dense(self.dn2))\n model.add(PReLU(alpha_initializer=Constant(value=0.20)))\n model.add(Dense(1))\n model.add(Activation('relu'))\n\n model.compile(loss=\"mse\",\n optimizer=opt,\n metrics=[self.soft_acc])\n\n return model", "def build_network(weights_path: str=''):\n\n input_tensor = Input(shape=(9,))\n one = Dense(32)(input_tensor)\n two = Dense(64)(one)\n three = Dense(128)(two)\n four = Dense(128)(three)\n five = Dense(128)(four)\n six = Dense(64)(five)\n seven = Dense(32)(six)\n eight = Dense(8)(seven)\n output = Dense(1)(eight)\n\n\n model = Model(inputs=input_tensor, outputs=output)\n model.compile('adadelta', loss='mean_squared_error')\n\n if weights_path:\n model.load_weights(weights_path)\n\n return model", "def build_model(self, n_inputs, n_outputs, trainable=True):\n\n\t\t# common layers\n\t\tcomm_input = Input(shape=(n_inputs,))\n\t\tX = Dense(32, activation='relu', name=\"val0\", trainable=trainable)(comm_input)\n\t\tX = Dense(64, activation='relu', name=\"vaaal3\", trainable=trainable)(X)\n\t\tX = Dense(64, activation='relu', name=\"valdd3\", trainable=trainable)(X)\n\n\t\t# value network\n\t\tval_head = Dense(32, activation='relu', name=\"val3\", trainable=trainable)(X)\n\t\tval_head = Dense(1, activation='linear', name=\"val4\", trainable=trainable)(val_head)\n\t\tval_head = RepeatVector(n_outputs)(val_head)\n\t\tval_head = Flatten(name='meanActivation')(val_head)\n\n\t\t# advantage network\n\t\tadv_head = Dense(32, activation='tanh', name=\"val2\", trainable=trainable)(X)\n\t\tadv_head = Dense(n_outputs, activation='linear', name='Activation', trainable=trainable)(adv_head)\n\n\t\tm_adv_head = Lambda(lambda layer: layer - K.mean(layer))(adv_head)\n\t\t# adv_head= Subtract()([adv_head,m_adv_head])\n\n\t\t# Merge both\n\t\tq_values = Add(name=\"Q-value\")([val_head, adv_head])\n\t\tmodel = Model(inputs=[comm_input], outputs=q_values)\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=0.001))\n\t\tmodel.summary()\n\t\treturn model", "def build_all_connected_models(feature_dim, output_scheme=\"regression\", num_classes=num_classes):\n\tinput_layer = keras.layers.Input(shape=[feature_dim])\n\thidden1 = keras.layers.Dense(800, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(input_layer)\n\t#bn1 = keras.layers.BatchNormalization()(hidden1)\n\tdropout1 = keras.layers.AlphaDropout(rate=0.5)(hidden1)\n\t#hidden2 = keras.layers.Dense(500, activation='relu', \n\t#\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t#\tkernel_initializer=\"he_uniform\")(dropout1)\n\t#bn2 = keras.layers.BatchNormalization()(hidden2)\n\t#dropout2 = keras.layers.AlphaDropout(rate=0.5)(hidden2)\n\thidden3 = keras.layers.Dense(200, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(dropout1)\n\tdropout3 = keras.layers.AlphaDropout(rate=0.3)(hidden3)\n\t# trait-specific network\n\thidden4_1 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(dropout3)\n\thidden4_2 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), 
\n\t\tkernel_initializer=\"he_uniform\")(dropout3)\n\thidden4_3 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(dropout3)\n\thidden4_4 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(dropout3)\n\thidden4_5 = keras.layers.Dense(50, activation='relu', \n\t\tkernel_regularizer=keras.regularizers.l2(0.01), \n\t\tkernel_initializer=\"he_uniform\")(dropout3)\n\t#bn3 = keras.layers.BatchNormalization()(hidden3)\n\tif (output_scheme==\"regression\"):\n\t\t#outputs = [keras.layers.Dense(1, hidden3) for i in range(5)] #\n\t\tout1 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out1\")(hidden4_1)\n\t\tout2 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out2\")(hidden4_2)\n\t\tout3 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out3\")(hidden4_3)\n\t\tout4 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out4\")(hidden4_4)\n\t\tout5 = keras.layers.Dense(1, activation=lambda x: keras.activations.relu(x, max_value=num_classes-1.0), \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out5\")(hidden4_5)\n\telif (output_scheme == \"ordinal_clf\"):\n\t\tout1 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out1\")(hidden4_1)\n\t\tout2 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out2\")(hidden4_2)\n\t\tout3 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out3\")(hidden4_3)\n\t\tout4 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out4\")(hidden4_4)\n\t\tout5 = keras.layers.Dense(num_classes, activation=\"sigmoid\", \n\t\t\tkernel_regularizer=keras.regularizers.l2(0.01), kernel_initializer=\"he_uniform\", \n\t\t\tname=\"out5\")(hidden4_5)\n\tmodel = keras.models.Model(inputs=[input_layer],\n\t\t\t\t\t\t\t outputs=[out1, out2, out3, out4, out5])\n\tif (output_scheme == \"regression\"):\n\t\tmodel.compile(loss=\"mse\", \n\t\t\t\t\t optimizer=tf.keras.optimizers.Adam(0.0001),\n\t\t\t\t\t metrics=['mae', custom_r2_metric])\n\telif (output_scheme == \"ordinal_clf\"):\n\t\tmodel.compile(loss=tf.keras.losses.CategoricalCrossentropy(), \n\t\t\t\t\t optimizer=tf.keras.optimizers.Adam(0.001))\n\tprint(model.summary())\n\treturn model", "def build_model(self):\n # model type\n self.model = Sequential()\n \n # Add embedding layer for first layer\n self.model.add(Embedding(self.embeding_matrix.shape[0], 
self.embeding_matrix.shape[1], input_length=self.tweet_len,\n weights=[self.embeding_matrix], name='emb'))\n # Add one dimensional convolution layer\n self.model.add(Conv1D(filters=self.params[\"filters\"] , kernel_regularizer=regularizers.l2(0.01), \n kernel_size=self.params[\"kernel_size\"], activation=self.params[\"activation\"]))\n # Add one dimensional max pooling layer\n self.model.add(MaxPooling1D(pool_size=self.params[\"MP_pool_size\"]))\n # Add flatten layer\n self.model.add(Flatten())\n # Add dense layer to predict label\n self.model.add(Dense(1, activation=self.params[\"dense_activation\"]))\n # Compile\n self.model.compile(loss=self.params[\"loss\"] , metrics=['accuracy'] , optimizer='adam')", "def define(self, optimizer = Adam(lr=1e-5)): \n \n self.optimizer = optimizer\n\n model = Sequential()\n\n #Layer 1\n model.add(Conv2D( filters = 96, \n kernel_size = (11,11), \n strides = 4, \n padding = 'same', \n activation = 'relu', \n input_shape = (224, 224, 3), \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) # overlapping pooling\n #Layer 2\n model.add(Conv2D( filters = 256, \n kernel_size = (5,5), \n strides = 1, \n padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) \n #Layer 3\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', kernel_initializer = 'he_normal'))\n #Layer 4\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 5\n model.add(Conv2D( filters = 256, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 6\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None))\n \n #Layer 7\n model.add(Flatten())\n \n #Layer 8\n model.add(Dense( units = 4096, activation = 'relu'))\n model.add(Dense( units = 1024, activation = 'relu'))\n model.add(Dense( units = 512, activation = 'relu'))\n model.add(Dense( units = 256, activation = 'relu'))\n model.add(Dense( units = 128, activation = 'relu'))\n \n #Layer end\n model.add(Dense( units = 3, activation = 'softmax'))\n model.summary()\n \n self.model = model", "def build_keras_model(self,\n neurons: int = 20,\n hidden_layers: int = 2,\n input_shape: tuple = (4,),\n output_shape: tuple = 3,\n dropout_rate: float = 0.5\n ) -> tf.keras.Model:\n # keras adds the first hidden layer implicitly\n _layers = max(0, hidden_layers - 1)\n\n model = tf.keras.Sequential()\n # add input layer with input shape and first hidden layer\n model.add(tf.keras.layers.Dense(neurons,\n input_shape=input_shape,\n activation=tf.nn.relu,\n dtype=\"float32\"\n ))\n for _ in range(_layers):\n model.add(tf.keras.layers.Dense(neurons,\n activation=tf.nn.relu\n ))\n # add output layer\n model.add(tf.keras.layers.Dense(output_shape))\n\n self.model = model\n return self.get_model()", "def __init__(self):\n self.model = Sequential()\n self.model.add(AveragePooling2D(pool_size=(4, 4), input_shape=(224, 224, 3)))\n self.model.add(Conv2D(16, (9, 9)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(16, (5, 5)))\n self.model.add(Activation('relu'))\n 
self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Flatten())\n self.model.add(Dropout(0.5))\n self.model.add(Dense(1, activation='sigmoid'))\n self.model.compile(loss=binary_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])", "def build_dense(self): # Pass state_size and action_size\n model = Sequential()\n model.add(Dense(24, input_dim = grid_size*grid_size+2, activation = 'relu'))\n model.add(Dense(24, activation = 'relu'))\n model.add(Dense(len(ACTIONS), activation = 'linear'))\n model.compile(loss = 'mse', optimizer = RMSprop(lr = alpha))\n\n return model", "def get_model():\r\n model = Sequential([\r\n\r\n Lambda(normalize, input_shape=(66, 200, 3)),\r\n\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(1, (3, 3), padding='same', activation='relu', strides=2),\r\n Flatten(),\r\n\r\n\r\n ])\r\n\r\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\r\n return model", "def _build_model(self):\n # Define input layer (states)\n states = layers.Input(shape=(self.state_size,), name='states')\n\n # Add hidden layers\n net = layers.Dense(units=self.hidden_size, activation='relu')(states)\n net = layers.Dropout(self.dropout_rate)(net)\n net = layers.Dense(units=self.hidden_size * 2, activation='relu')(net)\n net = layers.Dropout(self.dropout_rate)(net)\n net = layers.Dense(units=self.hidden_size, activation='relu')(net)\n net = layers.Dropout(self.dropout_rate)(net)\n\n # Add final output layer with sigmoid activation\n raw_actions = layers.Dense(\n units=self.action_size,\n activation='sigmoid',\n name='raw_actions'\n )(net)\n\n # Scale [0, 1] output for each action dimension to proper range\n actions = layers.Lambda(\n lambda x: (x * self.action_range) + self.action_low,\n name='actions'\n )(raw_actions)\n\n # Create Keras model\n self.model = models.Model(inputs=states, outputs=actions)\n\n # Define loss function using action value (Q value) gradients\n action_gradients = layers.Input(shape=(self.action_size,))\n loss = K.mean(-action_gradients * actions)\n\n # Define optimizer and training function\n optimizer = optimizers.Adam()\n updates_op = optimizer.get_updates(\n params=self.model.trainable_weights,\n loss=loss,\n )\n self.train = K.function(\n inputs=[self.model.input, action_gradients, K.learning_phase()],\n outputs=[],\n updates=updates_op\n )", "def make_model():\n \n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu',\n input_shape=(150, 150, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n 
model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dense(37, activation='softmax'))\n \n #model.add(layers.Dense(1, activation='sigmoid'))\n \n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n return model", "def neural_net(self, layers):\n model = nn.Sequential()\n for l in range(0, len(layers) - 1):\n model.add_module(\"layer_\"+str(l), nn.Linear(layers[l],layers[l+1], bias=True))\n if l != len(layers) - 2:\n model.add_module(\"tanh_\"+str(l), nn.Tanh())\n\n return model", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Atrous Pyramid Pooling. 
Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # average the feature map over its height and width\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def build_neural_net(X, filename=None):\n Y1, weights1 = build_layer(X, input_size=784, output_size=300)\n Y2, weights2 = build_layer(Y1, input_size=300, output_size=10, activation=nnet.softmax)\n\n if filename != None:\n saved_weights = np.load(filename)\n weights1.set_value(np.asarray(saved_weights[0], dtype=theano.config.floatX))\n weights2.set_value(np.asarray(saved_weights[1], dtype=theano.config.floatX))\n\n return Y2, weights1, weights2", "def genModel():\n inp = (160, 320, 3) # initial image size\n oup1 = (160, 320, 1) # gray image size\n oup2 = (80, 320, 1) # cropped image size\n\n model = Sequential()\n model.add(Lambda(color2gray, input_shape = inp, output_shape= oup1))\n # crop top 50 pixels, bottom 30 pixels, left/right 0 pixels\n model.add(Cropping2D(cropping=((50,30), (0,0))))\n # Preprocess incoming data, centered around zero with small standard deviation \n model.add(Lambda(lambda x: x/127.5 - 1., output_shape= oup2))\n model.add(Convolution2D(24,5,5,subsample=(1,2), activation=\"relu\"))\n model.add(Convolution2D(36,5,5,subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(48,5,5,subsample=(2,2), activation=\"relu\"))\n model.add(Convolution2D(64,3,3, activation=\"relu\"))\n model.add(Convolution2D(64,3,3, activation=\"relu\"))\n model.add(Flatten())\n model.add(Dropout(0.3))\n model.add(Dense(180, activation=\"relu\"))\n model.add(Dense(60))\n model.add(Dense(10, activation=\"relu\"))\n model.add(Dense(1))\n # print layer size for each model layer\n for layer in model.layers:\n print(layer.get_output_at(0).get_shape().as_list())\n return model", "def train_model():\n\n if python_version == 2 :\n if num_hidden is None:\n num_hidden = int(raw_input('Enter number of hidden layers: '))\n if num_neuron is None:\n num_neuron = int(raw_input('Enter number of neurons in each hidden layer: '))\n else:\n if num_hidden is None:\n num_hidden = int(input('Enter number of hidden layers: '))\n if num_neuron is None:\n num_neuron = int(input('Enter number of neurons in each hidden layer: '))\n\n print('Activations are LeakyReLU. Optimizer is ADAM. Batch size is 32.' 
+ \\\n 'Fully connected network without dropout.')\n\n # Construct model\n model = Sequential()\n\n # Add input layer.\n # MNIST dataset: each image is a 28x28 pixel square (784 pixels total).\n model.add(Flatten(input_shape=(1, 28, 28)))\n\n # Add hidden layers.\n for _ in range(num_hidden):\n model.add(Dense(num_neuron, use_bias=False))\n model.add(LeakyReLU(alpha=.01))\n\n # Add output layer\n model.add(Dense(10, activation='softmax', use_bias=False))\n\n # Compile the model\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n # Print information about the model\n print(model.summary())\n\n X_train, Y_train, X_test, Y_test = load_data()\n X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train,\n test_size=1/6.0,\n random_state=seed)\n\n # Fit the model\n model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)\n\n print(\"Save the model\")\n model_name = __save_trained_model(model, num_hidden, num_neuron)\n\n print(\"Training done\")\n\n return model_name, model", "def buildFirstModel():\n model = build(IMAGE_HEIGHT, IMAGE_WIDTH, 3, y.shape[1], finalAct=\"sigmoid\")\n opt = Adam(lr=INIT_LE, decay=INIT_LE / EPOCHS)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"acc\"])", "def make_neural_net_challenging():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n wt7 = random_weight()\n wt8 = random_weight()\n wt9 = random_weight()\n wt10 = random_weight()\n\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n wAD = Weight('wAD', wt7)\n wBD = Weight('wBD', wt8)\n wD = Weight('wD', -1)\n wCE = Weight('wCE', wt9)\n wDE = Weight('wDE', wt10)\n wE = Weight('wE', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B = Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n D = Neuron('D', [A,B,i0], [wAD,wBD,wD])\n E = Neuron('E', [C,D,i0], [wCE,wDE,wE])\n P = PerformanceElem(E, 0.0)\n\n net = Network(P,[A, B, C, D, E])\n return net", "def nn(data):\n input_nodes = 3\n hidden_layer_1 = 10\n hidden_layer_2 = 10\n output_layer = 5\n\n # PyBrain dataset: SupervisedDataSet(input_dim, target_dim)\n training_set = SupervisedDataSet(input_nodes, output_layer)\n\n net = buildNetwork(input_nodes, hidden_layer_1, hidden_layer_2, output_layer, bias=True, hiddenclass=TanhLayer)", "def build_neural_net(net_def):\n\n populated_def = net_def.copy()\n\n for layer in populated_def['layers']:\n for n in range(0, layer['num_neurons']):\n weights = layer['weights'][n]\n bias = layer['bias'][n]\n\n neuron = Neuron(weights, bias, layer['activation'])\n layer['neurons'].append(neuron)\n\n\n return populated_def", "def create_model_net(n_input,n_hidden,n_output):\n net = Sequential(\n L.Linear(n_input, n_hidden), F.relu,\n L.Linear(n_hidden, n_hidden), F.relu,\n L.Linear(n_hidden, n_output), F.softmax)\n return net", "def build_cnn(input_var=None):\n\n # input layer\n network = lasagne.layers.InputLayer(\n shape=(\n None,\n 1,\n 128,\n 129\n ),\n input_var=input_var\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n 
filter_size=(5, 5),\n stride=(1, 1), # Stride of (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(5, 5),\n #stride=(1, 1), # Stride of (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(3, 3),\n stride=(1, 1), # Stride of (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(3, 3),\n #stride=(1, 1), # Stride of (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # Fully-connected layer of 256 units with 50% dropout on its inputs\n network = lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p=.5),\n num_units=256,\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.HeUniform() # W initialization\n )\n\n # Finally add a 1-unit sigmoid output layer\n network = lasagne.layers.DenseLayer(\n network,\n num_units=1,\n nonlinearity=lasagne.nonlinearities.sigmoid\n )\n\n return network", "def train_CNN(self,member,input_data):\n trainX,trainY,validX,validY = input_data\n \n print('\\nTraining {0} models'.format(member))\n print('Training data shape {0}'.format(np.shape(trainX)))\n print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n print('Validation data shape {0}'.format(np.shape(validX)))\n print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n \n \n model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'\n print(model_file)\n if not os.path.exists(model_file):\n # Clear graphs\n tf.keras.backend.clear_session()\n \n #Initialize Convolutional Neural Net (CNN)\n model = models.Sequential()\n input_shape = np.shape(trainX[0])\n \n #First layer: input shape (y,x,# variables) \n #Add noise\n model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))\n for filters in [32,64,128]:\n model.add(layers.Conv2D(filters, (3,3),padding='same'))\n model.add(layers.Conv2D(filters, (3,3),padding='same'))\n model.add(layers.BatchNormalization())\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.MaxPooling2D())\n \n #Flatten the last convolutional layer \n model.add(layers.Flatten())\n model.add(layers.Dense(256))\n model.add(layers.LeakyReLU(alpha=0.3))\n model.add(layers.Dense(4,activation='softmax'))\n #Compile neural net\n model.compile(optimizer='adam',loss='categorical_crossentropy',\n metrics=[tf.keras.metrics.AUC()])\n 
print(model.summary())\n #fit neural net\n n_epochs = 10\n bs = 256\n\n #augment data\n aug = ImageDataGenerator(\n rotation_range=10,zoom_range=0.15,\n width_shift_range=0.2,height_shift_range=0.2,\n fill_mode=\"nearest\")\n \n train_generator = aug.flow(trainX,trainY,batch_size=bs)\n conv_hist = model.fit(\n train_generator,steps_per_epoch=len(trainX) // bs,\n epochs=n_epochs,verbose=1,class_weight=self.class_percentages)\n #save trained model\n model.save(model_file)\n print(f'Writing out {model_file}')\n else:\n model = tf.keras.models.load_model(model_file)\n print(f'\\nOpening {model_file}\\n')\n\n del trainY,trainX\n \n threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'\n if os.path.exists(threshold_file): \n del validX,validY\n return\n \n self.validate_CNN(model,validX,validY,threshold_file)\n return", "def neural_net(X, Y):\n model = keras.Sequential([\n keras.layers.Dense(32, input_dim=len(X[0])),\n keras.layers.Activation(\"relu\"),\n keras.layers.Dense(32),\n keras.layers.Activation(\"relu\"),\n keras.layers.Dense(128),\n keras.layers.Activation(\"relu\"),\n keras.layers.Dense(128),\n keras.layers.Activation(\"relu\"),\n keras.layers.Dense(32),\n keras.layers.Activation(\"relu\"),\n keras.layers.Dense(1, activation='sigmoid')\n ])\n\n # Use the adam optimizer\n adam = keras.optimizers.Adam(lr=0.01)\n\n model.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])\n \n # Training\n model.fit(X, Y, epochs=20, validation_split=0.1)\n\n return model", "def build_model():\n model = Sequential()\n model.add(Dense(beer_emb.EMB_DIM, activation=\"relu\",\n input_dim=beer_emb.EMB_DIM))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'], optimizer='adam')\n\n return model", "def Network_model(input_data):\n layer1_param={'weights':tf.Variable(tf.random_normal([784, no_neurons_layer1])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer1]))}\n \n layer2_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer1, no_neurons_layer2])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer2]))}\n \n layer3_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer2, no_neurons_layer3])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer3]))}\n \n layer4_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer3, no_neurons_layer4])), \n 'biases': tf.Variable(tf.random_normal([no_neurons_layer4]))}\n \n output_layer_param={'weights':tf.Variable(tf.random_normal([no_neurons_layer4, no_classes])), \n 'biases': tf.Variable(tf.random_normal([no_classes]))}\n \n #so up to this point the weights for each layer are initialized\n \n \"\"\"\n Now, what happens in each layer I will define next: 
the weights of each layer are multiplied\n with the corresponding inputs, the result is passed through an activation function \n (ReLU in this case), and the output is fed as input to the next layer.\n sign:B-Jan\n \"\"\"\n \n l1_output= tf.add(tf.matmul(input_data,layer1_param['weights']), layer1_param['biases'])\n l1_output=tf.nn.relu(l1_output)\n \n l2_output= tf.add(tf.matmul(l1_output,layer2_param['weights']), layer2_param['biases'])\n l2_output=tf.nn.relu(l2_output)\n \n \n l3_output= tf.add(tf.matmul(l2_output,layer3_param['weights']), layer3_param['biases'])\n l3_output=tf.nn.relu(l3_output)\n \n l4_output= tf.add(tf.matmul(l3_output,layer4_param['weights']), layer4_param['biases'])\n l4_output=tf.nn.relu(l4_output)\n \n #The final output layer\n output= tf.matmul(l4_output, output_layer_param['weights'])+output_layer_param['biases']\n \n return output # contains the output of the last output layer", "def demoModel(dim, num_classes):\n import numpy as np\n from keras.models import Sequential, Model\n from keras.layers import Input\n from keras.layers import Conv2D, ZeroPadding2D, MaxPooling2D, Conv2DTranspose, Cropping2D\n from keras.layers import concatenate, UpSampling2D, Reshape\n import keras.backend as K\n\n # Build model\n input_image = Input(shape=(dim, dim, 3))\n\n conv = Conv2D(24, (3, 3), activation='relu', padding='same')(input_image)\n\n pool = MaxPooling2D((2, 2), strides=(2, 2), name=\"pool\")(conv)\n\n conv1x1 = Conv2D(24, (1, 1), padding='same', activation='relu')(pool)\n\n up = UpSampling2D(size=(2,2))(conv1x1)\n up_conv = Conv2D(24, 2, activation = 'relu', padding = 'same')(up)\n merge = concatenate([conv,up_conv], axis = 3)\n\n conv = Conv2D(12, 3, activation = 'relu', padding = 'same')(merge)\n\n activation = Conv2D(num_classes, (1, 1), activation = \"softmax\")(conv)\n\n # need to reshape for training\n output = Reshape((dim*dim, num_classes))(activation)\n\n model = Model(inputs=[input_image], outputs=output)\n\n model.summary()\n\n return model" ]
[ "0.76426136", "0.737351", "0.73625165", "0.7330011", "0.7327857", "0.7225237", "0.7213768", "0.7154049", "0.71528256", "0.7138571", "0.710223", "0.7046471", "0.70461565", "0.70441526", "0.7029958", "0.7021955", "0.7017469", "0.7014341", "0.7011238", "0.70006037", "0.69785774", "0.6968724", "0.69613415", "0.69610095", "0.6958576", "0.6951852", "0.6950675", "0.693736", "0.6902005", "0.68925685", "0.68910617", "0.6885499", "0.6880795", "0.6879498", "0.68621844", "0.6856228", "0.6854706", "0.6854114", "0.684805", "0.68459255", "0.6843871", "0.68420756", "0.68388444", "0.6836423", "0.6823053", "0.68190956", "0.6816249", "0.6815885", "0.6815845", "0.6814629", "0.68140304", "0.6800382", "0.67931914", "0.6786349", "0.6778423", "0.6775009", "0.67733383", "0.6758714", "0.6755177", "0.6755028", "0.67548424", "0.6737128", "0.6729466", "0.6722907", "0.6715936", "0.67095095", "0.670901", "0.6707293", "0.6699304", "0.6698099", "0.6696481", "0.6695246", "0.6686566", "0.6686384", "0.66849494", "0.668317", "0.6680673", "0.6677813", "0.66606826", "0.6657041", "0.66444874", "0.6639683", "0.66380954", "0.6637531", "0.6636757", "0.66319495", "0.6631708", "0.66297555", "0.66143906", "0.66134137", "0.6612944", "0.6610235", "0.66069746", "0.65998244", "0.659791", "0.65949774", "0.65875006", "0.6581437", "0.65785366", "0.6575617" ]
0.67458504
61
Plots accuracy/loss for training/validation set as a function of the epochs
def plot_history(history): fig, axs = plt.subplots(2) # create accuracy subplot axs[0].plot(history.history["accuracy"], label="accuracy") axs[0].plot(history.history['val_accuracy'], label="val_accuracy") axs[0].set_ylabel("Accuracy") axs[0].legend(loc="lower right") axs[0].set_title("Accuracy evaluation") # create loss subplot axs[1].plot(history.history["loss"], label="loss") axs[1].plot(history.history['val_loss'], label="val_loss") axs[1].set_xlabel("Epoch") axs[1].set_ylabel("Loss") axs[1].legend(loc="upper right") axs[1].set_title("Loss evaluation") plt.show()
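A minimal usage sketch for the document above (the toy data, model, and training settings below are illustrative assumptions, not part of the dataset entry; all it relies on is a tf.keras model compiled with an "accuracy" metric and fitted with a validation split, so that history.history contains the four keys plot_history reads):

# Hypothetical driver for plot_history; assumes TensorFlow/Keras and matplotlib are installed.
import numpy as np
import tensorflow as tf

X = np.random.rand(256, 8).astype("float32")   # toy inputs, illustrative only
y = (X.sum(axis=1) > 4.0).astype("float32")    # toy binary labels

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(1, activation="sigmoid"),
])
# "accuracy" must be listed so history.history gets "accuracy"/"val_accuracy"
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# validation_split populates "val_loss" and "val_accuracy"
history = model.fit(X, y, epochs=10, validation_split=0.2, verbose=0)

plot_history(history)  # accuracy and loss curves per epoch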
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_accuracy(self):\n plot_title, img_title = self.prep_titles(\"\")\n test_legend = ['training data', 'test data']\n\n # Data for plotting x- and y-axis\n x = np.arange(1, CFG.EPOCHS + 1)\n y = [self.tr_accuracy, self.test_accuracy]\n\n # prints x and y-axis values\n print(f'x: {x}')\n print(f'training: {self.tr_accuracy}')\n print(f'test: {self.test_accuracy}')\n\n plt.figure(figsize=(CFG.FIG_WIDTH, CFG.FIG_HEIGHT))\n\n # Create the lineplot\n for line in range(2):\n ax = sns.lineplot(x=x, y=y[line], color=CFG.COLOR_ACCURACY[line], label=test_legend[line])\n\n if CFG.ANNOTATE:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS + 2),\n ylim=(0, 119))\n\n for line in range(2):\n for e in range(0, CFG.EPOCHS):\n if y[line][e] > CFG.ANNOTATE_LEVEL:\n value = \"{:.2f}\".format(y[line][e])\n label = \"epoch \" + str(e + 1) + \"\\n\" + value + \"%\"\n plt.annotate(label,\n xy=(x[e], y[line][e]),\n alpha=1,\n size=9,\n rotation=45,\n textcoords='offset pixels', xytext=(0, 7),\n ha='left', va='bottom')\n else:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS),\n ylim=(0, 102))\n\n ax.legend(loc='best')\n\n self.save_plot(img_title)\n plt.show()", "def plot_accuracy(model_fit, save_folder): \n train_acc = model_fit.history['binary_accuracy']\n val_acc = model_fit.history['val_binary_accuracy']\n epoch_axis = np.arange(1, len(train_acc) + 1)\n plt.title('Train vs Validation Accuracy')\n plt.plot(epoch_axis, train_acc, 'b', label='Train Acc')\n plt.plot(epoch_axis, val_acc,'r', label='Val Acc')\n plt.xlim([1, len(train_acc)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_acc) / 10) + 0.5)))\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/accuracy.png')\n plt.show()\n plt.close()", "def cross_validation_visualization_accuracy(epochs, accs, save=False, filename=\"cross_validation_acc\"):\n plt.plot(epochs, accs, marker=\".\", color='r', label='accuracy')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def train_visualization(output_path): \n log_path = output_path + 'output.log'\n Train_Cost, Valid_Cost, Test_Cost, Train_Acc, Valid_Acc, Test_Acc = log_reader(log_path)\n n_epoch = len(Train_Cost)\n\n x1 = range(n_epoch)\n x2 = range(n_epoch)\n y1 = Train_Cost\n y2 = Valid_Cost\n y3 = Test_Cost\n y4 = Train_Acc\n y5 = Valid_Acc\n y6 = Test_Acc\n plt.subplot(2, 1, 1)\n plt.plot(x1, y1, label=\"Train_Cost\", linewidth=2)\n plt.plot(x1, y2, label=\"Valid_Cost\", linewidth=2)\n plt.plot(x1, y3, label=\"Test_Cost\", linewidth=2)\n\n plt.title('binary cross entropy vs. epoches')\n plt.ylabel('binary cross entropy')\n plt.legend(loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(x2, y4, label=\"Train_Acc\", linewidth=2)\n plt.plot(x2, y5, label=\"Valid_Acc\", linewidth=2)\n plt.plot(x2, y6, label=\"Test_Acc\", linewidth=2)\n plt.xlabel('Accuracy@20 vs. 
epochs')\n plt.ylabel('Accuracy@20')\n plt.legend(loc='best')\n plt.savefig(output_path + 'loss_fig.png')\n # plt.show()", "def accuracy_plot(training, test, layers, data_size, n_neighbours, learning_rate, dropout_rate):\n\n plt.figure()\n plt.plot(training, label=\"Training\")\n plt.plot(test, label=\"Test\")\n plt.xlabel(\"Iterations\", size='medium')\n plt.ylabel(\"Accuracy function (%)\", size='medium')\n plt.suptitle(\"Accuracy function while training the neural network\", size='medium', ha='center')\n plt.title(\"layers: {} with dropout rate of {}, learning rate: {}\".format(layers, dropout_rate, learning_rate),\n size='small', ha='center')\n if n_neighbours == 0:\n plt.figtext(0.83, 0.80, \"Neighbours\\nexcluded\", size='medium')\n else:\n plt.figtext(0.83, 0.80, \"{} neighbours\\nincluded\".format(n_neighbours), size='medium')\n plt.figtext(0.83, 0.70, \"{}\\nsamples\".format(data_size), size='medium')\n plt.legend(loc='right', bbox_to_anchor=(1.3, 0.5))\n plt.subplots_adjust(right=0.8)\n\n working_dir = os.path.dirname(os.path.abspath(__file__))\n saving(working_dir + \"/output_ANN/accuracy_plots/{}_accuracy_{}\".format(n_neighbours, data_size))", "def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)", "def plot_acc(model_dir):\n ## extract loss from csv\n file_dir = os.path.join(model_dir, 'acc.csv')\n data = pd.read_csv(file_dir)\n epochs = data['epoch'].ravel()\n acc_train = data['acc_train'].ravel()\n acc_test = data['acc_test'].ravel()\n # epoch,acc_train,acc_test\n\n ## Theoretical Loss\n fig, ax = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True, dpi=400)\n ax.plot(epochs, acc_train, label='train', color='green', alpha=0.8)\n ax.plot(epochs, acc_test, label='test', color='red', alpha=0.8)\n ax.set_ylabel('Accuracy', fontsize=10)\n ax.set_xlabel('Epoch', fontsize=10)\n ax.legend(loc='lower right', prop={\"size\": 15}, ncol=3, framealpha=0.5)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.tight_layout()\n\n ## create saving directory\n acc_dir = os.path.join(model_dir, 'figures', 'acc')\n os.makedirs(acc_dir, exist_ok=True)\n file_name = os.path.join(acc_dir, 'accuracy.png')\n plt.savefig(file_name, dpi=400)\n print(\"Plot saved to: {}\".format(file_name))\n file_name = os.path.join(acc_dir, 'accuracy.pdf')\n plt.savefig(file_name, dpi=400)\n plt.close()\n print(\"Plot saved to: {}\".format(file_name))", "def 
plot_acc(acc_v, acc_t, save_plots_path):\n\n plt.figure()\n plt.plot(acc_v, label='Validation acc')\n plt.plot(acc_t, label='Training acc')\n plt.legend()\n title = 'Accuracy per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.savefig(save_plots_path + \"swag_accuracy_plot.png\")", "def plot_loss_curves(results):\n loss = results[\"train_loss\"]\n test_loss = results[\"test_loss\"]\n\n accuracy = results[\"train_acc\"]\n test_accuracy = results[\"test_acc\"]\n\n epochs = range(len(results[\"train_loss\"]))\n\n plt.figure(figsize=(15, 7))\n\n # Plot loss\n plt.subplot(1, 2, 1)\n plt.plot(epochs, loss, label=\"train_loss\")\n plt.plot(epochs, test_loss, label=\"test_loss\")\n plt.title(\"Loss\")\n plt.xlabel(\"Epochs\")\n plt.legend()\n\n # Plot accuracy\n plt.subplot(1, 2, 2)\n plt.plot(epochs, accuracy, label=\"train_accuracy\")\n plt.plot(epochs, test_accuracy, label=\"test_accuracy\")\n plt.title(\"Accuracy\")\n plt.xlabel(\"Epochs\")\n plt.legend()", "def cross_validation_visualization_accuracy_multiple(epochs, accs, save=False, filename=\"cross_validation_acc_multiple\"):\n \n for i in range(accs.shape[0]):\n plt.plot(epochs, accs[i], marker=\".\", color='r', label=str(i+1)+'th accuracy')\n \n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc, save_figure_path):\n\n green = '#72C29B'\n orange = '#FFA577'\n\n with plt.xkcd():\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n ax1.set_title('Model loss through #epochs', fontweight='bold')\n\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n ax2.set_title('Model accuracy through #epochs', fontweight='bold')\n\n plt.tight_layout()\n plt.show()\n fig.savefig(save_figure_path)\n plt.close(fig)", "def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):\n \n green = '#72C29B'\n orange = '#FFA577'\n \n with plt.xkcd():\n # plot model loss\n fig, ax1 = plt.subplots()\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n # plot model accuracy\n fig, ax2 = plt.subplots()\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n plt.show()", "def main():\n args = parse_args()\n\n with open(args.train_details_json, mode='r', encoding='utf-8') as json_f:\n results_dict = 
json.load(json_f)[-1]\n\n losses_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_loss']) + 1),\n results_dict['train_loss'])\n plt.plot(range(1, len(results_dict['val_loss']) + 1),\n results_dict['val_loss'])\n plt.plot(range(1, len(results_dict['test_loss']) + 1),\n results_dict['test_loss'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'loss vs epoch for {args.model} model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.grid(True)\n losses_plot.set_size_inches((8, 8))\n losses_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_losses_plot.png'))\n\n accuracies_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_acc']) + 1),\n results_dict['train_acc'])\n plt.plot(range(1, len(results_dict['val_acc']) + 1),\n results_dict['val_acc'])\n plt.plot(range(1, len(results_dict['test_acc']) + 1),\n results_dict['test_acc'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'accuracy vs epoch for {args.model} '\n f'model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n plt.grid(True)\n accuracies_plot.set_size_inches((8, 8))\n accuracies_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_accuracies_plot.png'))", "def acc_loss_graph(self):\n acc = self.history['accuracy']\n val_acc = self.history['val_accuracy']\n loss = self.history['loss']\n val_loss = self.history['val_loss']\n plt.figure(figsize=(15, 5))\n plt.subplot(1, 2, 1)\n plt.plot(acc, label='Train')\n plt.plot(val_acc, label='Val')\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.ylim([min(plt.ylim()), 1])\n plt.title('Accuracy')\n\n plt.subplot(1, 2, 2)\n plt.plot(loss, label='Train')\n plt.plot(val_loss, label='Val')\n plt.legend(loc='lower right')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.ylim([0, max(plt.ylim())])\n plt.title('Loss')\n plt.show();", "def plot_acc (history, acc='acc', val_acc='val_acc'):\n \n history_dict = history.history\n acc = history_dict[acc]\n val_acc = history_dict[val_acc]\n loss_values = history_dict['loss']\n epochs = range(1, len(loss_values) + 1)\n\n plt.plot (epochs, acc, 'bo', label='Training accuracy')\n plt.plot (epochs, val_acc, 'b', label=\"validation accuracy\")\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.show()", "def plot_loss_vs_epoch(history, var_train, var_val, show=False):\n plt.figure(figsize=(10, 8))\n plt.grid(True)\n plt.plot(history.history['loss']/var_train, marker=\"o\")\n plt.plot(history.history['val_loss']/var_val, marker=\"o\")\n plt.title('Model Loss')\n plt.ylabel('Loss (Normalised to variance of dataset)')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'])\n # plt.ylim(bottom=0)\n filename = \"img/\"\n filename += datetime.now().strftime(\"%y%m%d_%H%M\")\n filename += \"_model_loss.png\"\n plt.savefig(filename)\n\n if show:\n plt.show()", "def train_nn(train_nn_results, label, title, yaxis):\n plt.figure(figsize=(12,5))\n for i in range(len(label)):\n plt.plot(train_nn_results[i], label=label[i], alpha=0.75)\n plt.title(title)\n plt.xlabel('epoch')\n plt.ylabel(yaxis)\n plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')\n plt.tight_layout()\n plt.show()", "def train_model_and_plot_results(**kwargs):\n epochs = kwargs[\"epochs\"]\n\n history, model = train_model(**kwargs)\n\n dice = history.history['dice_loss']\n val_dice = history.history['val_dice_loss']\n\n loss = history.history['loss']\n val_loss = 
history.history['val_loss']\n\n epochs_range = range(epochs)\n\n plt.figure(figsize=(16, 8))\n plt.subplot(1, 2, 1)\n plt.plot(epochs_range, dice, label='Training Dice Loss')\n plt.plot(epochs_range, val_dice, label='Validation Dice Loss')\n plt.legend(loc='upper right')\n plt.title('Training and Validation Dice Loss')\n\n plt.subplot(1, 2, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='upper right')\n plt.title('Training and Validation Loss')\n\n plt.show()", "def plot_training_history(history):\n fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(15, 5))\n ax_loss.plot(history.epoch, history.history[\"loss\"], label=\"Train loss\")\n ax_loss.plot(history.epoch, history.history[\"val_loss\"], label=\"Validation loss\")\n ax_loss.legend()\n ax_acc.plot(history.epoch, history.history[\"iou_score\"], label=\"Train iou\")\n ax_acc.plot(history.epoch, history.history[\"val_iou_score\"], label=\"Validation iou\")\n ax_acc.legend()", "def save_accuracy_chart(self):\n history = self.model.history.history\n fig = plt.figure()\n plt.plot(history['accuracy'], label='Training Accuracy')\n plt.plot(history['val_accuracy'],label='Validation Set Accuracy')\n plt.legend()\n fig.savefig('model_accuracy.png')", "def plot_accuracy_and_loss(histories=None):\n fig = subplots.make_subplots(rows=2, cols=2, subplot_titles=('Training accuracy', 'Validation accuracy',\n 'Training loss ', 'Validation loss'))\n\n def append_trace(model_name, acc, val_acc, loss, val_loss, epochs):\n e = list(range(epochs))\n color = random.choice(hex_colors_only)\n trace_ta = create_trace(e, acc, model_name, color)\n trace_va = create_trace(e, val_acc, model_name, color)\n trace_tl = create_trace(e, loss, model_name, color)\n trace_vl = create_trace(e, val_loss, model_name, color)\n\n fig.append_trace(trace_ta, 1, 1)\n fig.append_trace(trace_va, 1, 2)\n fig.append_trace(trace_tl, 2, 1)\n fig.append_trace(trace_vl, 2, 2)\n\n if histories is None:\n df_accuracies, df_losses = get_tensorboard_scalars()\n for model_name in df_accuracies.model_name.unique():\n df_acc = df_accuracies.loc[df_accuracies.model_name == model_name]\n df_l = df_losses.loc[df_losses.model_name == model_name]\n\n acc = df_acc.loc[df_acc.result_of == 'train'].accuracy.values.tolist()\n val_acc = df_acc.loc[df_acc.result_of == 'validation'].accuracy.values.tolist()\n loss = df_l.loc[df_l.result_of == 'train'].loss.values.tolist()\n val_loss = df_l.loc[df_l.result_of == 'validation'].loss.values.tolist()\n epochs = len(df_acc)\n\n append_trace(model_name, acc, val_acc, loss, val_loss, epochs)\n\n else:\n for model_name, history in histories.items():\n acc = history['accuracy']\n val_acc = history['val_accuracy']\n loss = history['loss']\n val_loss = history['val_loss']\n epochs = list(range(1, len(acc) + 1))\n\n append_trace(model_name, acc, val_acc, loss, val_loss, epochs)\n fig['layout']['xaxis'].update(title='Epoch')\n fig['layout']['xaxis2'].update(title='Epoch')\n fig['layout']['yaxis'].update(title='Accuracy', range=[0, 1])\n fig['layout']['yaxis2'].update(title='Loss', range=[0, 1])\n\n iplot(fig, filename='accuracies-losses')", "def show_training(history: tf.keras.callbacks.History) -> None:\n hist = history.history\n\n if \"loss\" not in hist:\n print(\"Error: 'loss' values not found in the history\")\n return\n\n # plot training\n plt.figure(figsize=(14, 4))\n plt.subplot(121)\n plt.plot(hist[\"loss\"], label=\"Training\")\n if \"val_loss\" in hist:\n 
plt.plot(hist[\"val_loss\"], label=\"Validation\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n plt.legend()\n\n if \"accuracy\" in hist:\n plt.subplot(122)\n plt.plot(hist[\"accuracy\"], label=\"Training\")\n if \"val_accuracy\" in hist:\n plt.plot(hist[\"val_accuracy\"], label=\"Validation\")\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.legend()\n\n plt.suptitle(\"Training history\")\n plt.show()\n\n # show final results\n print(\"\\nTraining loss: \\t{:.4f}\".format(hist[\"loss\"][-1]))\n if \"val_loss\" in hist:\n print(\"Validation loss: \\t{:.4f}\".format(hist[\"val_loss\"][-1]))\n if \"accuracy\" in hist:\n print(\"\\nTraining accuracy: \\t{:.3f}\".format(hist[\"accuracy\"][-1]))\n if \"val_accuracy\" in hist:\n print(\"Validation accuracy:\\t{:.3f}\".format(hist[\"val_accuracy\"][-1]))", "def plot_train_history(self):\n plt.figure()\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.plot(self.train_history.history['loss'])\n plt.plot(self.train_history.history['val_loss'])\n plt.legend(['Training', 'Validation'])\n\n plt.show()", "def model_testing(X_train,y_train):\n\n # for testing amount of layers, each layer has 32 neurons\n # layers = [[32, 32], [32, 32, 32], [32, 32, 32, 32], [32, 32, 32, 32],\\\n # [32, 32, 32, 32, 32], [32, 32, 32, 32, 32, 32]]\n layers = [[8], [16], [32], [64], [128], [256]]\n\n # activation = [\"linear\", \"sigmoid\", \"relu\", \"softmax\"]\n activation = [\"relu\"]\n runs = 1\n for i, act in enumerate(activation):\n val_accs = []\n for layer in layers:\n acc_avg = []\n for run in range(runs):\n model = create_model_testing(layer, act)\n\n # train model on full train set, with 80/20 CV split\n training = model.fit(X_train, y_train, epochs=100, validation_split=0.2, verbose=0)\n val_acc = np.mean(training.history['val_accuracy'])\n print(\"Run \", run, \" - \", act + \" activation - layer \" + str(layer))\n acc_avg.append(val_acc)\n\n # save average accuracy of runs\n val_accs.append(round(np.mean(acc_avg)*100, 2))\n print(\"accuracy: \" + str(np.mean(acc_avg)))\n\n # plot line for each activation method\n plt.plot([1,2,4,8,16,32,64,128,256], val_accs, label=act)\n # plt.plot(val_accs, label=act)\n\n # plotting\n plt.title(\"Accuracy of neural network model with different layers (N=\" +\\\n str(len(layers)) + \")\", fontsize=22)\n plt.xlabel(\"Layers\", fontsize=20)\n # plt.xticks(np.arange(1, len(val_accs) + 1, 1), fontsize=18)\n plt.ylabel(\"Accuracy (%)\", fontsize=20)\n plt.legend()\n plt.subplots_adjust(bottom=.15, left=.15)\n plt.savefig(\"results/linear-relu-\" + str(runs) + \"runs.png\")\n plt.show()", "def plot_loss_and_acc(history):\n hist = history.history\n x_arr = np.arange(len(hist['loss'])) + 1\n fig = plt.figure(figsize=(12,4))\n ax = fig.add_subplot(1,2,1)\n ax.plot(x_arr, hist['loss'], '-o', label='Train loss')\n ax.plot(x_arr, hist['val_loss'], '--<', label='Validation loss')\n ax.legend(fontsize=15)\n ax.set_xlabel('Epoch', size=15)\n ax.set_ylabel('Loss', size=15)\n\n ax = fig.add_subplot(1,2,2)\n ax.plot(x_arr, hist['accuracy'], '-o', label='Train acc.')\n ax.plot(x_arr, hist['val_accuracy'], '--<', label='Validation acc.')\n ax.legend(fontsize=15)\n ax.set_xlabel('Epoch', size=15),\n ax.set_ylabel('Accuracy', size=15)\n plt.show()", "def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)", "def acc_plotter(model_history, ax = None):\n \n import matplotlib.pyplot as plt\n import seaborn as sns\n \n training_loss = model_history['loss']\n\n acc = model_history['accuracy']\n \n val_acc = 
model_history['val_accuracy']\n\n epoch_count = range(1,len(training_loss)+1)\n \n sns.set(font_scale=1.15)\n \n sns.lineplot(\n x=epoch_count,\n y=acc,\n ax=ax\n )\n \n sns.lineplot(\n x=epoch_count,\n y=val_acc,\n ax=ax\n )\n\n ax.set_title('Accuracy Curves: Pre-Trained VGG-16 with 2 Trained Layers',fontsize=19)\n \n ax.set_ylabel('Accuracy',fontsize=18)\n \n ax.set_xlabel('Epochs',fontsize=18)\n\n plt.legend(['Training Accuracy', 'Validation Accuracy'])\n\n plt.show()", "def plotHistory(history):\n \n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(len(acc))\n \n # Make and save the plot for our accuracy\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.legend()\n plt.savefig(\"trainValAccSecond.png\")\n\n # Make and save the plots for our loss \n plt.figure()\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n plt.show()\n plt.savefig(\"trainValLossSecond.png\")", "def plot_loss(model_fit, save_folder): \n train_loss = model_fit.history['loss']\n val_loss = model_fit.history['val_loss']\n epoch_axis = np.arange(1, len(train_loss) + 1)\n plt.title('Train vs Validation Loss')\n plt.plot(epoch_axis, train_loss, 'b', label='Train Loss')\n plt.plot(epoch_axis, val_loss,'r', label='Val Loss')\n plt.xlim([1, len(train_loss)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_loss) / 10) + 0.5)))\n plt.legend(loc='upper right')\n plt.ylabel('Loss')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/loss.png')\n plt.show()\n plt.close()", "def plot_training_history(history, metric):\n \n val_metric = 'val_'+metric\n acc = history.history[metric]\n val_acc = history.history[val_metric]\n \n loss = history.history['loss']\n val_loss = history.history['val_loss']\n \n epochs_range = history.epoch\n \n plt.figure(figsize=(8, 8))\n plt.subplot(2, 1, 1)\n plt.plot(epochs_range, acc, label='Training Acc.')\n plt.plot(epochs_range, val_acc, label='Validation Acc.')\n plt.legend(loc='best',)\n plt.title('Training and Validation Accuracy')\n \n plt.subplot(2, 1, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='best')\n plt.title('Training and Validation Loss')\n plt.show()", "def make_accuracy_plot(num_trials=10):\n data = load_digits()\n # print data.DESCR\n train_percentages = range(5, 95, 5)\n test_accuracies = numpy.zeros(len(train_percentages))\n\n for i in range(len(train_percentages)):\n individual_trial_accuracies = []\n for j in range(num_trials):\n X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size=train_percentages[i]*.01)\n model = LogisticRegression(C=10**-10)\n model.fit(X_train, y_train)\n individual_trial_accuracies.append(model.score(X_test, y_test))\n test_accuracies[i] = numpy.mean(individual_trial_accuracies)\n\n fig = plt.figure()\n plt.plot(train_percentages, test_accuracies, 'b')\n plt.xlabel('Percentage of Data Used for Training')\n plt.ylabel('Accuracy on Test Set')\n plt.show()", "def plot_training_info(case, metrics, save, history):\n val = False\n if 'val_accuracy' in history and 'val_loss' in history:\n val = True\n plt.ioff()\n if 'accuracy' in metrics:\n fig = plt.figure()\n 
plt.plot(history['accuracy'])\n if val:\n plt.plot(history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'accuracy.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)\n\n # summarize history for loss\n if 'loss' in metrics:\n fig = plt.figure()\n plt.plot(history['loss'])\n if val:\n plt.plot(history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # plt.ylim(1e-3, 1e-2)\n plt.yscale(\"log\")\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'loss.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)", "def plot_eval_1(trained_model, image_name):\n # Get training evaluation data\n train_accuracy = trained_model.history['acc']\n train_val_accuracy = trained_model.history['val_acc']\n train_loss = trained_model.history['loss']\n train_val_loss = trained_model.history['val_loss']\n \n # Generate accuracy plot\n epochs = range(len(train_accuracy))\n plt.figure()\n plt.plot(epochs, train_accuracy, 'bo', label='Training accuracy')\n plt.plot(epochs, train_val_accuracy, 'b', label='Validation accuracy')\n plt.title('Training and validation accuracy')\n plt.legend()\n \n # Save accuracy plot\n plot_file = os.path.join(OUTPUT_DIR,\n \"{}_training_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')\n \n # Generate loss plot\n plt.figure()\n plt.plot(epochs, train_loss, 'bo', label='Training loss')\n plt.plot(epochs, train_val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n \n # Save loss plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_training_loss\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')", "def plot_train_history(modell=config.model):\n history = pd.read_csv(f'model/this/{modell}_history.csv')\n epochs = len(history.epoch)\n\n plt.style.use(\"ggplot\")\n plt.rcParams['figure.figsize'] = (5, 9)\n plt.plot(np.arange(0, epochs), history[\"accuracy\"], label=\"model accuracy\", color=\"red\", zorder=10, linewidth=2)\n plt.plot(np.arange(0, epochs), history[\"loss\"], label=\"training loss\", color=\"blue\", zorder=9, linewidth=2)\n plt.plot(np.arange(0, epochs), history[\"val_accuracy\"], label=\"validation accuracy\", color=\"red\", zorder=1, linewidth=1, alpha= 0.4)\n plt.plot(np.arange(0, epochs), history[\"val_loss\"], label=\"validation loss\", color=\"blue\", zorder=2, linewidth=1, alpha= 0.4)\n plt.hlines(1.0,0, epochs, colors=\"black\", linestyles=\"dotted\")\n plt.title(f'Trening av modell: {modell}')\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy / Loss\")\n plt.ylim(0, 2.)\n plt.yticks(np.append(np.arange(0, 1., 0.05), (np.arange(1, 2., 0.2) )))\n\n plt.xlim(0, epochs)\n plt.legend(loc=\"upper right\")\n plt.tight_layout(True)\n\n xint = []\n locs, labels = plt.xticks()\n for each in locs:\n xint.append(int(each))\n plt.xticks(xint)\n\n plt.savefig(f'model/this/{modell}.png')\n plt.show()", "def visualize_training(features, labels, pl):\n print(\"Visualizing training\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Take out each feature type, one at a time\n label_map = get_label_map(labels)\n\n for key in label_map.keys():\n like_ind = label_map[key]\n like_data = np.array([features[i] for i in 
like_ind])\n\n        plt.scatter(like_data[:,0],like_data[:,1],label=key)\n\n    # get limits\n    xmin = features.column_min(0) - .5\n    xmax = features.column_max(0) + .5\n    ymin = features.column_min(1) - .5\n    ymax = features.column_max(1) + .5\n\n    plt.xlim(xmin,xmax)\n    plt.ylim(ymin,ymax)\n\n    # Track the current dividing line, as well as the number of epochs passed\n    divider, = plt.plot([],[])\n    epoch_tracker = plt.text(-1,.9, '', fontsize=15)\n\n    def update(i):\n        \"\"\"\n        1.) Get the next set of weights from the tracker\n        2.) Calculate and draw the new divider line\n        3.) Update the epoch counter\n        4.) If we are at the end of an epoch, plot a dashed divider line to track progress\n        \"\"\"\n        epoch = i//features.instance_count\n        w = pl.weights_tracker[i]\n        a = pl.accuracy_tracker[epoch]\n        divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n        epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n        # Keep a shadow of the hyperplane at the end of each epoch\n        if i % features.instance_count == 0:\n            plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n        return divider\n\n    ani = animation.FuncAnimation(fig, update, frames=range(len(pl.weights_tracker)), interval=250,repeat=False)\n    plt.legend()\n\n    # optional save file\n    if len(sys.argv) >= 3 :\n        ani.save(sys.argv[2], writer='imagemagick', fps=5)\n\n    plt.show()", "def display_convergence_acc(train_accs, valid_accs):\n    if len(valid_accs) > 0:\n        plt.plot(len(train_accs), train_accs, color=\"red\")\n        plt.plot(len(valid_accs), valid_accs, color=\"blue\")\n        plt.legend([\"Train\", \"Valid\"])\n    else:\n        plt.plot(len(train_accs), train_accs, color=\"red\")\n        plt.legend([\"Train\"])\n    plt.xlabel('Epoch')\n    plt.ylabel('Accuracy')\n    plt.show()", "def plot_training_history(history, title_str=''):\n    acc = history.history['accuracy']\n    val_acc = history.history['val_accuracy']\n    loss = history.history['loss']\n    val_loss = history.history['val_loss']\n    \n    epochs = range(len(acc))\n\n    plt.plot(epochs, acc, 'r', label='Training accuracy')\n    plt.plot(epochs, val_acc, 'b', label='Validation accuracy')\n    plt.title('Training and validation accuracy ' + title_str)\n\n    ymin = min([min(acc), min(val_acc)])\n\n    axes = plt.gca()\n    axes.set_xlim([0, len(acc)])\n    axes.set_ylim([ymin, 1])\n\n    plt.legend(loc=0)\n    plt.figure()\n    plt.show()\n    \n    plt.plot(epochs, loss, 'r', label='Training loss')\n    plt.plot(epochs, val_loss, 'b', label='Validation loss')\n    plt.title('Training and validation loss ' + title_str)\n\n    ymin = min([min(loss), min(val_loss)])\n\n    axes = plt.gca()\n    axes.set_xlim([0, len(loss)])\n    axes.set_ylim([ymin, 1])\n\n    plt.legend(loc=0)\n    plt.show()", "def plots(self, history):\n        print(history.history.keys())\n        plt.plot(history.history['acc'])\n        plt.plot(history.history['val_acc'])\n        plt.title('model accuracy')\n        plt.ylabel('accuracy')\n        plt.xlabel('epoch')\n        plt.legend(['train','test'], loc='upper left')\n        plt.show()\n\n        plt.plot(history.history['loss'])\n        plt.plot(history.history['val_loss'])\n        plt.title('model loss')\n        plt.ylabel('loss')\n        plt.xlabel('epoch')\n        plt.legend(['train','test'], loc='upper left')\n        plt.show()", "def plot_training_history(history, title_str=''):\n    acc = history.history['accuracy']\n    val_acc = history.history['val_accuracy']\n    loss = history.history['loss']\n    val_loss = history.history['val_loss']\n    \n    epochs = range(len(acc))\n\n    plt.plot(epochs, acc, 'r', label='Training accuracy')\n    plt.plot(epochs, val_acc, 'b', label='Validation accuracy')\n    plt.title('Training and 
validation accuracy ' + title_str)\n\n    ymin = min([min(acc), min(val_acc)])\n\n    axes = plt.gca()\n    axes.set_xlim([0, len(acc)])\n    axes.set_ylim([ymin, 1])\n\n    plt.legend(loc=0)\n    plt.figure()\n    plt.show()\n    \n    plt.plot(epochs, loss, 'r', label='Training loss')\n    plt.plot(epochs, val_loss, 'b', label='Validation loss')\n    plt.title('Training and validation loss ' + title_str)\n\n    ymin = min([min(loss), min(val_loss)])\n\n    axes = plt.gca()\n    axes.set_xlim([0, len(loss)])\n    axes.set_ylim([ymin, 1])\n\n    plt.legend(loc=0)\n    plt.figure()\n    plt.show()", "def show_plots(history):\n    loss_vals = history['loss']\n    val_loss_vals = history['val_loss']\n    epochs = range(1, len(history['accuracy'])+1)\n    \n    f, ax = plt.subplots(nrows=1,ncols=2,figsize=(16,4))\n    \n    # plot losses on ax[0]\n    ax[0].plot(epochs, loss_vals, color='navy',marker='o', linestyle=' ', label='Training Loss')\n    ax[0].plot(epochs, val_loss_vals, color='firebrick', marker='*', label='Validation Loss')\n    ax[0].set_title('Training & Validation Loss')\n    ax[0].set_xlabel('Epochs')\n    ax[0].set_ylabel('Loss')\n    ax[0].legend(loc='best')\n    ax[0].grid(True)\n    \n    # plot accuracies\n    acc_vals = history['accuracy']\n    val_acc_vals = history['val_accuracy']\n\n    ax[1].plot(epochs, acc_vals, color='navy', marker='o', ls=' ', label='Training Accuracy')\n    ax[1].plot(epochs, val_acc_vals, color='firebrick', marker='*', label='Validation Accuracy')\n    ax[1].set_title('Training & Validation Accuracy')\n    ax[1].set_xlabel('Epochs')\n    ax[1].set_ylabel('Accuracy')\n    ax[1].legend(loc='best')\n    ax[1].grid(True)\n    \n    plt.show()\n    plt.close()\n    \n    # delete locals from heap before exiting\n    del loss_vals, val_loss_vals, epochs, acc_vals, val_acc_vals", "def plot_loss(training_errors, validation_errors):\n    plt.xscale('Log')\n    plt.xlabel('Epochs')\n    plt.ylabel('Mean Actual Error')\n    plt.plot(training_errors, label = \"Training Error\", \\\n    color = 'blue')\n    plt.plot(validation_errors, label = \"Validation Error\", \\\n    color = 'red')\n    plt.legend()\n    # Saves plot automatically, adjust filename as needed.\n    plt.savefig('reservoir_05whdens_100h_7spec_test_3.png')\n    plt.show()", "def plot_eval_2(trained_model, image_name):\n    # Get training evaluation data\n    train_accuracy = trained_model.history['acc']\n    train_val_accuracy = trained_model.history['val_acc']\n    train_loss = trained_model.history['loss']\n    train_val_loss = trained_model.history['val_loss']\n    \n    # Generate accuracy plot\n    epochs = range(len(train_accuracy))\n    plt.figure()\n    plt.plot(epochs, train_accuracy, 'bo', label='Training accuracy')\n    plt.plot(epochs, train_val_accuracy, 'b', label='Validation accuracy')\n    plt.title('Training and validation accuracy')\n    plt.legend()\n    \n    # Save accuracy plot\n    plot_file = os.path.join(OUTPUT_DIR,\n                             \"{}_training_accuracy\".format(image_name))\n    plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')\n    \n    # Generate loss plot\n    plt.figure()\n    plt.plot(epochs, train_loss, 'bo', label='Training loss')\n    plt.plot(epochs, train_val_loss, 'b', label='Validation loss')\n    plt.title('Training and validation loss')\n    plt.legend()\n    \n    # Save loss plot\n    plot_file = os.path.join(OUTPUT_DIR, \"{}_training_loss\".format(image_name))\n    plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')", "def plot_train_results(metrics2record, loss_metric,\n                       train_metrics, test_metrics):\n    pyplot.figure(figsize=(10, 5))\n    min_, max_ = np.min(loss_metric), np.max(loss_metric)\n    lg, = pyplot.plot(loss_metric)\n    pyplot.yticks(min_ + np.arange(5) * (max_ - min_))\n    # if 
learning_rate is not None:\n # lg, = pyplot.plot(learning_rate)\n pyplot.title('Loss')\n pyplot.xlabel('Epoch')\n pyplot.yscale('log')\n pyplot.show()\n\n for prm in basic_metrics:\n if prm in metrics2record:\n leg = []\n met_idx = metrics2record.index(prm)\n pyplot.figure(figsize=(10, 5))\n lg, = pyplot.plot(train_metrics[:, met_idx], label=('train'))\n leg.append(lg)\n lg, = pyplot.plot(test_metrics[:, met_idx], label=('test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title(prm)\n pyplot.xlabel('Epoch')\n pyplot.show()\n\n has_prf = any([(prm in PRF_metrics) for prm in metrics2record])\n if has_prf:\n pyplot.figure(figsize=(10, 5))\n leg = []\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(train_metrics[:, met_idx],\n label=(prm + ':train'))\n leg.append(lg)\n\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(test_metrics[:, met_idx],\n label=(prm + ':test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title('Precision / Recall')\n pyplot.xlabel('Epoch')\n pyplot.show()", "def visualize_manipulation_training(flow, epoch, save_dir=None):\n\n # Basic figure setup\n images_x = 3\n images_y = 3 if isinstance(flow.codec, DCN) else 2\n fig = Figure(figsize=(18, 10 / images_x * images_y))\n conf = np.array(flow.fan.performance['confusion'])\n \n # Draw the plots\n ax = fig.add_subplot(images_y, images_x, 1)\n ax.plot(flow.nip.performance['loss']['training'], '.', alpha=0.25)\n ax.plot(helpers.stats.ma_conv(flow.nip.performance['loss']['training'], 0))\n ax.set_ylabel('{} NIP loss'.format(flow.nip.class_name))\n ax.set_title('Loss')\n\n ax = fig.add_subplot(images_y, images_x, 2)\n ax.plot(flow.nip.performance['psnr']['validation'], '.', alpha=0.25)\n ax.plot(helpers.stats.ma_conv(flow.nip.performance['psnr']['validation'], 0))\n ax.set_ylabel('{} NIP psnr'.format(flow.nip.class_name))\n ax.set_title('PSNR')\n ax.set_ylim([30, 50])\n\n ax = fig.add_subplot(images_y, images_x, 3)\n ax.plot(flow.nip.performance['ssim']['validation'], '.', alpha=0.25)\n ax.plot(helpers.stats.ma_conv(flow.nip.performance['ssim']['validation'], 0))\n ax.set_ylabel('{} NIP ssim'.format(flow.nip.class_name))\n ax.set_title('SSIM')\n ax.set_ylim([0.8, 1])\n \n ax = fig.add_subplot(images_y, images_x, 4)\n ax.plot(flow.fan.performance['loss']['training'], '.', alpha=0.25)\n ax.plot(helpers.stats.ma_conv(flow.fan.performance['loss']['training'], 0))\n ax.set_ylabel('FAN loss')\n\n ax = fig.add_subplot(images_y, images_x, 5)\n ax.plot(flow.fan.performance['accuracy']['validation'], '.', alpha=0.25)\n ax.plot(helpers.stats.ma_conv(flow.fan.performance['accuracy']['validation'], 0))\n ax.set_ylabel('FAN accuracy')\n ax.set_ylim([0, 1])\n\n # The confusion matrix\n ax = fig.add_subplot(images_y, images_x, 6)\n ax.imshow(conf, vmin=0, vmax=1)\n\n ax.set_xticks(range(flow.n_classes))\n ax.set_xticklabels(flow._forensics_classes, rotation='vertical')\n ax.set_yticks(range(flow.n_classes))\n ax.set_yticklabels(flow._forensics_classes)\n\n for r in range(flow.n_classes):\n ax.text(r, r, '{:.2f}'.format(conf[r, r]), horizontalalignment='center', color='b' if conf[r, r] > 0.5 else 'w')\n\n ax.set_xlabel('PREDICTED class')\n ax.set_ylabel('TRUE class')\n ax.set_title('Accuracy: {:.2f}'.format(np.mean(np.diag(conf))))\n\n # If the compression model is a trainable DCN, include it's validation metrics\n if images_y == 3:\n ax = fig.add_subplot(images_y, images_x, 7)\n 
ax.plot(flow.codec.performance['loss']['validation'], '.', alpha=0.25)\n ax.plot(helpers.stats.ma_conv(flow.codec.performance['loss']['validation'], 0))\n ax.set_ylabel('DCN loss')\n\n ax = fig.add_subplot(images_y, images_x, 8)\n ax.plot(flow.codec.performance['ssim']['validation'], '.', alpha=0.25)\n ax.plot(helpers.stats.ma_conv(flow.codec.performance['ssim']['validation'], 0))\n ax.set_ylabel('DCN ssim')\n ax.set_ylim([0.8, 1])\n\n ax = fig.add_subplot(images_y, images_x, 9)\n ax.plot(flow.codec.performance['entropy']['validation'], '.', alpha=0.25)\n ax.plot(helpers.stats.ma_conv(flow.codec.performance['entropy']['validation'], 0))\n ax.set_ylabel('DCN entropy')\n\n if save_dir is not None:\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n fig.savefig('{}/manip_validation_{:05d}.jpg'.format(save_dir, epoch), bbox_inches='tight', dpi=100)\n del fig\n\n else:\n return fig", "def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()", "def error_plot(training_costs, test_costs, learning_rate, accuracy, test_accuracy, val_accuracy, layers, data_size,\n n_neighbours, dropout_rate):\n\n plt.plot(training_costs, label=\"Training loss\")\n plt.plot(test_costs, label=\"Test loss\")\n plt.xlabel(\"Iterations\", size='medium')\n plt.ylabel(\"Cost function (%)\", size='medium')\n plt.suptitle(\"Cost function while training the neural network\", size='medium', ha='center')\n plt.title(\"layers: {} with dropout rate of {}, learning rate: {}\".format(layers, dropout_rate, learning_rate),\n size='small', ha='center')\n plt.figtext(0.77, 0.35, \"Training accuracy\\n{0:.2f}%\".format(accuracy), size='medium')\n plt.figtext(0.77, 0.25, \"Test accuracy\\n{0:.2f}%\".format(test_accuracy), size='medium')\n plt.figtext(0.77, 0.15, \"Validation accuracy\\n{0:.2f}%\".format(val_accuracy), size='medium')\n if n_neighbours == 0:\n plt.figtext(0.77, 0.80, \"Neighbours\\nexcluded\", size='medium')\n else:\n plt.figtext(0.77, 0.80, \"{} neighbours\\nincluded\".format(n_neighbours), size='medium')\n plt.figtext(0.77, 0.70, \"{}\\nsamples\".format(data_size))\n plt.legend(loc='right', bbox_to_anchor=(1.39, 0.5))\n plt.subplots_adjust(right=0.75)\n working_dir = os.path.dirname(os.path.abspath(__file__))\n saving(working_dir + \"/output_ANN/error_plots/{}_error_{}\".format(n_neighbours, data_size))", "def plot_errors(loss_train, loss_val, jet):\n plt.plot(list(range(len(loss_train))), loss_train, 'g', label='Training loss')\n plt.plot(list(range(len(loss_val))), loss_val, 'b', label='Validation loss')\n plt.title('Training and Validation loss for jet: 
{jet}'.format(jet=jet))\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()", "def plot_history(history, config):\n\n # Plot training and validation history\n train_acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n train_prec = history.history['precision']\n val_prec = history.history['val_precision']\n train_rec = history.history['recall']\n val_rec = history.history['val_recall']\n train_auc = history.history['auc']\n val_auc = history.history['val_auc']\n\n plt.figure(figsize=(8, 8))\n plt.subplot(2, 2, 1)\n plt.plot(train_acc, label='Training')\n plt.plot(val_acc, label='Validation')\n plt.legend(loc='lower left')\n plt.ylabel('Accuracy')\n plt.ylim([0, 1.0])\n # plt.title('Accuracy')\n\n plt.subplot(2, 2, 2)\n plt.plot(train_prec, label='Training')\n plt.plot(val_prec, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('Precision')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation Precision')\n plt.xlabel('epoch')\n\n plt.subplot(2, 2, 3)\n plt.plot(train_rec, label='Training')\n plt.plot(val_rec, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('Recall')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation Recall')\n\n plt.subplot(2, 2, 4)\n plt.plot(train_auc, label='Training')\n plt.plot(val_auc, label='Validation')\n plt.legend(loc='upper left')\n plt.ylabel('AUC')\n plt.ylim([0, 1.0])\n # plt.title('Training and Validation AUC')\n plt.xlabel('epoch')\n\n plt.savefig(f\"{config['model_label']}.png\")", "def plot_eval_3(trained_model, X_val, y_val, image_name):\n # FOR EACH CLASS\n # val_pred = trained_model.predict_proba(X_val, num_iteration=iteration)\n \n iterations = trained_model.booster_.current_iteration()\n# results = np.zeros((2, iterations))\n results = np.zeros((iterations,))\n for pos in range(iterations):\n \n # Calculate the current iteration (from 1 to iterations)\n iteration = pos + 1\n \n # Predict validation set for the current iteration\n# start_time = timeit.default_timer()\n val_pred = trained_model.predict(X_val, num_iteration=iteration)\n# end_time = timeit.default_timer()\n# time = end_time - start_time\n# speed = int(X_val.shape[0] / time)\n \n # Number of hits\n val_ok = (val_pred == y_val)\n \n # Percentage of hits\n val_acc = val_ok.sum() / val_ok.size\n \n # Actualize data for plotting results\n# results[0][pos] = time\n# results[1][pos] = val_acc\n results[pos] = val_acc\n \n # Generate accuracy plot\n plt.figure()\n# plt.plot(results[0], results[1], 'b')\n plt.plot(results, 'b')\n plt.title('Validation accuracy')\n plt.xlabel('iterations')\n plt.ylabel('accuracy')\n plt.legend()\n \n # Save validation plot\n plot_file = os.path.join(OUTPUT_DIR, \"{}_val_accuracy\".format(image_name))\n plt.savefig(plot_file + \".svg\", bbox_inches='tight', format='svg')", "def plot_on_ax(ax, trn_ls, val_ls, ylabel=\"Accuracy\"):\n ax.plot(trn_ls, 'o-', label='Training')\n ax.plot(val_ls, 'x-', label='Validation')\n ax.set_xlabel('Epochs')\n ax.set_ylabel(ylabel)\n ax.legend()", "def plot_error(self, maxstep=20):\n plt.ion()\n plt.xlabel(\"step\")\n plt.ylabel(\"Ave Logloss (bits)\")\n train_errors = []\n if self.dataset.test:\n test_errors = []\n for i in range(maxstep):\n self.learn(1)\n train_errors.append( sum(self.logloss(tple) for tple in self.dataset.train)\n /len(self.dataset.train))\n if self.dataset.test:\n test_errors.append( sum(self.logloss(tple) for tple in self.dataset.test)\n /len(self.dataset.test))\n plt.plot(range(1,maxstep+1),train_errors,\n 
label=str(self.num_classes)+\" classes. Training set\")\n if self.dataset.test:\n plt.plot(range(1,maxstep+1),test_errors,\n label=str(self.num_classes)+\" classes. Test set\")\n plt.legend()\n plt.draw()", "def see_evaluation(epoch, training_acc, test_acc):\n print (\"Epoch \", epoch, \"Training acc: \", training_acc*100, \"Test acc: \", test_acc*100)", "def plot(training_losses, validation_losses, epochs, directory_name):\n plt.figure(figsize=(20, 10))\n\n x = np.linspace(1, epochs, epochs)\n training_losses = np.array(training_losses)\n validation_losses = np.array(validation_losses)\n\n plt.title(\"Learning curve over Epochs\")\n\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Average Loss\")\n\n plt.plot(x, training_losses, color='purple', marker=\".\", label='Training loss')\n plt.plot(x, validation_losses, color='orange', marker=\".\", label='Validation loss')\n plt.legend()\n plt.savefig('./' + directory_name + '/Learning_curves-' + str(epochs) + '.png')\n pass", "def plot_loss(x, loss_train, loss_valid, title):\n plt.figure()\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.title(title)\n plt.plot(x, loss_train, '-b', label='Training')\n plt.plot(x, loss_valid, '-r', linestyle=(0, (1, 2)), label='Validation')\n plt.legend([\"Training\", \"Validation\"], loc=\"upper right\", frameon=False)\n plt.yscale(\"log\")\n # plt.show()\n plt.savefig('{}.png'.format(title))", "def plot_history(H, epochs):\n plt.style.use(\"fivethirtyeight\")\n plt.figure()\n plt.plot(np.arange(0, epochs), H.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"accuracy\"], label=\"train_acc\")\n plt.plot(np.arange(0, epochs), H.history[\"val_accuracy\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.tight_layout()\n plt.show()", "def display(self, show_history=False):\n epoch = len(self.history['loss'])\n epochs = [x for x in range(1, epoch + 1)]\n\n fig, axes = plt.subplots(3, 1, sharex=True)\n plt.tight_layout()\n\n axes[0].set_ylabel('Loss')\n axes[0].plot(epochs, self.history['loss'])\n\n axes[1].set_ylabel('Sample Energy')\n axes[1].plot(epochs, self.history['sample_energy'])\n\n axes[2].set_xlabel('Epochs')\n axes[2].set_ylabel('Lr')\n axes[2].set_yscale('log')\n axes[2].plot(epochs, self.history['lr'])\n\n plt.savefig(self.save_path + \"/train_history\", bbox_inches=\"tight\")\n if show_history:\n plt.show(block=True)", "def training_summary(history, model, train_generator, eval_generator):\n nrows, ncols = 2, 3\n fig, axes = plt.subplots(nrows, ncols, figsize=(20, 12))\n fig.suptitle(\"Training Summary\")\n axes = np.ravel(axes)\n\n keys = history.history.keys()\n print(keys)\n\n axes[0].plot(history.history[\"loss\"], label=\"training loss\", c=\"blue\")\n axes[0].plot(history.history[\"val_loss\"], label=\"validation loss\", c=\"green\")\n axes[0].set_xlabel(\"epoch\")\n axes[0].set_ylabel(\"loss\")\n axes[0].legend(loc=\"best\")\n\n axes[1].plot(history.history[\"acc\"], label=\"training acc\", c=\"blue\")\n axes[1].plot(history.history[\"val_acc\"], label=\"validation acc\", c=\"green\")\n axes[1].set_xlabel(\"epoch\")\n axes[1].set_ylabel(\"loss\")\n axes[1].legend(loc=\"best\")\n\n eval_model(model, train_generator, eval_generator, axes[2:])\n plt.show()", "def plot_run_basic():\n # Define files prefix, model variables to plot and title\n str_learning_rate = ['lr_', 'learning_rate', 'Learning 
rate', 'LR']\n str_f_number = ['f_', 'F', 'Filter number', 'F']\n str_regular = ['reg_param_', 'reg_par', 'Regularization', 'RP']\n\n fig = plt.figure(figsize=(16,12))\n # Plot learning rate\n acc_span, acc_tot, setting_val = CNN.get_acc_run_basic(str_learning_rate[0], str_learning_rate[1])\n CNN.subplot_run_basic(acc_span, acc_tot, setting_val, str_learning_rate[2], str_learning_rate[3], [2,2,1])\n # Plot F number\n acc_span, acc_tot, setting_val = CNN.get_acc_run_basic(str_f_number[0], str_f_number[1])\n CNN.subplot_run_basic(acc_span, acc_tot, setting_val, str_f_number[2], str_f_number[3], [2,2,2])\n # Plot Regularization\n acc_span, acc_tot, setting_val = CNN.get_acc_run_basic(str_regular[0], str_regular[1])\n CNN.subplot_run_basic(acc_span, acc_tot, setting_val, str_regular[2], str_regular[3], [2,2,3])\n plt.suptitle('Validation accuracy - Sweep parameters', fontsize=20)\n plt.show();\n \n # Save as PDF file if wanted\n if DataLoader.SAVE_FIGURE:\n DataLoader.save_plot(fig, 'sweep_parameters_CNN.pdf')", "def show_learning_curve(self):\n\n # Loop output classes\n for c in range(1,self.n_output_classes):\n # Get data\n x_values = np.array(self.n_class_samples_list[c])\n accuracy = np.array(self.accuracy_list[c])\n precision = np.array(self.precision_list[c])\n recall = np.array(self.recall_list[c])\n F1 = np.array(self.F1_list[c])\n\n # Make plot\n with sns.axes_style(\"ticks\"):\n fig,ax = plt.subplots()\n plt.plot([np.min(x_values),np.max(x_values)],[0.5,0.5],\n color='#777777',linestyle='--')\n plt.plot([np.min(x_values),np.max(x_values)],[0.66,0.66],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.8,0.8],\n color='#777777',linestyle=':')\n plt.plot([np.min(x_values),np.max(x_values)],[0.9,0.9],\n color='#777777',linestyle=':')\n\n plt.plot( x_values, accuracy, color='#000000',\n linewidth=1, label='Accuracy' )\n plt.plot( x_values, precision, color='#0000aa',\n linewidth=1, label='Precision' )\n plt.plot( x_values, recall, color='#00aa00',\n linewidth=1, label='Recall' )\n plt.plot( x_values, F1, color='#aa0000',\n linewidth=2, label='F1' )\n\n plt.yticks( [0, 0.5, 0.66, 0.8, 0.9, 1.0],\n ['0','0.5','0.66','0.8','0.9','1.0'], ha='right' )\n plt.xlim(np.max(x_values)*-0.02,np.max(x_values)*1.02)\n plt.ylim(-0.02,1.02)\n plt.xlabel('Number of training samples')\n plt.ylabel('Performance')\n plt.title('Learning curve, class {}'.format(c))\n sns.despine(ax=ax, offset=0, trim=True)\n lgnd = plt.legend(loc=4, ncol=1, frameon=True, fontsize=9)\n lgnd.get_frame().set_facecolor('#ffffff')\n ax.spines['left'].set_bounds(0,1)\n ax.spines['bottom'].set_bounds(np.min(x_values),np.max(x_values))", "def cross_validation_visualization_mean(epochs, means, save=False, filename = \"cross_validation_mean\"):\n plt.plot(epochs, means, marker=\".\", color='b', label='means')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"mean loss\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def display_convergence_error(train_losses, valid_losses):\n if len(valid_losses) > 0:\n plt.plot(len(train_losses), train_losses, color=\"red\")\n plt.plot(len(valid_losses), valid_losses, color=\"blue\")\n plt.legend([\"Train\", \"Valid\"])\n else:\n plt.plot(len(train_losses), train_losses, color=\"red\")\n plt.legend([\"Train\"])\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()", "def graph(trainingLoss, validationLoss = None):\n style.use('fivethirtyeight')\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n ax1.cla()\n if 
validationLoss is not None:\n ax1.plot(np.array(range(len(trainingLoss))) + 1, validationLoss, label=\"Validation loss\")\n# print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(len(trainingLoss), trainingLoss[-1], validationLoss[-1]))\n# else:\n# print('Epoch: {} \\tTraining Loss: {:.6f}'.format(len(trainingLoss), trainingLoss[-1]))\n ax1.plot(np.array(range(len(trainingLoss))) + 1, trainingLoss, label=\"Training loss\")\n plt.legend(loc='best')\n plt.tight_layout()\n plt.show()", "def plot_loss (history):\n \n history_dict = history.history\n loss_values = history_dict['loss']\n val_loss_values = history_dict['val_loss']\n epochs = range(1, len(loss_values) + 1)\n\n plt.plot (epochs, loss_values, 'bo', label='Training loss')\n plt.plot (epochs, val_loss_values, 'b', label=\"validation loss\")\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()", "def plot_loss():\n df = pd.read_csv('data/loss.csv', encoding='utf-8')\n loss = df['loss'].values\n val_loss = df['val_loss'].values\n x = [i for i in range(1, len(loss) + 1)]\n\n plt.plot(x, loss, label='Train loss')\n plt.plot(x, val_loss, label='Val loss')\n\n plt.xlabel('Epochs')\n plt.ylabel('Contrastive Loss')\n plt.title('Train and test loss')\n plt.grid(True)\n plt.legend(shadow=True, fontsize='x-large')\n\n plt.show()", "def cross_validation_experiment(train_data, train_labels):\n accuracies = []\n for i in range(1, 200):\n avg = cross_validation(train_data, train_labels, i, 10)\n accuracies.append(avg)\n fig = plt.figure()\n dim = np.arange(1,len(accuracies)+1)\n plt.plot(dim,accuracies, label='Accuracy')\n plt.xlabel('k')\n plt.ylabel('accuracy')\n plt.grid()\n plt.legend()\n plt.tight_layout()\n fig.savefig('knn_cross_validation.png')\n best_k = np.argmax(accuracies)+1\n return best_k", "def accuracy_plot(LS_sizes, data_fun):\r\n\r\n opt_neigh = []\r\n\r\n #plot of optimal n_neighbors as a function of the LS size\r\n\r\n for size in LS_sizes:\r\n\r\n acc = []\r\n neighbors_values = np.arange(1,size+1,1)\r\n\r\n # For a given LS size, plots of accuracy(n_neighbors)\r\n\r\n for value in neighbors_values:\r\n\r\n X_train, y_train, X_test, y_test = data_fun(n_ts=500, n_ls=size)\r\n\r\n clf = KNeighborsClassifier(n_neighbors = value)\r\n clf = clf.fit(X_train, y_train)\r\n acc.append(clf.score(X_test,y_test))\r\n\r\n plt.figure()\r\n plt.plot(neighbors_values,acc, '.')\r\n plt.title(\"Evolution of accuracy as a function \\nof n_neighbors for LS_size = {} samples, for {}.\".format(size, data_fun.__name__))\r\n plt.savefig(\"acc(n_neigh)_{}_{}.pdf\".format(size, data_fun.__name__))\r\n\r\n opt_neigh.append(np.argmax(acc)+1)\r\n\r\n plt.figure()\r\n plt.plot(LS_sizes, opt_neigh, '.')\r\n plt.title(\"Optimal n_neighbors as a function \\nof the size of the learning sample, for {}.\".format(data_fun.__name__))\r\n plt.savefig(\"opt_n_neigh(LS_size)_{}.pdf\".format(data_fun.__name__))", "def plot(self, ylog=False, category=\"Accuracy\", figsize=(12, 5)):\n if self.CV == False: # no Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 
'r-', label='Training Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'r-', label='Training Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n if self.CV == True: # has Cross Validation set case\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)\n plt.suptitle(\"Training Curve for \" + self.loss, fontsize=12)\n ax[0].plot(range(1, len(self.trainError) + 1), self.trainError, 'g-', label='Training Error')\n ax[0].plot(range(1, len(self.cvError) + 1), self.cvError, 'r-', label='CV Error')\n ax[0].set_xlabel('Iteration')\n ax[0].set_ylabel(\"Error\")\n if ylog == True:\n ax[0].set_yscale('log')\n ax[0].legend()\n ax[0].grid('on')\n\n if category == \"Accuracy\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), self.trainAcc, 'g-', label='Training Accuracy')\n ax[1].plot(range(1, len(self.cvAcc) + 1), self.cvAcc, 'r-', label='CV Accuracy')\n ax[1].set_ylabel(\"Accuracy\")\n elif category == \"Error Rate\":\n ax[1].plot(range(1, len(self.trainAcc) + 1), 1 - np.array(self.trainAcc), 'g-', label='Training Error Rate')\n ax[1].plot(range(1, len(self.cvAcc) + 1), 1 - np.array(self.cvAcc), 'r-', label='CV Error Rate')\n ax[1].set_ylabel(\"Error Rate\")\n # ax[1].set_ylim((0, 1))\n ax[1].set_xlabel('Iteration')\n ax[1].legend(loc='best')\n ax[1].grid('on')\n plt.show()\n\n return fig, ax", "def Plot_loss(history_object): \n ### print the keys contained in the history object\n print(history_object.history.keys())\n print(history_object.history['loss'])\n print(history_object.history['val_loss'])\n\n ### plot the training and validation loss for each epoch\n plt.plot(history_object.history['loss'])\n plt.plot(history_object.history['val_loss'])\n plt.title('model mean squared error loss')\n plt.ylabel('mean squared error loss')\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], loc='upper right')\n plt.show()", "def summarize_diagnostics(self):\n # plot loss\n pyplot.subplot(211)\n pyplot.title('Cross Entropy Loss')\n pyplot.plot(self.history.history['loss'], color='blue', label='train')\n pyplot.plot(self.history.history['val_loss'], color='orange', label='test')\n # plot accuracy\n pyplot.subplot(212)\n pyplot.title('Classification Accuracy')\n pyplot.plot(self.history.history['accuracy'], color='blue', label='train')\n pyplot.plot(self.history.history['val_accuracy'], color='orange', label='test')\n # save plot to file\n pyplot.savefig(f'{self.project_home / \"o\"}/{self.model.name}_plot.png')\n pyplot.close()", "def create(self, train: List[float], validation: List[float]) -> None:\n self.ax.plot(train)\n self.ax.plot(validation)\n self.ax.set_xlabel('epochs')\n if self.loss:\n self.ax.set_ylabel('loss')\n else:\n self.ax.set_ylabel('accuracy')\n self.ax.legend(['train', 'validation'])", "def acc_loss_graph(accuracies, losses):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))\n\n for experiment_id in accuracies.keys():\n ax1.plot(accuracies[experiment_id], label=experiment_id)\n ax1.legend()\n ax1.set_title('Validation Accuracy')\n fig.tight_layout()\n\n for experiment_id in accuracies.keys():\n ax2.plot(losses[experiment_id], label=experiment_id)\n ax2.legend()\n ax2.set_title('Validation Loss');\n\n fig.tight_layout()", "def loss_plot(train_loss, val_loss, 
filename):\n\tplt.plot(train_loss)\n\tplt.plot(val_loss)\n\tplt.ylabel('Loss')\n\tplt.xlabel('Epochs')\n\tplt.legend(['Train', 'Val'], loc='upper right')\n\tplt.savefig(filename)\n\tplt.close()", "def show_training_history(self):\n hist = [i.history[\"loss\"][0] for i in self.history]\n plt.plot(hist)", "def plot_loss(stats):\r\n plt.plot(stats['train_loss_ind'], stats['train_loss'], label='Training loss')\r\n plt.plot(stats['val_loss_ind'], stats['val_loss'], label='Validation loss')\r\n plt.legend()\r\n plt.xlabel('Number of iterations')\r\n plt.ylabel('Loss')\r\n plt.show()", "def plot_data(x, y, epochs):\n\n fig = plt.figure()\n ax = fig.gca()\n\n ax.set_ylim(0, int(np.max(y)+0.5))\n ax.set_xlim(0, np.max(x))\n ax.yaxis.grid(True)\n ax.grid(which='minor', axis='x', alpha=0.2)\n ax.grid(which='major', axis='x', alpha=0.5)\n major_ticks = np.arange(0, np.max(x), 88)\n minor_ticks = np.arange(0, np.max(x), 16)\n ax.set_xticks(major_ticks)\n ax.set_xticks(minor_ticks, minor=True)\n\n fig.canvas.draw()\n labels = [\"{:2d}\".format(int(int(item.get_text())/88)) for item in ax.get_xticklabels()]\n ax.set_xticklabels(labels)\n\n plt.title(\"Model Loss over {} Epochs\".format(epochs))\n plt.scatter(x, y, s=50, alpha=0.5, label='cross_entropy')\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend(loc='upper right')\n plt.show()", "def graph_results(loss, acc):\n N = len(loss)\n x = np.linspace(0, N, N)\n plt.subplot(1,2,1)\n plt.plot(x, loss)\n plt.subplot(1,2,2)\n plt.plot(x,acc)\n plt.show()", "def plot_loss_metrics(history_file):\n history = pickle.load(open(history_file, \"rb\"))\n loss, metric, val_loss, val_metric = islice(history.keys(), 4)\n n_epochs = len(history[loss])\n\n plt.style.use(\"ggplot\")\n fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(13, 8))\n\n ax1.set_title(loss)\n ax1.plot(np.arange(1, n_epochs + 1), history[loss], label='train')\n ax1.plot(np.arange(1, n_epochs + 1), history[val_loss], label='test')\n ax1.legend()\n\n ax2.set_title(metric)\n ax2.plot(np.arange(1, n_epochs + 1), history[metric], label='train')\n ax2.plot(np.arange(1, n_epochs + 1), history[val_metric], label='test')\n ax2.set_xlabel('Epochs')\n ax2.set_xlim((1, n_epochs + 1))\n xa = ax2.get_xaxis()\n xa.set_major_locator(MaxNLocator(integer=True))\n ax2.legend()\n plt.savefig(history_file + '.png')\n plt.show()", "def plot_loss_acc(history, aucs, model_path=None):\n # summarize history for accuracy\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig(path.join(model_path, 'accuracy.png'))\n plt.gcf().clear()\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.plot(aucs)\n plt.title('model loss, ROC AUC')\n plt.ylabel('loss, ROC AUC')\n plt.xlabel('epoch')\n plt.legend(['train', 'test', 'ROC AUC'], loc='upper left')\n plt.savefig(path.join(model_path, 'loss.png'))", "def plot(self, epochs, title=\"Learning Rate Schedule\"):\n lrs = [self(i) for i in epochs]\n\n # plot the learning rate schedule\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(epochs, lrs)\n plt.title(title)\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Learning Rate\")\n plt.close()", "def visualization(epochs, mse_tr, mse_te):\n plt.semilogx(epochs, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(epochs, mse_te, marker=\".\", color='r', label='test error')\n 
plt.xlabel(\"k\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")", "def plot_history(history):\n\n fig, axs = plt.subplots(2)\n\n # create accuracy sublpot\n axs[0].plot(history.history[\"accuracy\"], label=\"train accuracy\")\n axs[0].plot(history.history[\"val_accuracy\"], label=\"validation accuracy\")\n axs[0].set_ylabel(\"Accuracy\")\n axs[0].legend(loc=\"lower right\")\n axs[0].set_title(\"Accuracy eval\")\n\n # create error sublpot\n axs[1].plot(history.history[\"loss\"], label=\"train error\")\n axs[1].plot(history.history[\"val_loss\"], label=\"validation error\")\n axs[1].set_ylabel(\"Error\")\n axs[1].set_xlabel(\"Epoch\")\n axs[1].legend(loc=\"upper right\")\n axs[1].set_title(\"Error eval\")\n\n plt.show()", "def plot_fit_history(fit_history_obj):\r\n plt.plot(fit_history_obj.history['loss'])\r\n plt.plot(fit_history_obj.history['val_loss'])\r\n plt.title('model mean squared error loss')\r\n plt.ylabel('mean squared error loss')\r\n plt.xlabel('epoch')\r\n plt.legend(['training set', 'validation set'], loc='upper right')\r\n plt.show()", "def plot_loss_acc(name,score):\n plt.title(name)\n plt.xlabel('Epoch Number')\n plt.ylabel(name.split(sep=' ')[1])\n plt.plot(score)\n plt.savefig(\"graphs/\"+name+\".png\")", "def _plot_train_test_experiment(mtrain, mval, metric_name, isState):\n # axes\n f, axes = plt.subplots(2,2,figsize=(12,10))\n ltrain = _plot_experiment(mtrain, axes[:,0], metric_name, isTrain=True)\n lval = _plot_experiment(mval, axes[:,1], metric_name, isTrain=False)\n # title\n target = \"State\" if isState else \"Output\"\n f.suptitle(f\"{target} Errors\")\n f.tight_layout()\n return f, axes", "def plot_learning_curve(X_train_all, X_val_all, y_train_all, y_val_all, train_sizes, title):\n\n errors_df = pd.DataFrame(columns = ['train_size', 'train_acc', 'val_acc'])\n\n # Loop through example sizes and get the training and validation error\n for train_size in train_sizes:\n # Select Subset of Data\n X_train = X_train_all[:train_size]\n X_val = X_val_all[:train_size]\n y_train = y_train_all[:train_size]\n y_val = y_val_all[:train_size]\n\n # Initialize Model\n model = svm.SVC(kernel='linear')\n\n # Fit model\n print(f\"Training {title} using {train_size} examples\")\n model.fit(X_train, y_train)\n\n # Get Predictions \n train_pred = model.predict(X_train)\n val_pred = model.predict(X_val)\n\n # Get Accuracy Score for X_Train and X_Val\n errors = pd.DataFrame({\n 'train_size': [train_size],\n 'train_acc': [accuracy_score(y_train, train_pred)],\n 'val_acc': [accuracy_score(y_val, val_pred)]\n })\n \n # Concatenate Dataframes\n errors_df = pd.concat([errors_df, errors])\n\n # Plot Learning Curve\n fig, ax = plt.subplots()\n\n errors_df.plot(x='train_size', y='train_acc',kind='line', ax=ax)\n errors_df.plot(x='train_size', y='val_acc',kind='line', color='red', ax=ax)\n\n ax.set_xlabel(\"Training Size\")\n ax.set_ylabel(\"Accuracy\")\n ax.set_title(title)\n\n # Save Figure\n plt.savefig('figs/' + title + '_learning_curve.png')", "def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")", "def create_val_plots(x_vals, vals_zeros,vals_ones):\n plt.plot(x_vals, 
vals_zeros,label=\"non-fraud\")\n plt.plot(x_vals, vals_ones,label=\"fraud\")\n plt.title('Accuracy per number of iterations')\n plt.xlabel('Number of Iterations')\n plt.ylabel('Accuracy')\n plt.xticks(np.arange(100, 210, 10))\n plt.legend() \n plt.show()\n # plt.savefig('./analysis_deliverable/visualizations/accuracy_plot.png')", "def graph_ACC(history,title):\n _, ax = plt.subplots()\n ax.set_title(title)\n try:\n ax.plot(history.history['acc'], label='Train')\n ax.plot(history.history['val_acc'], label='Test')\n except:\n ax.plot(history['acc'], label='Train')\n ax.plot(history['val_acc'], label='Test')\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(\"Accuracy\")\n ax.legend()", "def plot_data(losses, accuracies, name):\n # convert accuracies to percentages\n accuracies['Train'] = [acc * 100 for acc in accuracies['Train']]\n accuracies['Valid'] = [acc * 100 for acc in accuracies['Valid']]\n # set fontsize\n plt.rcParams.update({'font.size': 13})\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(8,5))\n ax1.set_xlabel('Number of Epochs')\n ax1.set_ylabel('Cross Entropy Loss')\n ax1.set_ylim(0,2)\n ax1.plot(losses['Train'], label='Training')\n ax1.plot(losses['Valid'], label='Validation')\n ax1.legend(loc='upper right')\n\n ax2.set_xlabel('Number of Epochs')\n ax2.set_ylabel('Accuracy (%)')\n ax2.set_ylim(0,100)\n ax2.plot(accuracies['Train'], label='Training')\n ax2.plot(accuracies['Valid'], label='Validation')\n ax2.legend(loc='upper left')\n\n fig.tight_layout()\n fig.savefig('../outputs/' + name)", "def plot_loss(history, name):\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n # plt.show()\n plt.savefig(name, format=\"png\")", "def plot_train_or_test_error_graphs(summary, current_iter, save_path, config_dict):\n plot_testing_now = any(current_iter % item == 0 for item in config_dict['test_every'])\n\n plot_training_now = plot_testing_now or (current_iter % 500 == 0 and current_iter<5000)\n\n if plot_training_now or plot_testing_now:\n plot_path = os.path.join(save_path, 'plots')\n if not os.path.exists(plot_path):\n os.makedirs(plot_path)\n \n if plot_training_now:\n plot_file_name = os.path.join(plot_path, 'train_val_loss_{:06d}.png'.format(current_iter))\n plot_train_info_iteration(summary, current_iter, plot_file_name, config_dict)\n if plot_testing_now:\n plot_file_name = os.path.join(plot_path, 'train_val_loss_list_{:06d}.png'.format(current_iter))\n plot_test_info_iteration(summary, current_iter, plot_file_name, config_dict)", "def run_model(x_train, y_train, x_test, y_test, plot_type, epochs, size):\n # Create the sequential model from keras (CNN)\n\n #TODO: Add batch normalization layers\n # Add the layers to the sequential model\n model = Sequential()\n\n model.add(Conv2D(64, kernel_size=3, use_bias=False,\n input_shape=(size, size, 1)))\n model.add(BatchNormalization())\n model.add(Activation(\"relu\"))\n\n model.add(Conv2D(32, kernel_size=3, use_bias=False))\n model.add(BatchNormalization())\n model.add(Activation(\"relu\"))\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n #model.add(Dropout(0.25))\n\n model.add(Flatten())\n\n model.add(Dense(128, use_bias=False, kernel_regularizer=regularizers.l2(\n 0.01)))\n model.add(BatchNormalization())\n model.add(Activation(\"relu\"))\n\n model.add(Dense(64, use_bias=False, kernel_regularizer=regularizers.l2(\n 0.01)))\n model.add(BatchNormalization())\n 
model.add(Activation(\"relu\"))\n\n model.add(Dropout(0.5))\n\n model.add(Dense(10, activation='softmax'))\n\n #keras.utils.plot_model(model, to_file='test_keras_plot_model.png',show_shapes=True)\n\n #TODO: Add in a lower learning rate - 0.001\n adam = optimizers.adam(lr=0.001)\n model.compile(optimizer=adam, loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n # Training the model\n number_of_epochs = epochs\n\n history = model.fit(x_train, y_train, validation_data=(x_test, y_test),\n epochs=number_of_epochs, verbose=1)\n\n # summarize history for accuracy\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig(\"History_for_Accuracy\")\n plt.show()\n # summarize history for loss\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig(\"History_for_Loss\")\n plt.show()\n model.save('models/' + str(number_of_epochs) + '_epochs.h5')", "def plot_curve(self):\n x1 = np.arange(self.init_epoch, self.params.num_epoch+1, dtype=np.int).tolist()\n x2 = np.linspace(self.init_epoch, self.epoch,\n num=(self.epoch-self.init_epoch)//self.params.val_every+1, dtype=np.int64)\n plt.plot(x1, self.train_loss, label='train_loss')\n plt.plot(x2, self.val_loss, label='val_loss')\n plt.legend(loc='best')\n plt.title('Train/Val loss')\n plt.grid()\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()", "def plot_error(class_incorreto):\n epochs = np.arange(1, num_iter + 1)\n plt.plot(epochs, class_incorreto)\n plt.xlabel('Iterações')\n plt.ylabel('Classificados incorretamente')\n plt.show()", "def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs", "def plot_metric_values(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.accuracies), 1)\n plt.plot(epochs_range, self.accuracies[threshold:], color='red', marker='o')\n plt.title('Accuracy on test data. 
Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.grid(True)\n plt.show()", "def draw_keras_history(history, output_dir='.', output_name='loss.pdf'):\n\n fig = plt.figure(1, figsize=(6, 6), dpi=300)\n fig.clear()\n\n # Create an axes instance\n ax = fig.add_subplot(111)\n\n if 'loss' in history.history.keys():\n training_losses = history.history['loss']\n epochs = np.arange(0, len(training_losses))\n l1 = ax.plot(epochs, training_losses, '-', color='#8E2800', lw=2, label=\"Training loss\")\n \n if 'val_loss' in history.history.keys():\n validation_losses = history.history['val_loss']\n epochs = np.arange(0, len(validation_losses))\n l2 = ax.plot(epochs, validation_losses, '-', color='#468966', lw=2, label=\"Validation loss\")\n\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(\"Loss\")\n \n # training_acc = history.history['acc']\n # validation_acc = history.history['val_acc']\n\n # ax2 = ax.twinx()\n # l3 = ax2.plot(epochs, training_acc, '--', color='#8E2800', lw=2, label=\"Training accuracy\")\n # l4 = ax2.plot(epochs, validation_acc, '--', color='#468966', lw=2, label=\"Validation accuracy\")\n # ax2.set_ylabel(\"Accuracy\")\n\n ax.margins(0.05)\n # ax2.margins(0.05)\n\n fig.set_tight_layout(True)\n\n # lns = l1 + l2 + l3 + l4\n #lns = l1 + l2\n #labs = [l.get_label() for l in lns]\n #ax.legend(lns, labs, loc='best', numpoints=1, frameon=False)\n ax.legend(loc='best', numpoints=1, frameon=False)\n\n fig.savefig(os.path.join(output_dir, output_name))\n\n plt.close()", "def plot_train_and_valid_curves(ax, train_points, valid_points, learning_rate_updates_epoch, best_per_lr, mode=\"loss\"):\n if mode==\"loss\":\n name = \"Loss\"\n names = \"losses\"\n factor = [1.2, 1.22]\n loc_legend = 1\n elif mode ==\"acc\":\n name = \"Accuracy\"\n names = \"acc\"\n factor = [0.9, 0.88]\n loc_legend = 4\n else:\n print \"Mode not understood. 
Available modes : 'loss' and 'acc'\"\n return\n\n #ax = plt.subplot(1,1,1)#\n # Plot training and valid loss curves\n ax.plot(np.arange(len(train_points)),train_points, c=\"k\", zorder=1)\n ax.plot(np.arange(len(valid_points)),valid_points, c=\"k\", zorder=1)\n ax.scatter(np.arange(len(train_points)),train_points, c=\"b\", label=\"Train %s\"%names, zorder=2)\n ax.scatter(np.arange(len(valid_points)),valid_points, c=\"r\", label=\"Valid %s\"%names, zorder=2)\n # Plot vertical line when the learning rate was updated\n first = True\n for elem in learning_rate_updates_epoch:\n if first:\n plt.plot([elem-.5,elem-.5], [1.4*valid_points[elem],train_points[elem]*0.6], c=\"k\", label=\"LR updates\", linestyle=\"--\")\n first = False\n else:\n plt.plot([elem-.5,elem-.5], [1.4*valid_points[elem],train_points[elem]*0.6], c=\"k\", linestyle=\"--\")\n # Plot best model in each region\n first = True\n for i,elem in enumerate(best_per_lr):\n if first:\n x = elem[0]\n y = elem[1]\n plt.scatter(x,y, c=\"g\", label=\"Best models\", marker=\"*\", zorder=3, s=100)\n plt.plot([x,x],[y,factor[0]*y], c=\"g\")\n plt.text(x,factor[1]*y, \"Epoch %d\"%(x), fontsize=8)\n first = False\n else:\n x = elem[0]+learning_rate_updates_epoch[i-1]\n y = elem[1]\n plt.scatter(x,y, c=\"g\", marker=\"*\", zorder=3, s=100)\n plt.plot()\n plt.plot([x,x],[y,factor[0]*y], c=\"g\")\n plt.text(x,factor[1]*y, \"Epoch %d\"%(x), fontsize=8)\n # Xlim, Ylim, labels, legend...\n ax.set_ylim([0,1])\n ax.set_xlim([0,len(train_points)+5])\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(name)\n handles,labels = ax.get_legend_handles_labels()\n sorted_zip = sorted(zip([2,0,1,3],handles, labels))\n index, handles, labels = zip(*sorted_zip)\n ax.legend(handles,labels, loc=loc_legend, prop={'size':10})", "def loss_plotter(model_history, ax=None):\n \n import matplotlib.pyplot as plt\n import seaborn as sns\n\n training_loss = model_history['loss']\n \n test_loss = model_history['val_loss']\n\n epoch_count = range(1,len(training_loss)+1)\n \n sns.set(font_scale=1.15)\n \n ax = sns.lineplot(\n x=epoch_count,\n y=training_loss,\n ax=ax\n )\n \n ax = sns.lineplot(\n x=epoch_count,\n y=test_loss,\n ax=ax\n )\n\n ax.set_title(\n 'Loss Curves: Pre-Trained VGG-16 with 2 Trained Layers',\n fontsize=19\n )\n ax.set_ylabel(\n 'Loss',\n fontsize=18\n )\n ax.set_xlabel(\n 'Epochs',\n fontsize=18\n )\n\n plt.legend(['Training Loss', 'Validation Loss'])\n plt.show()", "def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n 
ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()" ]
[ "0.8396251", "0.8177083", "0.8090178", "0.79906076", "0.79561436", "0.77318394", "0.77222484", "0.76527697", "0.76454616", "0.7617511", "0.7601171", "0.75999707", "0.75929874", "0.7586753", "0.7510082", "0.7505088", "0.7487265", "0.74864775", "0.74474126", "0.7441814", "0.74150574", "0.7407079", "0.74018466", "0.7398399", "0.73971367", "0.738876", "0.73624384", "0.7359866", "0.7359056", "0.7342758", "0.7340326", "0.7317557", "0.7305682", "0.72943145", "0.72923994", "0.7289882", "0.72759783", "0.7239037", "0.72295433", "0.72283965", "0.7224092", "0.7217437", "0.7213826", "0.7208396", "0.7204813", "0.71998537", "0.7194708", "0.7191559", "0.7170421", "0.7119945", "0.7109121", "0.7107993", "0.71037424", "0.7087601", "0.7087567", "0.70860654", "0.70450044", "0.7030621", "0.7002688", "0.7002524", "0.699866", "0.6976159", "0.69570297", "0.69466335", "0.6944987", "0.694338", "0.69401795", "0.69309294", "0.69308215", "0.6925416", "0.6904436", "0.6894827", "0.6872885", "0.6857435", "0.68533146", "0.6840751", "0.683541", "0.6834451", "0.68254393", "0.6824352", "0.6815716", "0.68077403", "0.67872083", "0.67871773", "0.6783893", "0.6765224", "0.6756314", "0.6742386", "0.67349327", "0.67314065", "0.67107594", "0.67107296", "0.6704104", "0.6697292", "0.66911143", "0.6684408", "0.6679939", "0.66783553", "0.6675993", "0.66625893", "0.6662337" ]
0.0
-1
Refresh the contents of the projects.
def refresh(self): metadata = project_scrape(self.url) if metadata: if not self.done: self.progress.set_fraction(metadata['percent_raised']) self.progress.set_text(metadata['pretty_percent']) self.progress.set_show_text(True) self.pledged.set_text(metadata['pledged']) self.backers.set_text(metadata['backers']) self.updates.set_label(metadata['updates']) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project_refresh_all():\n project_list = Project.objects()\n analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])\n flash('Refresh all successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def update_projects(self):\n self._read_directory()\n print(self._filenames)\n for filename in self._filenames:\n project = self._make_project(self._read_project(filename))\n self.projects.append(\n (int(project.get_id()), project)\n )\n self.projects = sorted(self.projects, reverse=True)", "def refresh_details(self) -> None:\n data = request(\n 'get',\n f'/api/v0/projects/{self.id}/',\n ).json()\n self.data.update(data)", "def project_refresh(project_name):\n if not db_find_project(project_name):\n abort(404)\n analyser.add_repos(current_user.username, [project_name])\n return redirect(url_for('main.admin_manage'))", "def update_image_all_projects(self):\n projects = Project.objects.all(temporary=False)\n for project in projects:\n project.update_image()", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def run(self):\n self.update_repos()", "def _update_projects_watch(self, new_projects_list):\n persistent_update_project = retry_children_watch_coroutine(\n '/appscale/projects', self.update_projects\n )\n main_io_loop = IOLoop.instance()\n main_io_loop.add_callback(persistent_update_project, new_projects_list)", "def refresh(self):\n\t\tif self.id is None:\n\t\t\tprint(\"({cls}): self.id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id and self.project_id is None:\n\t\t\tprint(\"({cls}): self.project_id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id:\n\t\t\targs = [self.project_id, self.id]\n\t\telse:\n\t\t\targs = [self.id]\n\n\t\tres = getattr(self._client, \"get_\" + self.method)(*args, raw=True)\n\t\tself._create_fields(res)", "def refresh(self):\n self.config.read(self.filename)\n self.loadRecentFiles()", "def projects(self, projects):\n\n self._projects = projects", "def project_updated_handler(event):\n project = event.obj\n cache_manager.refresh(project)", "def refresh(self):\n pass", "def refresh(self):\n pass", "def update_project(self, name):\n self._log.info(\"Updating project: {}\".format(name))\n if name in self.projects:\n pass\n else:\n self.add_project(name)", "def refresh(self):\n self.__refresh()", "def projects(update_db=False):\n try:\n if not os.path.isfile(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\"\n ):\n update(update_db=update_db)\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\", \"r\"\n ) as f:\n whiteboard = f.read()\n\n return whiteboard\n\n except:\n return traceback.format_exc()", "def refresh(self):\n self.Refresh()", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def Refresh(self):\n pass", "def update(self):\n update_url = f'{self._tower.api}/projects/{self.id}/update/'\n response = self._tower.session.post(update_url)\n if not response.ok:\n self._logger.error(f\"Error updating the project '{self.name}'. 
response was: {response.text})\")\n return response.json() if response.ok else {}", "def repo_refresh_for_unfinished():\n project_list = Project.objects()\n crawl_list = []\n for repo in project_list:\n if repo.analyser_progress != \"100%\":\n crawl_list.append(repo.project_name)\n analyser.add_repos(current_user.username, crawl_list)\n flash('Refresh for unfinished successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def update_projects(self, new_projects_list):\n to_stop = [project for project in self if project not in new_projects_list]\n for project_id in to_stop:\n self[project_id].stop()\n del self[project_id]\n\n for project_id in new_projects_list:\n if project_id not in self:\n self[project_id] = ProjectManager(self.zk_client, project_id,\n self.callback)", "def project_overview(project_name):\n if not db_find_project(project_name):\n abort(404)\n\n _project = Project.objects(project_name=project_name).first()\n # _forks = ProjectFork.objects(project_name=project_name, file_list__ne=[], total_changed_line_number__ne=0)\n _forks = ProjectFork.objects(project_name=project_name, total_changed_line_number__ne=0)\n\n # TODO _all_tags could be opted by AJAX\n _all_tags = {}\n if current_user.is_authenticated:\n _project_tags = ForkTag.objects(project_name=project_name, username=current_user.username)\n for tag in _project_tags:\n _all_tags[tag.fork_full_name] = tag.tags\n\n if current_user.is_authenticated:\n print('View: ', current_user.username, project_name)\n\n return render_template('project_overview.html', project=_project, forks=_forks, all_tags=_all_tags)", "def update_project_documents(self, manifest_info):\n\n for proj_name, proj_info in manifest_info.projects.items():\n # See if project document already is in the database and extract\n # for updating if so, otherwise create a new dictionary for\n # population\n key_name = f'project:{proj_name}'\n\n try:\n project_data = self.db.get_document(key_name)\n except cbdatabase_db.NotFoundError:\n project_data = dict(\n type='project', key_=key_name, name=proj_name\n )\n\n remote, repo_url = \\\n manifest_info.get_project_remote_info(proj_name)\n\n if 'remotes' in project_data:\n if remote in project_data['remotes']:\n if repo_url not in project_data['remotes'][remote]:\n project_data['remotes'][remote].append(repo_url)\n else:\n project_data['remotes'][remote] = [repo_url]\n else:\n project_data['remotes'] = {remote: [repo_url]}\n\n self.db.upsert_documents({key_name: project_data})", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def apache_projects():\n display = Display(visible=0, size=(800, 800)) \n display.start()\n # path to where I have chrome driver installed\n path_to_chromedriver = '/usr/local/bin/chromedriver'\n # initialize the driver\n driver = webdriver.Chrome(executable_path=path_to_chromedriver)\n # go to the apache projects page\n driver.get('https://projects.apache.org/projects.html')\n # wait for the list of projects to load\n time.sleep(2)\n\n # get the HTML element with id list\n elem = driver.find_element_by_id('list')\n project_list = elem.text.split(\"\\n\")\n # initialize an instance of Projects\n projects = Projects()\n\n for i in range(1, len(project_list) + 1):\n # Get the url of each project\n project_xpath = '//*[@id=\"list\"]/ul/li[%d]/a' %i\n # Get the HTML element that for the current project\n project_link = driver.find_element_by_xpath(project_xpath)\n project_name = project_link.text\n\n # Open the project page\n 
driver.get(project_link.get_attribute(\"href\"))\n # Wait for project page to load\n time.sleep(0.5)\n\n inception = get_inception(driver)\n description = get_description(driver, project_name)\n\n # get the name without \"Apache\", make it lowercase, and add dashes\n stripped_name = \"-\".join(project_name.lower().split(\" \")[1:]).encode('utf-8')\n github_mirror = \"http://github.com/apache/\" + stripped_name\n\n # see if there's anything at the github url that was generated\n resp = httplib2.Http().request(github_mirror, 'HEAD')\n # this means the github repo with the parsed url doesn't exist\n if int(resp[0]['status']) >= 400:\n github_mirror = \"N/A\"\n\n # Add extra attributes to the JSON\n description[\"github\"] = github_mirror\n description[\"company\"] = \"Apache Software Foundation\"\n description[\"name\"] = project_name\n description[\"day\"] = inception[\"day\"]\n description[\"month\"] = inception[\"month\"]\n description[\"year\"] = inception[\"year\"]\n\n projects.add(project_name, description)\n\n # Reset the driver\n driver.get('https://projects.apache.org/projects.html')\n time.sleep(0.8)\n\n return projects", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def resync_domains_projects(self):\n pass", "def updateProjectList(self, i):\n\n cl = self.dlg.countryListWidget\n self.cv = cl.currentText()\n if self.dataPath is not None:\n prjPath = os.path.join(self.dataPath, self.countryList[self.cv])\n self.dlg.listWidget.clear()\n if os.path.exists(prjPath):\n for f in os.listdir(prjPath):\n if f.endswith(\".qgs\"):\n f = f.replace(\".qgs\", \"\")\n f = f.replace(\"_\", \" \")\n f = f.title()\n item = QListWidgetItem(f)\n self.dlg.listWidget.addItem(item)\n else:\n msg = (\"Selected data path does not have any country sub-directories. 
\"\n \"Please select the directory where the country projects are saved.\")\n QMessageBox.about(self.dlg, \"PacSAFE project folders missing\", msg)", "def refresh(self):\n\t\tself.driver.refresh()", "def refresh(self):\n\n assets_model = self.data[\"model\"][\"assets\"]\n assets_model.clear()\n\n has = {\"children\": False}\n\n project = io.ObjectId(os.environ[\"MINDBENDER__PROJECT\"])\n assets = io.find({\"type\": \"asset\", \"parent\": project})\n for asset in sorted(assets, key=lambda i: i[\"name\"]):\n item = QtWidgets.QListWidgetItem(asset[\"name\"])\n item.setData(QtCore.Qt.ItemIsEnabled, True)\n item.setData(DocumentRole, asset)\n assets_model.addItem(item)\n has[\"children\"] = True\n\n if not has[\"children\"]:\n item = QtWidgets.QListWidgetItem(\"No assets found\")\n item.setData(QtCore.Qt.ItemIsEnabled, False)\n assets_model.addItem(item)\n\n assets_model.setFocus()\n assets_model.setCurrentRow(0)\n self.data[\"button\"][\"load\"].hide()\n self.data[\"button\"][\"stop\"].hide()", "def main(root: Path = typer.Argument(Path.cwd(), help=\"Root path to look in\")):\n msg.info(f\"Updating projects.jsonl in {root}\")\n entries = []\n # We look specifically for project directories\n for path in root.glob(f\"**/*/{PROJECT_FILE}\"):\n path = path.parent\n\n # prep data for the json file\n config = load_project_config(path)\n entry = {\"shortname\": f\"{path.parent.name}/{path.name}\"}\n entry[\"title\"] = config[\"title\"]\n entry[\"description\"] = config.get(\"description\", \"\")\n entries.append(entry)\n\n with open(\"projects.jsonl\", \"w\", encoding=\"utf-8\") as jsonfile:\n for entry in entries:\n jsonfile.write(json.dumps(entry))\n jsonfile.write(\"\\n\")", "def refresh(self):\n self.log_info(f\"Browser.refresh: Refreshing the page\")\n self.CORE.refresh()\n return", "def refresh(self, new_content):\n pass", "def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data", "def reload(self):\n if len(self.files) > 0:\n self.load(self.files, regfiles=self.regions)", "def update_project_data(project_name):\n project_path = context.__PROJECTS_PATH__+ '/' + project_name\n f = open(project_path+'/.project', 'r')\n project_data = json.load(f)\n f.close()\n\n image_count = len(os.listdir(project_path)) - 2\n\n if image_count > 0:\n\n img = Image.open('{}/{}.jpg'.format(project_path, image_count-1))\n img = img.resize((640,480))\n buffered = BytesIO()\n img.save(buffered, format=\"JPEG\")\n img_str = base64.b64encode(buffered.getvalue()).decode('ascii')\n\n project_data['preview_data'] = img_str\n project_data['size'] = round(int(subprocess.check_output(['du', project_path, '-k']).split()[0]) / 1000,2)\n\n with open('{}/.project'.format(project_path), 'w') as config_file:\n json.dump(project_data, config_file, indent=4)\n config_file.close()", "def refresh(self):\n\t\tself.win.refresh()\n\t\tfor c in self.components:\n\t\t\tc.refresh()", "def command_refresh_repo(self):\n repoinit.refresh(*self.args())", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n 
self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def project_updates(self):\n return self._tower.project_updates.filter({'project': self.id})", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def close(self):\n\n if not self.__projects:\n return\n\n Console.debug(\"Closing session...\")\n Console.indent()\n\n for project in self.__projects:\n project.close()\n\n self.__projects = None\n\n Console.outdent()", "def test_projects_get(self):\n response = self.client.open('/project-tracker/projects',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def open(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/open\"\n\n _response = self.connector.http_call(\"post\", _url)\n\n # Update object\n self._update(_response.json())", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def refresh_dialog(self):\n self._client.update_elements()", "def projects(self):\r\n return p.Projects(self)", "def refresh(self):\n self.details = self.workspace.get_job(self.id).details", "def project():", "def project():", "def project():", "def refresh(self): \n return self._config.refreshObj(self)", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def sync():\n _ownered_project = []\n _tmp_project_list = get_user_repo_list(current_user.username)\n if _tmp_project_list:\n for project in _tmp_project_list:\n _ownered_project.append((project, project))\n # Add upperstream_repo\n upperstream_repo = get_upperstream_repo(project)\n if upperstream_repo is not None:\n _ownered_project.append((upperstream_repo, upperstream_repo + \"(Upperstream of %s)\" % project))\n\n User.objects(username=current_user.username).update_one(set__owned_repo_sync_time=datetime.utcnow())\n\n # mongoDB don't support key value contains '.'\n for i in range(len(_ownered_project)):\n _ownered_project[i] = (_ownered_project[i][0].replace('.', '[dot]'), _ownered_project[i][1])\n User.objects(username=current_user.username).update_one(set__owned_repo=dict(_ownered_project))\n\n flash('Refresh your own GitHub repositories list successfully!', 'success')\n return redirect(url_for('main.load_from_github'))", "def get_projects(self, refresh=False):\n if refresh:\n self._projects_lookup = self.get_project_lookup()\n\n return self._projects_lookup.keys()", "def reload(self):\n\n pass", "def refresh(self):\n\n for w in self.windows.values():\n w.refresh()", "def __gitPull(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitPull(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Pull\"),\n self.tr(\"\"\"The project should 
be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def reload(self):", "def reload(self):", "def scan(self):\n\n # Check for whether session is still alive\n if not self.__projects:\n return\n\n Console.info(\"Scanning projects...\")\n Console.indent()\n\n for project in self.__projects:\n project.scan()\n\n for postscan in self.__postscans:\n postscan()\n\n Console.outdent()", "def test_projects_patch(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='PATCH',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def refresh(self):\n self._refresh_method()", "def projects():\n \n if 'username' in session:\n current_user = mongo.db.user.find_one({'username': session['username']}) \n projects = mongo.db.projects.find().sort('date',pymongo.DESCENDING)\n return render_template('pages/projects.html', title='Projects', projects=projects, current_user=current_user)\n \n flash('Please login to view user projects.', 'warning')\n return redirect(url_for('login'))", "def refresh_all(self) -> None:\n self._update_thread.force_refresh_folder(self.feed_cache)", "def open_project(self, project_path):\n self.clear()\n self.open_directory(project_path)\n if self.show_libraries:\n self.add_libraries()", "def all(cls):\r\n projects_url = 'https://www.pivotaltracker.com/services/v3/projects'\r\n response = _perform_pivotal_get(projects_url)\r\n\r\n root = ET.fromstring(response.text)\r\n if root is not None:\r\n return [Project.from_node(project_node) for project_node in root]", "def onRefreshRepositoryRuns(self, data, projectid):\n self.runsDialog.initializeTests(listing=data )", "def project(self, value):\n\n if self._project != value:\n self._project = value\n self._update_page()", "def do_project_update(cs, args):\n raise NotImplementedError", "def ConnectToProject(self):\r\n \r\n # Remove old connections\r\n for proj, signal in self._project_connections:\r\n if not proj.destroyed():\r\n proj.Disconnect(signal)\r\n self._project_connections = []\r\n \r\n # Connect to new project\r\n app = wingapi.gApplication\r\n proj = app.GetProject()\r\n self._project_connections.append((proj, proj.Connect('files-added', self.__CB_ProjectChanged, proj)))\r\n self._project_connections.append((proj, proj.Connect('files-removed', self.__CB_ProjectChanged, proj)))\r\n \r\n self.ScheduleUpdate()", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def update_project(self):\n\n modules = self.project.pyqt_modules\n\n modules[:] = [name for name, b in self._buttons.items()\n if b.explicitly_required]", "def refresh_from_api(self):\n self.populate_from_api(self.get_from_api())", "def all(cls):\n projects_url = 'https://www.pivotaltracker.com/services/v5/projects'\n root = _perform_pivotal_get(projects_url)\n if root is not None:\n return [Project.from_json(project_node) for project_node in root]", "def _on_del_project(self):\n project = self.ddnCurProject.get()\n# if len(project) > 0:\n if project:\n if '.prj'!= project[-4:]:\n project += '.prj'\n if os.path.exists(self.BibTerm + '/'+ project):\n os.remove(self.BibTerm + '/'+ project)\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n# if 
len(self.list_projects) > 0:\n if self.list_projects:\n self.ddnCurProject.set(self.list_projects[0])\n else:\n self.ddnCurProject.set('')\n pass", "def refresh_parcel_repos(self):\n if self.api_client.api_version < 'v16':\n logger.warning('Detected API version without support '\n 'for refreshParcelRepos (%s). Sleeping instead ...',\n self.api_client.api_version)\n sleep(30)\n else:\n return self.api_client.refresh_parcel_repos()", "def projectOpened(self):\n for editor in self.editors:\n editor.projectOpened()\n \n self.__editProjectPwlAct.setEnabled(True)\n self.__editProjectPelAct.setEnabled(True)", "def refresh( self, refreshIndexes=True ):\r\n try:\r\n self.oodocument.refresh()\r\n except:\r\n pass\r\n if refreshIndexes:\r\n #I needed the table of contents to automatically update in case page contents had changed\r\n try:\r\n #get all document indexes, eg. toc, or index\r\n oIndexes = self.oodocument.getDocumentIndexes()\r\n for x in range( 0, oIndexes.getCount() ):\r\n oIndexes.getByIndex( x ).update()\r\n except:\r\n pass", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]", "def populate_projects(self, projects_folder):\n projects = helpers.get_folders(projects_folder)\n for p in projects:\n self._combo.addItem(p)\n\n if self.default_project in projects:\n index = self._combo.findText(self.default_project,\n QtCore.Qt.MatchFixedString)\n if index >= 0:\n self._combo.setCurrentIndex(index)", "def _loadProjects(self):\n logger.debug(\"Func: _loadProjects\")\n\n if not os.path.isfile(self._pathsDict[\"projectsFile\"]):\n return\n else:\n projectsData = self._loadJson(self._pathsDict[\"projectsFile\"])\n if projectsData == -2:\n return -2\n return projectsData", "def onProjectChangedInRuns(self, projectName):\n projectId = self.iRepo.remote().getProjectId(project=projectName)\n\n # rest call\n RCI.instance().listingTests(projectId=projectId, forSaveAs=False, forRuns=True)", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def refresh(self):\r\n self.metadata = self.db.read(self.path).json()", "def resume(self):\n\n Console.info(\"Resuming session...\")\n\n for project in self.__projects:\n project.resume()", "def set_recent_projects_menu(self):\n self.menuRecent_projects.clear()\n for project_file_path in self.recent_projects:\n action = QAction(self, visible=False, triggered=self.open_project_activated)\n action.setText(project_file_path)\n action.setVisible(True)\n self.menuRecent_projects.addAction(action)", "def scan_all_projects():\n from projectscanner import ProjectScanner\n #ensure that new items get scanned by setting their last scan time to this.\n needs_scan_time = datetime.now() - timedelta(days=30)\n\n s = ProjectScanner()\n for projectinfo in s.scan_all():\n projectinfo.save_receipt(needs_scan_time)" ]
[ "0.76175606", "0.7572136", "0.75084645", "0.69303507", "0.65593135", "0.6468245", "0.64175284", "0.6360456", "0.63405234", "0.6282787", "0.6265902", "0.6220813", "0.6210531", "0.6204316", "0.6168462", "0.6165451", "0.6165451", "0.6127819", "0.6123018", "0.6102209", "0.6101717", "0.6074755", "0.6074755", "0.6074755", "0.60475147", "0.5989671", "0.59885186", "0.5978159", "0.59729546", "0.59547836", "0.59472036", "0.59472036", "0.59098834", "0.58661056", "0.58360046", "0.58274335", "0.58273447", "0.5799052", "0.5796843", "0.5794128", "0.57812953", "0.57787025", "0.5770689", "0.57433814", "0.57243097", "0.5716006", "0.57105726", "0.5687727", "0.56705034", "0.56543493", "0.5654037", "0.5652414", "0.5648525", "0.56480134", "0.56402034", "0.56383246", "0.563649", "0.5618916", "0.5618916", "0.5618916", "0.56037587", "0.5603435", "0.5587943", "0.5579766", "0.55672526", "0.5543536", "0.5542839", "0.5530606", "0.5530606", "0.55260015", "0.5519987", "0.5504589", "0.5502035", "0.5499964", "0.54992706", "0.5477044", "0.547113", "0.5467325", "0.54584414", "0.54574156", "0.54561955", "0.5444844", "0.5440451", "0.5437052", "0.5431097", "0.5425519", "0.54204494", "0.54113", "0.540214", "0.540214", "0.53939104", "0.53861773", "0.53753316", "0.53752387", "0.537348", "0.537348", "0.53677464", "0.53544724", "0.53530765", "0.5342528" ]
0.69102895
4
Refresh the project countdown for each project. If the project has expired, move the ProjBox to the completed tab.
def refresh_time(container):
    now = datetime.utcnow().replace(microsecond=0)
    for widget in container.get_children():
        if widget in win.default_texts:
            continue
        if widget.end_date > now:
            widget.left.set_text(str(widget.end_date - now))
        else:
            widget.left.set_text('Done!')
            widget.done = True
            win.complete.pack_start(widget, False, False, 0)
            container.remove(widget)
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def poll_advanced(self):\r\n osName = platform.system()\r\n\r\n ## Check if user updated project name\r\n try:\r\n ## Check if user updated project name\r\n checkName = self.widgetList[3].get()\r\n if checkName != self.newProj.name:\r\n if kT.check_proj_name(checkName):\r\n self.newProj.name = checkName\r\n else:\r\n self.newProj.name = None\r\n if self.prevName != checkName:\r\n tkMessageBox.showinfo(\"Invalid Project Name\",\\\r\n \"No spaces or special characters.\")\r\n self.prevName = checkName\r\n kT.debug_log(\"Invalid name\")\r\n except AttributeError:\r\n kT.debug_log(\"AttributeError\", sys.exc_info()[2])\r\n return\r\n\r\n self._retLoop = self.after(250, self.poll_advanced)", "def refresh(self):\n\n metadata = project_scrape(self.url)\n if metadata:\n if not self.done:\n self.progress.set_fraction(metadata['percent_raised'])\n self.progress.set_text(metadata['pretty_percent'])\n self.progress.set_show_text(True)\n self.pledged.set_text(metadata['pledged'])\n self.backers.set_text(metadata['backers'])\n self.updates.set_label(metadata['updates'])\n\n return True", "def repo_refresh_for_unfinished():\n project_list = Project.objects()\n crawl_list = []\n for repo in project_list:\n if repo.analyser_progress != \"100%\":\n crawl_list.append(repo.project_name)\n analyser.add_repos(current_user.username, crawl_list)\n flash('Refresh for unfinished successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def switch_project(project):\n # Get the data\n project = project.lower()\n lines, finished, last_project = parse_file(project=None)\n line1, i1, last1, _, times1 = parse_line(lines, last_project, finished)\n line2, i2, _, new2, times2 = parse_line(lines, project, True)\n now = datetime.now()\n\n # Format the data\n if not finished:\n punch1 = now - last1\n times1.append(punch1)\n punch1 = punch1.total_seconds()\n total1 = sum(t.total_seconds() for t in times1)\n total2 = sum(t.total_seconds() for t in times2)\n now = now.strftime(TIMEF)\n\n # Modifying the lines for the file\n lines[1] = HEADER1 + project\n if not finished:\n\n # Clock-Out\n line1[-1] += IN_OUT_SEP + now\n line1[1] = fnum(total1)\n line1 = PUNCH_SEP.join(line1)\n lines[i1] = line1\n\n # Clock-In\n line2.append(now)\n line2 = PUNCH_SEP.join(line2)\n if new2:\n lines.append(line2)\n else:\n lines[i2] = line2\n\n # Write to file\n with open(PUNCHES_PATH, 'w+') as f:\n f.write('\\n'.join(lines))\n\n # Report\n if new2:\n print(f\"Created Project: '{project}'\")\n if finished:\n print(f\"CURRENTLY CLOCKED OUT, Project Switched From: '{last_project}', To: '{project}'\")\n print(f\"NOW: {now}\")\n print(f\"'{last_project}' Total Hrs: {fnum(total1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")\n else:\n print(f\"CLOCK OUT, Project: '{last_project}'\")\n print(f\"CLOCK IN, Project: '{project}'\")\n print(f\"'{last_project}' IN: {last1.strftime(TIMEF)}, NOW: {now}\")\n print(f\"'{last_project}' Total Hrs: {fnum(total1)}, Current Punch: {fnum(punch1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")", "def refresh_details(self) -> None:\n data = request(\n 'get',\n f'/api/v0/projects/{self.id}/',\n ).json()\n self.data.update(data)", "def project_refresh_all():\n project_list = Project.objects()\n analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])\n flash('Refresh all successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def _on_del_project(self):\n project = self.ddnCurProject.get()\n# if len(project) > 0:\n if project:\n if 
'.prj'!= project[-4:]:\n project += '.prj'\n if os.path.exists(self.BibTerm + '/'+ project):\n os.remove(self.BibTerm + '/'+ project)\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n# if len(self.list_projects) > 0:\n if self.list_projects:\n self.ddnCurProject.set(self.list_projects[0])\n else:\n self.ddnCurProject.set('')\n pass", "def project_refresh(project_name):\n if not db_find_project(project_name):\n abort(404)\n analyser.add_repos(current_user.username, [project_name])\n return redirect(url_for('main.admin_manage'))", "def apache_projects():\n display = Display(visible=0, size=(800, 800)) \n display.start()\n # path to where I have chrome driver installed\n path_to_chromedriver = '/usr/local/bin/chromedriver'\n # initialize the driver\n driver = webdriver.Chrome(executable_path=path_to_chromedriver)\n # go to the apache projects page\n driver.get('https://projects.apache.org/projects.html')\n # wait for the list of projects to load\n time.sleep(2)\n\n # get the HTML element with id list\n elem = driver.find_element_by_id('list')\n project_list = elem.text.split(\"\\n\")\n # initialize an instance of Projects\n projects = Projects()\n\n for i in range(1, len(project_list) + 1):\n # Get the url of each project\n project_xpath = '//*[@id=\"list\"]/ul/li[%d]/a' %i\n # Get the HTML element that for the current project\n project_link = driver.find_element_by_xpath(project_xpath)\n project_name = project_link.text\n\n # Open the project page\n driver.get(project_link.get_attribute(\"href\"))\n # Wait for project page to load\n time.sleep(0.5)\n\n inception = get_inception(driver)\n description = get_description(driver, project_name)\n\n # get the name without \"Apache\", make it lowercase, and add dashes\n stripped_name = \"-\".join(project_name.lower().split(\" \")[1:]).encode('utf-8')\n github_mirror = \"http://github.com/apache/\" + stripped_name\n\n # see if there's anything at the github url that was generated\n resp = httplib2.Http().request(github_mirror, 'HEAD')\n # this means the github repo with the parsed url doesn't exist\n if int(resp[0]['status']) >= 400:\n github_mirror = \"N/A\"\n\n # Add extra attributes to the JSON\n description[\"github\"] = github_mirror\n description[\"company\"] = \"Apache Software Foundation\"\n description[\"name\"] = project_name\n description[\"day\"] = inception[\"day\"]\n description[\"month\"] = inception[\"month\"]\n description[\"year\"] = inception[\"year\"]\n\n projects.add(project_name, description)\n\n # Reset the driver\n driver.get('https://projects.apache.org/projects.html')\n time.sleep(0.8)\n\n return projects", "def pause(self):\n\n Console.info(\"Pausing session...\")\n\n for project in self.__projects:\n project.pause()", "def update_projects(self):\n self._read_directory()\n print(self._filenames)\n for filename in self._filenames:\n project = self._make_project(self._read_project(filename))\n self.projects.append(\n (int(project.get_id()), project)\n )\n self.projects = sorted(self.projects, reverse=True)", "def onClose(self, evt):\n if self.project:\n self.project.save()\n if config.TIMER:\n config.TIMER.stop_task(\"TOTALTIME\")\n config.TIMER.dump()\n for fn in Project.closehook:\n fn()\n evt.Skip()", "def resume(self):\n\n Console.info(\"Resuming session...\")\n\n for project in self.__projects:\n project.resume()", "def update_projects(self, new_projects_list):\n to_stop = [project for project in self if 
project not in new_projects_list]\n for project_id in to_stop:\n self[project_id].stop()\n del self[project_id]\n\n for project_id in new_projects_list:\n if project_id not in self:\n self[project_id] = ProjectManager(self.zk_client, project_id,\n self.callback)", "def submit_data(self):\n\n database = Database()\n project_data = []\n\n project_entries = [\"\",\n \"\",\n \"\",\n self.proj_date.get(),\n self.proj_descrpt.get(),\n self.proj_estdatest.get(),\n self.proj_estdateend.get(),\n self.proj_estbudget.get(),\n self.proj_actdatest.get(),\n self.proj_actdateend.get(),\n self.proj_actcost.get()]\n\n index = 0\n num_filled = 0\n for item in project_entries:\n if item == \"\":\n project_entries[index] = None\n else:\n num_filled += 1\n index += 1\n\n cus_name = self.customer_name.get()\n\n if num_filled == 0 and cus_name == \"\":\n ErrorMessageWindow(\"You have to fill in at least one argument!\")\n else:\n # If a customer name is provided.\n if cus_name != \"\":\n customer_data = database.query_customer(cus_name=cus_name)\n if customer_data:\n project_entries[1] = customer_data[0][0]\n project_data = self.multi_project(database.query_project(\n project_query_options=project_entries))\n else:\n ErrorMessageWindow(\"No customer with this name found.\")\n else:\n project_data = self.multi_project(database.query_project(\n project_query_options=project_entries))\n\n if project_data:\n schedule_data = database.query_project_tasks(\n project_data=project_data)\n customer_data = database.query_customer(project_data[0][1])\n\n region_data = database.query_region(\n region_id=customer_data[0][1])\n\n # Project schedule window definition.\n ps_window = tkinter.Tk()\n ps_window.wm_title(\"Project Schedule Display\")\n tkinter.Label(\n ps_window, text=\"Project Information:\"\n ).grid()\n\n # Display project information.\n tkinter.Label(\n ps_window,\n text=\"Project ID: {}\".format(project_data[0][0]),\n ).grid(\n pady=5, column=0, row=1\n )\n tkinter.Label(\n ps_window,\n text=\"Description: {}\".format(project_data[0][4]),\n ).grid(\n pady=5, column=1, row=1\n )\n tkinter.Label(\n ps_window,\n text=\"Company: {}\".format(customer_data[0][2]),\n ).grid(\n pady=5, column=0, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Contract Date: {}\".format(project_data[0][3]),\n ).grid(\n pady=5, column=1, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Region: {}\".format(region_data[0][1]),\n ).grid(\n pady=5, column=2, row=2\n )\n tkinter.Label(\n ps_window,\n text=\"Start Date: {}\".format(project_data[0][5]),\n ).grid(\n pady=5, column=0, row=3\n )\n tkinter.Label(\n ps_window,\n text=\"End Date: {}\".format(project_data[0][6]),\n ).grid(\n pady=5, column=1, row=3\n )\n tkinter.Label(\n ps_window,\n text=\"Budget: ${}\".format(project_data[0][7]),\n ).grid(\n pady=5, column=2, row=3\n )\n\n # Schedule table definition.\n p_s_view = tkinter.ttk.Treeview(ps_window)\n p_s_view.grid(pady=10, column=1, row=5)\n\n p_s_view[\"show\"] = \"headings\"\n p_s_view[\"columns\"] = (\n \"Start Date\", \"End Date\", \"Task Description\",\n \"Skill(s) Required\", \"Quantity Required\"\n )\n\n # Table column headings.\n for heading in p_s_view[\"columns\"]:\n p_s_view.heading(heading, text=heading)\n p_s_view.column(heading, width=250)\n\n # Load data into table.\n for item in schedule_data:\n p_s_view.insert('', 'end', values=item)\n else:\n ErrorMessageWindow(\"No project found with given info.\")", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def 
updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def refresh(self):\n self.details = self.workspace.get_job(self.id).details", "def project(self, value):\n\n if self._project != value:\n self._project = value\n self._update_page()", "def project_updated_handler(event):\n project = event.obj\n cache_manager.refresh(project)", "def XeprGUIrefresh(self):\n with self._lock:\n self._API.XeprRefreshGUI()", "def updateProjectList(self, i):\n\n cl = self.dlg.countryListWidget\n self.cv = cl.currentText()\n if self.dataPath is not None:\n prjPath = os.path.join(self.dataPath, self.countryList[self.cv])\n self.dlg.listWidget.clear()\n if os.path.exists(prjPath):\n for f in os.listdir(prjPath):\n if f.endswith(\".qgs\"):\n f = f.replace(\".qgs\", \"\")\n f = f.replace(\"_\", \" \")\n f = f.title()\n item = QListWidgetItem(f)\n self.dlg.listWidget.addItem(item)\n else:\n msg = (\"Selected data path does not have any country sub-directories. \"\n \"Please select the directory where the country projects are saved.\")\n QMessageBox.about(self.dlg, \"PacSAFE project folders missing\", msg)", "def refresh(self) :\n if not self.running:\n self.running = True\n self.strip.show()\n self.running = False\n self.refreshTimer.expired = True\n self.refreshTimer.isrunning = False", "def _update_projects_watch(self, new_projects_list):\n persistent_update_project = retry_children_watch_coroutine(\n '/appscale/projects', self.update_projects\n )\n main_io_loop = IOLoop.instance()\n main_io_loop.add_callback(persistent_update_project, new_projects_list)", "def process_project(self, project_name):\n self.logging.debug('Retrieving project %s..', project_name)\n\n try:\n project = self.get_lp_client().projects[project_name]\n except KeyError:\n self.logging.error(\n \"Project %s wasn't found. Skipped..\",\n project_name\n )\n else:\n if project:\n self.logging.debug(\n 'Retrieving active milestone %s..',\n self.get_new_milestone_name()\n )\n\n new_milestone = project.getMilestone(\n name=self.get_new_milestone_name()\n )\n self.get_stats()[project.name] = {}\n\n for old_milestone_name in self.get_old_milestone_names():\n if self.is_limit_achived():\n break\n\n self.process_milestone_on_project(\n project, old_milestone_name, new_milestone\n )\n\n else:\n self.logging.debug(\n \"Project %s wasn't found. 
Skipped..\",\n project_name\n )", "def update_project(self, name):\n self._log.info(\"Updating project: {}\".format(name))\n if name in self.projects:\n pass\n else:\n self.add_project(name)", "def update_operations_count(self):\n project = get_current_project()\n if project is not None:\n fns, sta, err, canceled = self.flow_service.get_operation_numbers(project.id)\n project.operations_finished = fns\n project.operations_started = sta\n project.operations_error = err\n project.operations_canceled = canceled\n add2session(KEY_PROJECT, project)", "def update_gui(self, *args):\r\n\r\n # List of widgets that will be disabled if choosing to clone a project\r\n disWidgetList = [3, 10, 11, 12, 13, 14, 15, 16, 17, 19, 20, 21, 22, 23, 25, 29, 34]\r\n\r\n try:\r\n self.localSDK.get_version()\r\n except IOError:\r\n kT.debug_log('IO Error', sys.exc_info()[2])\r\n try:\r\n self.newProj.name = self.widgetList[3].get()\r\n except IndexError:\r\n kT.debug_log('Index Error', sys.exc_info()[2])\r\n \r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n\r\n labelFont = 'Arial 9 bold'\r\n\r\n if self.prevProjType != self.advancedProjType.get():\r\n if self.advancedProjType.get():\r\n if len(self.widgetList) > 36:\r\n self.widgetList[35].grid_remove()\r\n self.widgetList[36].grid_remove()\r\n self.widgetList[37].grid_remove()\r\n del self.widgetList[37]\r\n del self.widgetList[36]\r\n del self.widgetList[35]\r\n # Disable widgets that aren't applicable to cloning\r\n for w in disWidgetList:\r\n self.widgetList[w].state([\"disabled\"])\r\n # Enable build\r\n self.widgetList[31].config(command=lambda: self.begin_advanced_gen(self.master, None))\r\n self.widgetList[31].state([\"!disabled\"])\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList[7].config(text='Board:')\r\n ### Widget 8 is te drop down menu for the devices\r\n self.widgetList[8].config(textvariable=self.advBrdSelect)\r\n self.widgetList[8]['values'] = self.localSDK.brdList\r\n try:\r\n self.widgetList[8].current(int(self.currBoard) - 1)\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n self.widgetList[8].current(0)\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n self.widgetList[8].current(0)\r\n ### Widget 34 is the label for the clone project drop down menu\r\n self.widgetList.append(Label(self, text='Project:', font=labelFont))\r\n self.widgetList[35].grid(row=2, column=3, sticky=W, pady=(5, 0))\r\n ### Widget 35 is te drop down menu for the clonable projects\r\n try:\r\n self.localSDK.get_projects(self.newProj.board[1])\r\n except IndexError:\r\n self.localSDK.get_projects('frdmk22f')\r\n self.widgetList.append(Combobox(self, state='readonly'))\r\n self.widgetList[36]['values'] = self.localSDK.demoLst\r\n self.widgetList[36].grid(row=3, column=3, columnspan=2, sticky=W+E, pady=(0, 0))\r\n try:\r\n self.widgetList[36].current(0)\r\n except TclError:\r\n kT.debug_log('No list', sys.exc_info()[2])\r\n else:\r\n kT.debug_log('Widget list length = %d' %len(self.widgetList))\r\n if len(self.widgetList) > 35:\r\n self.widgetList[35].grid_remove()\r\n self.widgetList[36].grid_remove()\r\n del self.widgetList[36]\r\n del self.widgetList[35]\r\n ### Widget 35 is a radio button for configuring a new project\r\n self.widgetList.append(Radiobutton(self, text='Device', variable=self.advancedDevType, \\\r\n value=0))\r\n try:\r\n self.widgetList[35].grid(row=3, column=3, sticky=W)\r\n except IndexError:\r\n 
self.prevProjType = self.advancedProjType.get()\r\n return\r\n ### Widget 36 is a radio button for configuring a cloned project\r\n self.widgetList.append(Radiobutton(self, text='Board', variable=self.advancedDevType, \\\r\n value=1))\r\n self.widgetList[36].grid(row=3, column=3, sticky=E)\r\n self.advancedDevType.set(0)\r\n ### Widget 37 is the label for project type\r\n self.widgetList.append(Label(self, text='Device or Board:', font=labelFont))\r\n self.widgetList[37].grid(row=2, column=3, sticky=W, pady=(5, 0))\r\n # Enable widgets that aren't applicable to cloning\r\n try:\r\n for w in disWidgetList:\r\n self.widgetList[w].state([\"!disabled\"])\r\n # Disable build\r\n self.widgetList[31].config(command=lambda: self.package_select(self.master))\r\n self.widgetList[31].state([\"!disabled\"])\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList[7].config(text='Device:')\r\n ### Widget 8 is te drop down menu for the devices\r\n self.widgetList[8].config(textvariable=self.advDevSelect)\r\n self.widgetList[8]['values'] = self.localSDK.devList\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n try:\r\n self.newProj.add_board(self.currBoard, self.localSDK.brdList)\r\n self.widgetList[8].current(self.localSDK.devList.index(self.newProj.device[0]))\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n try:\r\n self.widgetList[8].current(0)\r\n except IndexError:\r\n kT.debug_log('IndexError', sys.exc_info()[2])\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n try:\r\n self.widgetList[8].current(0)\r\n except IndexError:\r\n kT.debug_log('Index Error', sys.exc_info()[2])\r\n self.prevProjType = self.advancedProjType.get()\r\n self.update_proj()\r\n return", "def restart(self):\n self.points_arr.append(self.click_count)\n self.grid.destroy()\n self.click_count = 0\n self.ranking_label.destroy()\n self.ranking_box.destroy()\n self.ranking_box = Gtk.Grid()\n self.vbox.add(self.ranking_box)\n self.ranking_panel()\n self.point_label_score.set_markup(\"<b>0</b>\")\n self.point_label_score.show_all()\n self.grid = BallsGrid(self.rows, self.cols)\n self.create_grid()\n self.grid.show()", "def select_approved_projects(self):\r\n print \"Selecting approved projects... 
\"\r\n global ANNUAL_BUDGET\r\n \r\n projects_citizens_sorted = sorted(self.projects_for_vote, key=lambda project:project.units, reverse=True)\r\n projects_reps_sorted = sorted(self.projects_for_vote, key=lambda project:project.p_units, reverse=True)\r\n budget_sum = 0\r\n \r\n for p in projects_citizens_sorted:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n budget_sum = 0\r\n for p in projects_reps_sorted:\r\n if p not in self.projects_approved:\r\n budget_sum += p.budget\r\n if budget_sum <= ANNUAL_BUDGET/2:\r\n self.projects_approved.append(p)\r\n\r\n\r\n \r\n# raw_input(\"select_approved_projects - antes\")\r\n for p in projects_citizens_sorted:\r\n print p\r\n print \"\\nReps\\n\"\r\n for p in projects_reps_sorted:\r\n print p\r\n print \"\\nApproved\\n\"\r\n for p in self.projects_approved:\r\n print p\r\n\r\n raw_input(\"select_approved_projects - depois\")", "async def status(self, ctx, project_name: str) -> discord.Message:\n if not ctx.projects.find_project(project_name):\n await ctx.send(\"This project doesn't exist.\")\n return\n progress_bar = ctx.projects.project_progress_bar(project_name)\n if not progress_bar:\n progress_bar = self.empty_progress_bar\n await ctx.send(progress_bar)", "def launch_project_sizing():\n from queries import IN_PRODUCTION_NEED_SCAN, NEW_NEED_SCAN, OTHER_NEED_SCAN\n if not getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED\",False):\n logger.error(\"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED is false, not going to trigger launching\")\n return \"GNMPLUTOSTATS_PROJECT_SCAN_ENABLED is false, not going to trigger launching\"\n\n prioritise_old = getattr(settings,\"GNMPLUTOSTATS_PRIORITISE_OLD\",False)\n if prioritise_old:\n logger.warning(\"GNMPLUTOSTATS_PRIORITISE_OLD is set, will only focus on old projects\")\n\n trigger_limit = int(getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_LIMIT\",10))\n to_trigger = []\n c=0\n\n logger.info(\"Gathering projects to measure\")\n\n if not prioritise_old:\n highest_priority = IN_PRODUCTION_NEED_SCAN.order_by('last_scan')\n for entry in highest_priority:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n if not prioritise_old and len(to_trigger)<trigger_limit:\n next_priority = NEW_NEED_SCAN.order_by('last_scan')\n for entry in next_priority:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n if len(to_trigger)<trigger_limit:\n everything_else = OTHER_NEED_SCAN.order_by('last_scan')\n for entry in everything_else:\n to_trigger.append(entry)\n logger.info(\"{0}: {1} ({2})\".format(c, entry,entry.project_status))\n c+=1\n if c>=trigger_limit:\n break\n\n logger.info(\"Projects to scan: \".format(to_trigger))\n if len(to_trigger)==0:\n if prioritise_old:\n logger.error(\"No projects to scan and GNMPLUTOSTATS_PRIORITISE_OLD is set. 
You should disable this now to pick up new projects\")\n logger.info(\"No projects need to be scanned right now\")\n\n n=0\n for entry in to_trigger:\n n+=1\n calculate_project_size.apply_async(kwargs={'project_id': entry.project_id},queue=getattr(settings,\"GNMPLUTOSTATS_PROJECT_SCAN_QUEUE\",\"celery\"))\n return \"Triggered {0} projects to scan\".format(n)", "def __gitBisectReplay(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisectReplay(self.project.getProjectPath()) or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()", "def stop_modify_project(update, context):\n context.user_data[START_OVER] = True\n get_list_projects(update, context)\n\n return END", "def clone_update(self, *args):\r\n if self.advancedProjType.get():\r\n ##print self.advBrdSelect.get()\r\n userBoard = self.localSDK.brdList.index(self.advBrdSelect.get()) + 1\r\n self.newProj.add_board(userBoard, self.localSDK.brdList)\r\n self.localSDK.get_projects(self.newProj.board[1])\r\n try:\r\n self.widgetList[36]['values'] = self.localSDK.demoLst\r\n self.widgetList[36].current(0)\r\n except IndexError:\r\n kT.debug_log('Index Error', sys.exc_info()[2])", "def status(self,project_dir):\n \n if \"towercrane\" not in os.listdir(project_dir):\n print('(!) No project has been initialized yet.\\n => you can use \"towercrane init\" to start a new project.\\n => Or it might be because you have lost the \"towercrane config file\" ')\n \n elif \"towercrane\" in os.listdir(project_dir):\n TowercraneConfig = read_config(project_dir)\n project, files = self.db.get_project(TowercraneConfig[\"projectkey\"])\n files_table = tabulate([[file[1],file[0],file[2],file[-1]] for file in files], headers=['File Name', 'File Key','Size','status'], tablefmt='orgtbl')\n print(f'project:\"{TowercraneConfig[\"project_name\"]}\" with projectkey: \"{TowercraneConfig[\"projectkey\"]}\"\\nFiles added to the project: \\n\\n{files_table}')", "def _checkout_project(self, info):\n path = self._path_join(info.project)\n tl = RendererUpdateTransactionListener(self._renderer)\n prj = Project.init(path, info.project, info.apiurl,\n transaction_listener=[tl])\n self._update_project(prj, info)", "def _countdown(self):\n self._game.deleteBall()\n self._game.draw()\n # reset paddle speed\n self._game.updatePaddle(self.input)\n if ZERO_SECS <= self.time < ONE_SEC:\n self._mssg = (GLabel(text='3', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if ONE_SEC <= self.time < TWO_SECS:\n self._mssg = (GLabel(text='2', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if TWO_SECS <= self.time < THREE_SECS:\n self._mssg = (GLabel(text='1', x=GAME_WIDTH/2, y=GAME_HEIGHT/2,\n font_size=COUNTDOWN_FONT_SIZE))\n if self.time >= THREE_SECS:\n self._mssg = None\n self._game.serveBall()\n self._state = STATE_ACTIVE\n self._points_mssg = (GLabel(text='Points: 0', x=POINTS_X, y=POINTS_Y, font_size=24))", "def ConnectToProject(self):\r\n \r\n # Remove old connections\r\n for proj, signal in self._project_connections:\r\n if not proj.destroyed():\r\n proj.Disconnect(signal)\r\n self._project_connections = []\r\n \r\n # Connect to new project\r\n app = wingapi.gApplication\r\n proj = app.GetProject()\r\n 
self._project_connections.append((proj, proj.Connect('files-added', self.__CB_ProjectChanged, proj)))\r\n self._project_connections.append((proj, proj.Connect('files-removed', self.__CB_ProjectChanged, proj)))\r\n \r\n self.ScheduleUpdate()", "def reload_job(self):\n if self.ui['main_window'].widgets['live_preview'].get_active():\n self._update_preview()", "def __ensure_project(self, project_name):\n if project_name not in self.history:\n self.history[project_name] = {}\n self.history[project_name]['opened'] = []\n self.history[project_name]['closed'] = []", "def refresh(self):\n self.goto(self.starting_position)", "def poll_selection(self):\r\n osName = platform.system()\r\n\r\n ## Check if the user changed the KSDK_path\r\n try:\r\n checkPath = self.widgetList[1].get()\r\n if checkPath != self.localSDK.path:\r\n self.ask_set_directory(True, 1)\r\n\r\n ## Check if user updated project name\r\n checkName = self.widgetList[4].get()\r\n if checkName != self.newProj.name:\r\n if kT.check_proj_name(checkName):\r\n self.newProj.name = checkName\r\n else:\r\n self.newProj.name = None\r\n if self.prevName != checkName:\r\n tkMessageBox.showinfo(\"Invalid Project Name\",\\\r\n \"No spaces or special characters.\")\r\n self.prevName = checkName\r\n kT.debug_log(\"Invalid name\")\r\n except AttributeError:\r\n kT.debug_log(\"Basic Changed menu\", sys.exc_info()[2])\r\n #return\r\n\r\n try:\r\n now = self.widgetList[6].curselection()\r\n if now != self.curr:\r\n if len(self.widgetList[6].curselection()) > 0:\r\n try:\r\n self.displayBoard = PhotoImage(data=self.imageList[int(now[0])])\r\n except IndexError:\r\n kT.debug_log(now[0], sys.exc_info()[2])\r\n self.widgetList[8].grid_remove()\r\n self.widgetList[8] = Button(self, \\\r\n image=self.displayBoard, \\\r\n command=lambda:\\\r\n self.web_launch(self.localSDK.brdList[\\\r\n int(self.widgetList[6].curselection()[0])]))\r\n self.widgetList[8].image = self.displayBoard\r\n self.widgetList[8].grid(row=5, column=3, columnspan=3, sticky=E+W+N+S)\r\n self.widgetList[8].bind(\"<Enter>\", \\\r\n lambda h: self.update_tips('Is this your board?\\n' + \\\r\n 'If so, ' + \\\r\n 'then clicking on the board' + \\\r\n ' image will take you to the ' + \\\r\n 'board homepage on ' + \\\r\n 'freescale.com.\\n\\n'))\r\n self.widgetList[8].bind(\"<Leave>\", \\\r\n lambda h: self.update_tips(self.defaultHelp))\r\n self.curr = now\r\n try:\r\n self.currBoard = int(self.widgetList[6].curselection()[0]) + 1\r\n # Clear out driver list and board\r\n self.newProj.board = ()\r\n self.newProj.drvList = []\r\n # Configure ksdkProj given GUI state\r\n self.localSDK.get_version()\r\n self.newProj.name = self.widgetList[4].get()\r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n self.newProj.useBSP = not self.localSDK.isNewVersion()\r\n except IndexError:\r\n self.displayBoard = PhotoImage(data=kImg.boardImages['kds_icon.gif'])\r\n self.widgetList[8].config(image=self.displayBoard)\r\n self.widgetList[8].image = self.displayBoard\r\n self.widgetList[8].config(command=lambda: self.web_launch(kImg.boardImages['NoPreview.gif']))\r\n kT.debug_log(\"Index Error\", sys.exc_info()[2])\r\n #return\r\n except IndexError:\r\n kT.debug_log(\"Index Error\", sys.exc_info()[2])\r\n #return\r\n except AttributeError:\r\n kT.debug_log(\"AttributeError\", sys.exc_info()[2])\r\n return\r\n\r\n self._retLoop = self.after(250, self.poll_selection)", "def poker(project):\r\n total_stories = len(project.unestimated_stories())\r\n for idx, story in 
enumerate(project.unestimated_stories()):\r\n clear()\r\n rows, cols = _get_column_dimensions()\r\n print \"{} PLANNING POKER SESSION [{}]\".format(project.name.upper(), bold(\"{}/{} Stories Estimated\".format(idx+1, total_stories)))\r\n print \"-\" * cols\r\n pretty_print_story(story)\r\n prompt_estimation(project, story)\r\n else:\r\n print \"KaBoom!!! Nice Work Team\"", "def Refresh(self):\n pass", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def refresh(self):\n self.Refresh()", "def swap_active_portfolio(self):\n\n self.lock.acquire()\n\n self.clear_main()\n\n w = self.windows['MAIN']\n l = 1\n\n for p in self.portfolios:\n # Only support 9 portfolios since that makes this easier to deal\n # with.\n if l >= 10:\n break\n\n w.addstr(l, 0, '%2d' % l, curses.A_BOLD | curses.color_pair(1))\n w.addstr(l, 3, p.name)\n l += 1\n\n self.refresh()\n\n # Wait for the user to give is a key.\n while True:\n c = self.stdscr.getch()\n\n if c < ord('1') and c > ord('9'):\n continue\n\n index = c - ord('1')\n\n if index < len(self.portfolios):\n break\n\n self.portfolios[index].refresh()\n\n self.active_portfolio = self.portfolios[index]\n self.display_portfolio(self.active_portfolio)\n self.lock.release()", "def update_project(arn=None, name=None, defaultJobTimeoutMinutes=None):\n pass", "def toolbox_current_changed(self, index):\r\n if self.openedfileslistwidget.isVisible():\r\n self.refresh_openedfileslistwidget()\r\n elif self.classbrowser.isVisible():\r\n # Refreshing class browser\r\n editortabwidget = self.get_current_editortabwidget()\r\n editortabwidget.refresh()\r\n elif self.analysislistwidget.isVisible():\r\n self.refresh_analysislistwidget()", "def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])", "def __projectOpened(self):\n if self.__e5project.getProjectType() == \"Django\":\n projectAct = self.__ui.getMenuBarAction(\"project\")\n actions = self.__ui.menuBar().actions()\n insertAct = actions[actions.index(projectAct) + 1]\n self.__mainAct = self.__ui.menuBar().insertMenu(\n insertAct, self.__mainMenu)", "def update_image_all_projects(self):\n projects = Project.objects.all(temporary=False)\n for project in projects:\n project.update_image()", "def set_recent_projects_menu(self):\n self.menuRecent_projects.clear()\n for project_file_path in self.recent_projects:\n action = QAction(self, visible=False, triggered=self.open_project_activated)\n action.setText(project_file_path)\n action.setVisible(True)\n self.menuRecent_projects.addAction(action)", "def projects(update_db=False):\n try:\n if not os.path.isfile(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\"\n ):\n update(update_db=update_db)\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\", 
\"r\"\n ) as f:\n whiteboard = f.read()\n\n return whiteboard\n\n except:\n return traceback.format_exc()", "def _on_new_project(self):\n lang = self.ddnGuiLanguage.get()\n projectfile = filedialog.asksaveasfilename(\\\n filetypes=[('Paratext Biblical Terms', '.htm'), ], \\\n initialdir=self.BibTerm, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['BibTerms2Dict project'], \\\n defaultextension='.prj')\n if os.path.exists(projectfile):\n messagebox.showwarning(LOCALIZED_TEXT[lang]['New Project'], \\\n LOCALIZED_TEXT[lang]['{} already exist choose another name.'].\\\n format(os.path.basename(projectfile)))\n return\n else:\n newfile = codecs.open(fileout, mode='w', encoding='utf-8')\n newfile.close()\n self.list_projects = [f.rstrip('.prj') \\\n for f in os.listdir(self.BibTerm) \\\n if f.endswith('.prj')]\n self.ddnCurProject['values'] = self.list_projects\n self.ddnCurProject.set(os.path.basename(projectfile)[:-4])\n self.update\n\n pass", "def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()", "def clearExpired(self):\n self.sleep_approx(1)\n playersOnPage = self.driver.find_elements_by_tag_name(\"li.listFUTItem\")\n\n num_players_expired = 0\n for player in playersOnPage:\n bidStatus = player.get_attribute(\"class\")\n bidStatus = str(bidStatus)\n\n if \"expired\" in bidStatus:\n num_players_expired += 1\n\n if num_players_expired > 0:\n clearExpired = self.driver.find_element(\n By.XPATH, \"/html/body/main/section/section/div[2]/div/div/div/section[4]/header/button\")\n self.driver.execute_script(\n \"arguments[0].scrollIntoView(true);\", clearExpired)\n WebDriverWait(self.driver, 20).until(EC.element_to_be_clickable(\n (By.XPATH, \"/html/body/main/section/section/div[2]/div/div/div/section[4]/header/button\"))).click()\n self.sleep_approx(1)\n log_event(self.queue, \"Cleared expired\")\n self.sleep_approx(1)", "def onRefreshRepositoryRuns(self, data, projectid):\n self.runsDialog.initializeTests(listing=data )", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def RefreshCaptions(self):\r\n\r\n for part in self._uiparts:\r\n if part.type == AuiDockUIPart.typeCaption:\r\n self._frame.Refresh(True, part.rect)\r\n self._frame.Update()", "def work(self):\r\n if self.working:\r\n if self.time == 0:\r\n self.items = [Item.P]\r\n self.working = False\r\n self.time = 4\r\n else:\r\n self.time -= 1", "def refresh_dialog(self):\n self._client.update_elements()", "def advanced_gui(self, master):\r\n\r\n # Turn off polling function\r\n self.newProj.isQuickGenerate = False\r\n self._retLoop = None\r\n\r\n #Remove active widgets from the screen and then clear widget list out\r\n if self.widgetList:\r\n for w in self.widgetList:\r\n w.grid_remove()\r\n del self.widgetList[:]\r\n\r\n osName = platform.system()\r\n\r\n if osName != 'Darwin':\r\n 
labelFont = 'Arial 9 bold'\r\n else:\r\n labelFont = 'bold'\r\n\r\n ### Widget 0 is a label for padding column 0\r\n self.widgetList.append(Label(self, text=''))\r\n self.widgetList[0].grid(row=0, column=0, sticky=E+W, padx=5)\r\n\r\n ### Widget 1 is a button to return to simple menu\r\n self.widgetList.append(Button(self, text='Return', \\\r\n command=lambda: self.launch_basic(master)))\r\n self.widgetList[1].grid(row=16, column=1, sticky=W)\r\n\r\n ### Widget 2 is a label for the project name text field\r\n self.widgetList.append(Label(self, text='Project Name: ', font=labelFont))\r\n self.widgetList[2].grid(row=0, column=1, sticky=W, pady=(5, 0))\r\n\r\n ### Widget 3 is the text field for project name entry\r\n self.widgetList.append(Entry(self, width=25))\r\n self.widgetList[3].insert(0, self.newProj.name)\r\n self.widgetList[3].grid(row=1, column=1, sticky=W, pady=(0, 0))\r\n\r\n ### Widget 4 is the label for project type\r\n self.widgetList.append(Label(self, text='Project Type:', font=labelFont))\r\n self.widgetList[4].grid(row=2, column=1, sticky=W, pady=(5, 0))\r\n\r\n ### Widget 5 is a radio button for configuring a new project\r\n self.widgetList.append(Radiobutton(self, text='New', variable=self.advancedProjType, \\\r\n value=0))\r\n self.widgetList[5].grid(row=3, column=1, sticky=W)\r\n\r\n ### Widget 6 is a radio button for configuring a cloned project\r\n self.widgetList.append(Radiobutton(self, text='Clone', variable=self.advancedProjType, \\\r\n value=1))\r\n self.widgetList[6].grid(row=3, column=1, sticky=E)\r\n\r\n ### Widget 7 is the label for the device drop down menu\r\n self.widgetList.append(Label(self, text='Device:', font=labelFont))\r\n self.widgetList[7].grid(row=0, column=3, sticky=W, pady=(5, 0))\r\n\r\n ### Widget 8 is te drop down menu for the devices\r\n self.pop_adv_devices()\r\n #self.widgetList.append(OptionMenu(self, userDev, *self.localSDK.devList))\r\n self.widgetList.append(Combobox(self, state='readonly'))\r\n self.widgetList[8].config(textvariable=self.advDevSelect)\r\n self.widgetList[8]['values'] = self.localSDK.devList\r\n self.widgetList[8].grid(row=1, column=3, sticky=W, pady=(0, 0))\r\n try:\r\n self.newProj.add_board(self.currBoard, self.localSDK.brdList)\r\n self.widgetList[8].current(self.localSDK.devList.index(self.newProj.device[0]))\r\n except IOError: ## Catch the case where the user hasn't selected anything\r\n self.widgetList[8].current(0)\r\n except ValueError: ## Catch the case where there is no device given in manifest\r\n self.widgetList[8].current(0)\r\n\r\n ### Widget 9 is a label for the library configuration radio buttons\r\n libraryConfigurationWidget = Label(self, text='Library Configuration:', font=labelFont)\r\n self.widgetList.append(libraryConfigurationWidget)\r\n self.widgetList[9].grid(row=4, column=1, sticky=W, columnspan=2)\r\n\r\n ### Widget 10 is a radio button for the library configuration\r\n halOnlyWidget = Radiobutton(self, text='HAL only', variable=self.advancedLibType,value=0)\r\n self.widgetList.append(halOnlyWidget)\r\n self.widgetList[10].grid(row=6, column=1, sticky=W)\r\n\r\n ### Widget 11 is a radio button for the library configuration\r\n platformWidget = Radiobutton(self, text='Platform', variable=self.advancedLibType, value=1)\r\n self.widgetList.append(platformWidget)\r\n self.widgetList[11].grid(row=5, column=1, sticky=W)\r\n\r\n # Set default to select platform library\r\n self.advancedLibType.set(1)\r\n \r\n # in new version there is not hal vs. 
platform\r\n if self.localSDK.isNewVersion():\r\n libraryConfigurationWidget.grid_remove()\r\n halOnlyWidget.grid_remove()\r\n platformWidget.grid_remove()\r\n\r\n ### Widget 12 is a label for the library configuration radio buttons\r\n self.widgetList.append(Label(self, text='RTOS Configuration:', font=labelFont))\r\n self.widgetList[12].grid(row=7, column=1, sticky=W, columnspan=2)\r\n\r\n ### Widget 13 is a radio button for the library configuration\r\n self.widgetList.append(Radiobutton(self, text='None', variable=self.advancedRtosType, \\\r\n value=0))\r\n self.widgetList[13].grid(row=8, column=1, sticky=W)\r\n\r\n ### Widget 14 is a radio button for the library configuration\r\n mqxWidget = Radiobutton(self, text='MQX', variable=self.advancedRtosType, value=1)\r\n self.widgetList.append(mqxWidget)\r\n mqxWidget.grid(row=9, column=1, sticky=W)\r\n\r\n # in KSDK 2.0 and newer version there is no MQX support so the MQX option has to be removed\r\n # in some older version of KSDK (1.2, 1.3) MQX support is missing so this option has to be removed\r\n if not self.localSDK.isMQXSupported():\r\n mqxWidget.grid_remove()\r\n\r\n\r\n ### Widget 15 is a radio button for the library configuration\r\n freeRTOSWidget = Radiobutton(self, text='FreeRTOS', variable=self.advancedRtosType, value=2)\r\n self.widgetList.append(freeRTOSWidget)\r\n freeRTOSWidget.grid(row=10, column=1, sticky=W)\r\n # if FreeRTOS is not supported in KSDK option should be removed\r\n if not self.localSDK.isFreeRTOSSupported():\r\n freeRTOSWidget.grid_remove()\r\n\r\n ### Widget 16 is a radio button for the library configuration\r\n uCOSIIWidget = Radiobutton(self, text='uC/OS-II', variable=self.advancedRtosType, value=3)\r\n self.widgetList.append(uCOSIIWidget)\r\n uCOSIIWidget.grid(row=11, column=1, sticky=W)\r\n if not self.localSDK.isuCOSIISupported():\r\n uCOSIIWidget.grid_remove()\r\n\r\n ### Widget 17 is a radio button for the library configuration\r\n uCOSIIIWidget = Radiobutton(self, text='uC/OS-III', variable=self.advancedRtosType, value=4)\r\n self.widgetList.append(uCOSIIIWidget)\r\n uCOSIIIWidget.grid(row=12, column=1, sticky=W)\r\n if not self.localSDK.isuCOSIIISupported():\r\n uCOSIIIWidget.grid_remove()\r\n\r\n ### Widget 18 is a label for the toolchain check boxes\r\n self.widgetList.append(Label(self, text='Choose Supported Toolchain(s):', font=labelFont))\r\n self.widgetList[18].grid(row=4, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 19 is a check box for KDS\r\n kdsOptionWidget = Checkbutton(self, text=kSdk.KDSname, variable=self.advIsKds)\r\n self.widgetList.append(kdsOptionWidget)\r\n self.widgetList[19].grid(row=5, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 20 is a check box for IAR\r\n iarOptionWidget = Checkbutton(self, text=kSdk.IARname, variable=self.advIsIar)\r\n self.widgetList.append(iarOptionWidget)\r\n self.widgetList[20].grid(row=6, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 21 is a check box for MDK\r\n keilMdkOptionWidget = Checkbutton(self, text=kSdk.keilMDKARMname, variable=self.advIsMdk)\r\n self.widgetList.append(keilMdkOptionWidget)\r\n self.widgetList[21].grid(row=7, column=3, sticky=W, columnspan=2)\r\n\r\n ### Widget 22 is a check box for ATS\r\n atollicOptionWidget = Checkbutton(self, text=kSdk.AtollicStudio, variable=self.advIsAts)\r\n self.widgetList.append(atollicOptionWidget)\r\n self.widgetList[22].grid(row=8, column=3, sticky=W, columnspan=2)\r\n\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.KinetisDesignStudio):\r\n 
kdsOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.IARname):\r\n iarOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.KeilMDK):\r\n keilMdkOptionWidget.grid_remove()\r\n if not self.localSDK.isToolchainTypeSupported(ToolchainType.AtollicStudio):\r\n atollicOptionWidget.grid_remove()\r\n\r\n ### Widget 23 is a check box for GCC\r\n self.widgetList.append(Checkbutton(self, text='GCC Command Line', variable=self.advIsGcc))\r\n self.widgetList[23].grid(row=9, column=3, sticky=W, columnspan=2)\r\n self.widgetList[23].state([\"disabled\"])\r\n self.widgetList[23].grid_remove()\r\n\r\n ### Widget 24 is a label for adding BSP\r\n #self.widgetList.append(Label(self, text='USB and Board Support:', font=labelFont))\r\n boardSupportLabel = Label(self, text='Board Support:', font=labelFont)\r\n self.widgetList.append(boardSupportLabel)\r\n self.widgetList[24].grid(row=10, column=3, sticky=W, columnspan=2, pady=(5, 0))\r\n\r\n ### Widget 25 is a checkbox for adding BSP\r\n includeBSPFilesOption = Checkbutton(self, text='Include BSP files', variable=self.advIsBsp)\r\n self.widgetList.append(includeBSPFilesOption)\r\n self.widgetList[25].grid(row=11, column=3, sticky=W, columnspan=2)\r\n self.widgetList[25].state([\"!disabled\"])\r\n \r\n if self.localSDK.isNewVersion():\r\n boardSupportLabel.grid_remove()\r\n includeBSPFilesOption.grid_remove()\r\n\r\n ### Widget 26 is a label for the output path entry\r\n self.widgetList.append(Label(self, text='Project Parent Directory:', \\\r\n font=labelFont))\r\n self.widgetList[26].grid(row=13, column=1, sticky=W, columnspan=4, pady=(5, 0))\r\n\r\n ### Widget 27 is a text entry for the output path\r\n if self.newProj.osType == 'Windows':\r\n entryWidth = int(77.0 / WIN_SCALE)\r\n self.widgetList.append(Entry(self, width=entryWidth))\r\n else:\r\n self.widgetList.append(Entry(self, width=71))\r\n self.newProj.workSpace = self.newProj.sdkPath \r\n if self.newProj.osType == 'Windows':\r\n self.newProj.workSpace = kT.string_replace(self.newProj.workSpace, '/', '\\\\')\r\n self.widgetList[27].insert(0, self.newProj.workSpace)\r\n self.widgetList[27].grid(row=14, column=1, sticky=W, columnspan=4)\r\n\r\n ### Widget 28 is a button for browsing to a directory\r\n self.dir_opt['title'] = 'Select the directory you want the project to be generated into. 
'\r\n self.widgetList.append(Button(self, text='Browse', \\\r\n command=lambda: self.proj_set_directory(False, 27)))\r\n if self.newProj.osType == 'Windows':\r\n self.widgetList[28].grid(row=14, column=5, sticky=E)\r\n else:\r\n self.widgetList[28].grid(row=14, column=4, sticky=E)\r\n\r\n self.widgetList[28].state([\"disabled\"])\r\n\r\n ### Widget 29 is a checkbox for making a standalone project\r\n self.widgetList.append(Checkbutton(self, text='Generate standalone project', \\\r\n variable=self.advIsStandalone))\r\n self.widgetList[29].grid(row=15, column=1, sticky=W, columnspan=2, pady=5)\r\n\r\n ### Widget 30 is a help button\r\n self.widgetList.append(Button(self, text='Help', \\\r\n command=lambda: self.advanced_help(master, (Constants.ADV_HELP if self.localSDK.isNewVersion() else ADV_HELP))))\r\n if self.newProj.osType == 'Windows':\r\n self.widgetList[30].grid(row=1, column=5, sticky=E, pady=(0, 0))\r\n else:\r\n self.widgetList[30].grid(row=1, column=4, sticky=E, pady=(0, 0))\r\n #self.widgetList[30].state([\"disabled\"])\r\n\r\n ### Widget 31 is a button to generate the project\r\n if self.newProj.osType == 'Windows':\r\n style = Style()\r\n style.configure(\"Bold.TButton\", font='system 8 bold')\r\n self.widgetList.append(Button(self, text='Advanced Generate!', style=\"Bold.TButton\", \\\r\n command=lambda: self.package_select(master)))\r\n self.widgetList[31].grid(row=16, column=4, sticky=E+W+N+S, rowspan=2, columnspan=2)\r\n else:\r\n self.widgetList.append(Button(self, text='Advanced Generate!',\\\r\n command=lambda: self.package_select(master)))\r\n self.widgetList[31].grid(row=16, column=3, sticky=E+N+S, rowspan=2, columnspan=2)\r\n self.widgetList[31].state([\"!disabled\"])\r\n\r\n ### Widget 32 is a label for padding row 13\r\n self.widgetList.append(Label(self, text='', font=labelFont))\r\n self.widgetList[32].grid(row=0, column=6, sticky=E+W, padx=5)\r\n\r\n ### Widget 33 is a label for explaining the return button\r\n self.widgetList.append(Label(self, text='Click here to return to previous menu.'))\r\n self.widgetList[33].grid(row=17, column=1, columnspan=3, sticky=W)\r\n\r\n ### Widget 34 is a checkbox for adding USB\r\n self.widgetList.append(Checkbutton(self, text='Include USB', variable=self.advIsUsb))\r\n self.widgetList[34].grid(row=12, column=3, sticky=W, columnspan=2)\r\n self.widgetList[34].state([\"disabled\"])\r\n self.widgetList[34].grid_remove()\r\n\r\n ### Widget 35 is a radio button for configuring a new project\r\n self.widgetList.append(Radiobutton(self, text='Device', variable=self.advancedDevType, \\\r\n value=0))\r\n self.widgetList[35].grid(row=3, column=3, sticky=W)\r\n\r\n ### Widget 36 is a radio button for configuring a cloned project\r\n self.widgetList.append(Radiobutton(self, text='Board', variable=self.advancedDevType, \\\r\n value=1))\r\n self.widgetList[36].grid(row=3, column=3, sticky=E)\r\n\r\n ### Widget 37 is the label for project type\r\n self.widgetList.append(Label(self, text='Device or Board:', font=labelFont))\r\n self.widgetList[37].grid(row=2, column=3, sticky=W, pady=(5, 0))\r\n\r\n self.poll_advanced()\r\n \r\n #update project to set correct supported tools, path etc.\r\n self.update_proj()\r\n return", "def refresh_parcel_repos(self):\n if self.api_client.api_version < 'v16':\n logger.warning('Detected API version without support '\n 'for refreshParcelRepos (%s). 
Sleeping instead ...',\n self.api_client.api_version)\n sleep(30)\n else:\n return self.api_client.refresh_parcel_repos()", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def refresh(self, delay, one_step=False):\n self.canvas.itemconfig(\"rect\", fill=self._primary_color)\n\n # complete a turn on a temp board\n temp_board = self.blank_board()\n for row in range(self._dim):\n for col in range(self._dim):\n temp_board[row][col] = self.live_or_die(row, col)\n\n # replace real board with new updated board\n self._board = temp_board\n\n\n # refresh UI\n self.model_refresh()\n\n\n if self._continue and not one_step:\n self.after(delay, lambda: self.refresh(delay))", "def update(self):\n for uid, server in self.servers_online.items():\n if len(server.jobs):\n self.populate_server(server)\n for uid, server in self.servers_online.items():\n if server.jobs:\n server.jobs[0].task_time -= time_interval\n server.waiting_time -= time_interval\n if server.jobs[0].task_time <= 0:\n completed_task = server.jobs.pop(0)\n print(f\"Task '{completed_task.description}' completed\")\n self.all_tasks.remove(completed_task)\n self.servers_jobs_list[uid].pop(0)\n for uid, server in self.all_servers.items():\n if server.status:\n print(f\"{server.server_name} has {len(set(server.jobs))} job(s)\")\n else:\n print(f\"{server.server_name} is offline\")", "def scan_all_projects():\n from projectscanner import ProjectScanner\n #ensure that new items get scanned by setting their last scan time to this.\n needs_scan_time = datetime.now() - timedelta(days=30)\n\n s = ProjectScanner()\n for projectinfo in s.scan_all():\n projectinfo.save_receipt(needs_scan_time)", "def do_pp(self, arg):\n self.do_projects(arg)", "def qck_gen_proj(self, master):\r\n if not self._check_project_name():\r\n return\r\n\r\n # Clear out driver list and board\r\n self.newProj.board = ()\r\n self.newProj.drvList = []\r\n\r\n # Configure ksdkProj given GUI state\r\n self.localSDK.get_version()\r\n self.newProj.name = self.widgetList[4].get()\r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n self.newProj.useBSP = not self.localSDK.isNewVersion()\r\n\r\n # Add the board\r\n try:\r\n userBoard = int(self.widgetList[6].curselection()[0]) + 1\r\n self.newProj.add_board(userBoard, self.localSDK.brdList)\r\n except IndexError:\r\n tkMessageBox.showinfo(\"No board selected!\",\\\r\n \"Make sure a board has been selected.\")\r\n return\r\n\r\n self.widgetList[10].step(30)\r\n self.widgetList[10].update_idletasks()\r\n\r\n # Quick check to see if this poject already exists\r\n checkPath = self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath(self.newProj.board[1]) + '/' + self.newProj.name\r\n if os.path.isdir(checkPath):\r\n tkMessageBox.showinfo(\"Project exists\",\\\r\n \"A project by this name already exists.\")\r\n return\r\n\r\n # in quick mode there is always generated the board project\r\n self.newProj.isBoardProject = True\r\n \r\n # Add all drivers for this device\r\n self.localSDK.get_drivers()\r\n maskRet = kT.mask_features(kTool.KsdkTools(), self.newProj.sdkPath, self.newProj.sdkVer, \\\r\n 
self.localSDK.drvList, self.newProj.device[1], self.newProj.device[2])\r\n self.newProj.portCount = maskRet[0]\r\n self.newProj.dmaCount = maskRet[1]\r\n self.newProj.tsiVersion = maskRet[2]\r\n self.newProj.add_all_drv(self.localSDK.drvList)\r\n\r\n kT.debug_log('Port Count: ' + str(self.newProj.portCount))\r\n\r\n #Generate IAR project files\r\n #self.newProj.fast_build_IAR()\r\n self.newProj.workSpace = self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath(self.newProj.board[1]) + '/'\r\n projectPath = self.newProj.workSpace + self.newProj.name\r\n\r\n #Get all include paths lists into one list\r\n includeList = []\r\n index = 0\r\n isPresent = False\r\n while index < len(self.newProj.drvList):\r\n count = 0\r\n while count < len(self.newProj.drvList[index][2]):\r\n isPresent = False\r\n newPath = str(\\\r\n self.newProj.drvList[index][2][count]\\\r\n )\r\n if len(includeList) > 0:\r\n listIndex = 0\r\n while listIndex < len(includeList):\r\n if newPath == includeList[int(listIndex) - 1]:\r\n isPresent = True\r\n listIndex += 1\r\n if not isPresent:\r\n includeList.append(newPath)\r\n count += 1\r\n index += 1\r\n\r\n self.newProj.libList.append('platform')\r\n if not os.path.isdir(projectPath):\r\n os.makedirs(projectPath)\r\n self.newProj.rtos = 'bm'\r\n\r\n if not os.path.isfile(projectPath + '/main.c'):\r\n self.newProj.make_main_file(projectPath, includeList)\r\n if not os.path.isfile(projectPath + '/hardware_init.c'):\r\n self.newProj.make_hw_file(projectPath)\r\n\r\n self.widgetList[10].step(30)\r\n self.widgetList[10].update_idletasks()\r\n\r\n ## Copy over BSP files\r\n if self.newProj.useBSP:\r\n if not os.path.isdir(projectPath + '/board'):\r\n os.mkdir(projectPath + '/board')\r\n bspDir = self.newProj.sdkPath + '/examples/' + self.newProj.board[1]\r\n bspList = kT.list_files(bspDir)\r\n for f in bspList:\r\n if f[-2:] == '.c':\r\n shutil.copyfile(bspDir + '/' + f, projectPath + '/board/' + f)\r\n if f[-2:] == '.h':\r\n shutil.copyfile(bspDir + '/' + f, projectPath + '/board/' + f)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.IARname, self.newProj.device):\r\n print self.newProj.isLinked\r\n if self.localSDK.isNewVersion():\r\n newIar = kIarNew.KsdkIarNew(self.newProj)\r\n else:\r\n newIar = kIar.KsdkIar(self.newProj)\r\n newIar.gen_ewp(self.newProj)\r\n newIar.gen_eww(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.KeilMDK, self.newProj.device):\r\n #Generate MDK project files\r\n if self.localSDK.isNewVersion():\r\n newMdk = kMdkNew.KsdkMdkNew(self.newProj)\r\n else:\r\n newMdk = kMdk.KsdkMdk(self.newProj)\r\n newMdk.gen_proj(self.newProj)\r\n newMdk.gen_wkspace(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.KinetisDesignStudio, self.newProj.device):\r\n #Generate KDS project fiels\r\n print self.newProj.isLinked\r\n if self.localSDK.isNewVersion():\r\n newKds = kKdsNew.KsdkKdsNew(self.newProj)\r\n else:\r\n newKds = kKds.KsdkKds(self.newProj)\r\n\r\n newKds.gen_cproject(self.newProj)\r\n newKds.gen_project(self.newProj)\r\n newKds.gen_working_set(self.newProj)\r\n newKds.gen_debug(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.AtollicStudio, self.newProj.device):\r\n #Generate ATL project files\r\n if self.localSDK.isNewVersion():\r\n newAtl = kAtlNew.KsdkAtlNew(self.newProj)\r\n else:\r\n newAtl = kAtl.KsdkAtl(self.newProj)\r\n newAtl.gen_cproject(self.newProj)\r\n 
newAtl.gen_project(self.newProj)\r\n newAtl.gen_debug(self.newProj)\r\n newAtl.gen_settings(self.newProj)\r\n\r\n if self.localSDK.isToolchainTypeSupported(kSdk.ToolchainType.ARMgcc):\r\n #Generate GCC project files\r\n if not self.newProj.fast_build_GCC():\r\n tkMessageBox.showinfo(\"Missing CMake Files\",\\\r\n \"CMake files are missing from your KSDK installation.\")\r\n\r\n #Text for window\r\n genString = 'Your project was created in the following location:\\n'\r\n pathString = ''\r\n pathString += self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath( self.newProj.board[1]) + '/' + self.newProj.name + '/'\r\n genString += pathString\r\n genString += '\\nPress the button below to open project location folder.'\r\n\r\n #Create window to show USER that project has been generated and where it is.\r\n popGen = Toplevel()\r\n if self.newProj.osType == 'Windows':\r\n winH = 100 * WIN_SCALE\r\n winW = 600 * WIN_SCALE\r\n elif self.newProj.osType == 'Darwin':\r\n if platform.mac_ver()[0][:5] == '10.10':\r\n winH = 100\r\n winW = 600\r\n elif platform.mac_ver()[0][:5] == '10.11':\r\n winH = 100\r\n winW = 660\r\n else:\r\n winH = 100\r\n winW = 600\r\n popGen.config(height=winH, width=winW)\r\n popGen.grid()\r\n if self.newProj.osType == 'Linux':\r\n img = Image(\"photo\", data=kImg.boardImages['kds_icon.gif']) # Use the .gif in Linux\r\n popGen.tk.call('wm', 'iconphoto', popGen._w, img)\r\n popGen.title(\"Project created\")\r\n popGen.geometry('%dx%d+%d+%d' % (winW, winH, master.winfo_x() + 20, master.winfo_y() + 20))\r\n popGen.resizable(width=FALSE, height=FALSE)\r\n popGen.configure(background='#E7E7E7')\r\n\r\n genTxt = Label(popGen, text=genString, justify=LEFT)\r\n genTxt.grid(row=0, column=0, columnspan=2, padx=5, pady=5)\r\n\r\n #Create button to open project folder\r\n ## IF we are in windows, we need to replace all '/' with '\\\\'\r\n tempString = pathString[:]\r\n if self.newProj.osType == 'Windows':\r\n pathString = ''\r\n pathString = kT.string_replace(tempString, '/', '\\\\')\r\n\r\n genButton = Button(popGen, text='Open Project Folder', command=lambda: self.view_project(pathString, popGen))\r\n genButton.grid(row=2, column=0, sticky=W, padx=5, pady=5)\r\n\r\n self.widgetList[10].step(35)\r\n self.widgetList[10].update_idletasks()\r\n\r\n # patch to implement automation test\r\n self.pop_gen = popGen\r\n\r\n return", "def refresh(self):\n pass", "def refresh(self):\n pass", "def committees(self):\n print(\"Scheduling a refresh of committees\")\n if not self.background_scheduler.get_job('committees'):\n self.background_scheduler.add_job(Refresh.committees,\n 'cron',\n id='committees',\n name='committees',\n day='last fri')\n self._start()", "def threadComplete(self):\r\n self.flabel.config(text=\"Import Complete\")\r\n tk.Button(self.focus,text=\"Ok\",command=self.closePopup).pack()", "def refresh(self):\n\t\tself.win.refresh()\n\t\tfor c in self.components:\n\t\t\tc.refresh()", "def refresh(self):\n\t\tif self.id is None:\n\t\t\tprint(\"({cls}): self.id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id and self.project_id is None:\n\t\t\tprint(\"({cls}): self.project_id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id:\n\t\t\targs = [self.project_id, self.id]\n\t\telse:\n\t\t\targs = [self.id]\n\n\t\tres = getattr(self._client, \"get_\" + self.method)(*args, raw=True)\n\t\tself._create_fields(res)", "def 
update_page(self, waittime):\n if not self.runningtask.get():\n return\n if self.vars[\"enabled\"].get():\n logger.trace(\"Updating page\")\n self.display_item_set()\n self.load_display()\n self.after(waittime, lambda t=waittime: self.update_page(t))", "def interval_reported_4_current_view(self):\r\n # time_passed = self.current_view.time_spent_until_now()\r\n # self.log('interval_reported_4_current_view(): Time passed: ', time_passed)\r\n time_passed = self.current_view.time_spent_until_now()\r\n if time_passed >= 5.9:\r\n self.log('interval_reported_4_current_view(): Time passed: ', time_passed)\r\n self.root.after(0, lambda: self.current_view.done())", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def time_remaining(self):\n elapsed_time = time.time() - self.start_time\n self.progressbar['value'] = progressbar.current\n time_remaining = round((1 - progressbar.current) * elapsed_time)\n # Disabled for Demo due to confusion\n # if time_remaining < 60:\n # self.progress_label.config(text=f'Estimated Time Remaining: {time_remaining} seconds')\n # elif 3600 > time_remaining > 60:\n # time_remaining = round(time_remaining / 60)\n # self.progress_label.config(text=f'Estimated TIme Remaining: {time_remaining} minutes')\n # elif time_remaining > 3600:\n # time_remaining = dt.timedelta(seconds=time_remaining)\n # self.progress_label.config(text=f'Estimated Time Remaining: {time_remaining}')", "def RefreshSlide(self):\n pass", "def switch_project(self, project_name, check=True):\n with self.app.page_base.dropdown_menu_project as menu:\n\n if menu.label_project.value == project_name:\n self.app.current_project = project_name\n return\n\n menu.click()\n menu.item_project().click()\n self.app.current_project = project_name\n\n if check:\n self.close_notification('success')\n assert_that(menu.label_project.value, equal_to(project_name))", "def refresh_jobs(self):\n jobs = self.connection.user_jobs()\n\n self.init_jobs()\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n for val in jobs:\n\n if \"id\" in val:\n qitem = QTableWidgetItem(val[\"id\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n if \"error\" in val:\n if val[\"error\"]:\n if \"message\" in val[\"error\"]:\n qitem = QTableWidgetItem(val[\"error\"][\"message\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif \"description\" in val:\n qitem = QTableWidgetItem(val[\"description\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if \"submitted\" in val:\n qitem = QTableWidgetItem(val[\"submitted\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 2, qitem)\n\n execBtn = QPushButton(self.jobsTableWidget)\n execBtn.setText('Execute')\n\n if \"status\" in val:\n qitem = QTableWidgetItem(val[\"status\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 3, qitem)\n\n if val[\"status\"] == \"finished\":\n dispBtn = QPushButton(self.jobsTableWidget)\n dispBtn.setText('Display')\n self.jobsTableWidget.setCellWidget(row, 5, dispBtn)\n dispBtn.clicked.connect(lambda *args, row=row: self.job_display(row))\n\n self.jobsTableWidget.setCellWidget(row, 4, execBtn)\n execBtn.clicked.connect(lambda *args, row=row: self.job_execute(row))\n\n row += 1", "def projectClosed(self):\n for editor in self.editors:\n editor.projectClosed()\n \n self.__editProjectPwlAct.setEnabled(False)\n self.__editProjectPelAct.setEnabled(False)", "def 
track_portfolio(self, p):\n\n global st_refresh_thread\n\n if self.terminate:\n return\n\n p.refresh()\n\n self.lock.acquire()\n self.active_portfolio = p\n self.display_portfolio(p)\n self.lock.release()\n\n if not self.refresh_thread:\n thr_args = list()\n thr_args.append(self)\n self.refresh_thread = threading.Thread(target=st_refresh_thread,\n args=thr_args)\n self.refresh_thread.start()", "def open_project_activated(self):\n\n action = self.sender()\n\n # check if current observation\n if self.observationId:\n if dialog.MessageDialog(programName, \"Existe uma observação atual. O que você quer fazer?\",\n [\"Fechar observação\", \"Continuar observação\"]) == \"Fechar observação\":\n self.close_observation()\n else:\n return\n\n if self.projectChanged:\n response = dialog.MessageDialog(programName, \"O que fazer com o projeto atual?\",\n [SAVE, DISCARD, CANCEL])\n\n if response == SAVE:\n if self.save_project_activated() == \"not saved\":\n return\n\n if response == CANCEL:\n return\n\n if action.text() == \"Abrir projeto\":\n fn = QFileDialog(self).getOpenFileName(self, \"Open project\", \"\", \"Project files (*.eMOC);;All files (*)\")\n fileName = fn[0] if type(fn) is tuple else fn\n\n else: # recent project\n fileName = action.text()\n\n if fileName:\n project_path, project_changed, pj, msg = project_functions.open_project_json(fileName)\n\n if \"error\" in pj:\n logging.debug(pj[\"error\"])\n QMessageBox.critical(self, programName, pj[\"error\"])\n else:\n if msg:\n QMessageBox.information(self, programName, msg)\n\n self.load_project(project_path, project_changed, pj)", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def __CB_ProjectChanged(self, files):\r\n \r\n for fn in files:\r\n if fn.endswith('settings.py') or fn.endswith('manage.py'):\r\n self.ScheduleUpdate()\r\n return", "def refresh(self):\n for i in self.data:\n values = self.data[i]\n try:\n if values[\"state\"] == \"Teardown\":\n t_delta = (values[\"t_end\"] or values[\n \"date\"]) - values[\"ts\"]\n else:\n t_delta = values[\"date\"] - values[\"ts\"]\n\n if t_delta.total_seconds() < 0:\n t_delta = values[\"ts\"] - values[\"ts\"]\n values[\"duration\"] = str(t_delta.total_seconds())\n except:\n print sys.exc_info()\n # print values\n values[\"duration\"] = 0", "def close(self):\n\n if not self.__projects:\n return\n\n Console.debug(\"Closing session...\")\n Console.indent()\n\n for project in self.__projects:\n project.close()\n\n self.__projects = None\n\n Console.outdent()", "def buildPage(self):\n Users = [(u['name']) for u in driver.nodes.match(\"User\")]\n Tissues = [(t['name']) for t in driver.nodes.match(\"Tissue\")]\n Diseases = [(d['name']) for d in driver.nodes.match(\"Disease\")]\n self.add_basic_layout()\n layout = [html.Div([\n html.Div([html.H4('Project information', style={'width': '15.5%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.H4('', id='update_project_id', style={'width': '15%', 'verticalAlign': 'top', 'display': 'none'}),\n html.Br(),\n html.Div(children=[html.Label('Project name:*', style={'marginTop': 15}),\n dcc.Input(id='project name', placeholder='Insert name...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '100%'}),\n html.Br(),\n html.Div(children=[html.Label('Project Acronym:', style={'marginTop': 15}),\n dcc.Input(id='project acronym', placeholder='Insert name...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '100%'}),\n html.Br(),\n html.Div(children=[html.Label('Project 
Responsible:*', style={'marginTop': 15})],\n style={'width': '49%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Participants:*', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='responsible-picker', options=[{'label': i, 'value': i} for i in Users], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='participant-picker', options=[{'label': i, 'value': i} for i in Users], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Data Types:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Disease:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='data-types-picker', options=[{'label': i, 'value': i} for i in DataTypes], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='disease-picker', options=[{'label': i, 'value': i} for i in Diseases], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Tissue:*', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Project Intervention:', style={'marginTop': 10})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Dropdown(id='tissue-picker', options=[{'label': i, 'value': i} for i in Tissues], value=[], multi=True, searchable=True, style={'width': '100%'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='intervention-picker', placeholder='E.g. SNOMED identifier|SNOMED identifier|...', type='text', style={'width': '100%', 'height': '54px'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Number of subjects:*', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[html.Label('Timepoints:', style={'marginTop': 15})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='number_subjects', placeholder='E.g. 77 (each unique patient counts as 1 subject)', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Div(children=[dcc.Input(id='number_timepoints', placeholder='E.g. 
2 months|15 days|24 hours...', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '2%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Follows up project:', style={'marginTop': 15}),\n dcc.Input(id='related_to', placeholder='Use the Project Identifier (P000000X)', type='text', style={'width': '100%', 'height': '35px'})],\n style={'width': '49%', 'marginLeft': '0%', 'verticalAlign': 'top', 'display': 'inline-block'}),\n html.Br(),\n html.Br(),\n html.Div(children=[html.Label('Project Description:', style={'marginTop': 15}),\n dcc.Textarea(id='project description', placeholder='Enter description...', style={'width': '100%', 'height': '100px'})]),\n html.Br(),\n html.Div(children=[html.Label('Starting Date:', style={'marginTop': 10}),\n dcc.DatePickerSingle(id='date-picker-start', placeholder='Select date...', clearable=True)],\n style={'width': '30%', 'verticalAlign': 'top', 'marginTop': 10, 'display': 'inline-block'}),\n html.Div(children=[html.Label('Ending Date:', style={'marginTop': 10}),\n dcc.DatePickerSingle(id='date-picker-end', placeholder='Select date...', clearable=True)],\n style={'width': '30%', 'verticalAlign': 'top', 'marginTop': 10, 'display': 'inline-block'}),\n html.Div(children=html.Button('Create Project', id='project_button', n_clicks=0, className=\"button_link\",\n style={'fontSize': '25px'}), style={'width': '100%', 'padding-left': '87%', 'padding-right': '0%'}),\n html.Br(),\n html.Div(children=[html.A(children=html.Button('Download Clinical Data template', id='download_button', n_clicks=0,\n style={'fontSize': '16px', 'display': 'block'}),\n id='download_link', href='', n_clicks=0)], style={'width': '100%', 'padding-left': '87%', 'padding-right': '0%'}),\n html.Br(),\n html.Div(id='project-creation', style={'fontSize': '20px', 'marginLeft': '70%'}),\n html.Br()]),\n html.Hr()])]\n\n self.extend_layout(layout)", "def update_project_name(self, curr_proj, proj_new_name):\r\n for proj in self.__projects:\r\n if proj == curr_proj: # Find the project with the same current name\r\n proj.update_name(proj_new_name) # Update the project's name\r", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" 
+ date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? (Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def resync_domains_projects(self):\n pass", "def action_to_do(self) -> None:\n # 1\n order = ct.Controls.end_round()\n self.master.master.list_instances_menus_tournament = Menu.update_menus_tournament(order, self.master)\n self.master.master.left_window.update_and_display(self.master.master.list_instances_menus_tournament)\n self.master.master.launch()\n # 2\n self.master.destroy()", "def cb_gui_test_2( self, ):\r\n pass\r\n # TASK list is gone self.task_list.stop_auto( )\r", "def build_advanced(self, master):\r\n self.newProj.isBoardProject = self.advancedDevType.get() != 0\r\n self.widgetList[31].state([\"!disabled\"])\r\n\r\n if not self._check_project_name():\r\n return\r\n\r\n self.widgetList[31].step(1)\r\n self.widgetList[31].update_idletasks()\r\n clonedProjectRoot = ''\r\n if self.advancedProjType.get():\r\n ## Clone selected project\\\r\n cloneName = self.widgetList[36].get()\r\n\r\n # Add the board\r\n userBoard = self.localSDK.brdList.index(self.advBrdSelect.get()) + 1\r\n self.newProj.add_board(userBoard, self.localSDK.brdList)\r\n ##print self.newProj.board\r\n\r\n sourcePath = ''\r\n\r\n self.widgetList[31].step(1)\r\n self.widgetList[31].update_idletasks()\r\n\r\n if self.advIsStandalone.get():\r\n kT.debug_log('Standalone')\r\n self.newProj.isLinked = False\r\n ## Check if user updated project name\r\n checkName = self.widgetList[27].get()\r\n if checkName != self.newProj.workSpace:\r\n if kT.check_wksp_name(checkName):\r\n self.newProj.workSpace = checkName\r\n else:\r\n self.newProj.workSpace = None\r\n if self.prevWksp != checkName:\r\n tkMessageBox.showinfo(\"Invalid Output Path\",\\\r\n \"Permitted characters: 0-9a-zA-Z_ :\\\\/-.\")\r\n self.prevWksp = checkName\r\n kT.debug_log(\"Invalid name\")\r\n return False\r\n 
self.localSDK.get_drivers()\r\n maskRet = kT.mask_features(kTool.KsdkTools(), self.newProj.sdkPath, self.newProj.sdkVer, \\\r\n self.localSDK.drvList, self.newProj.device[1], self.newProj.device[2])\r\n self.newProj.portCount = maskRet[0]\r\n self.newProj.dmaCount = maskRet[1]\r\n self.newProj.tsiVersion = maskRet[2]\r\n self.newProj.add_all_drv(self.localSDK.drvList)\r\n\r\n self.widgetList[31].step(1)\r\n self.widgetList[31].update_idletasks()\r\n\r\n self.localSDK.get_hal()\r\n maskRet = kT.mask_features(kTool.KsdkTools(), self.newProj.sdkPath, self.newProj.sdkVer, \\\r\n self.localSDK.halList, self.newProj.device[1], self.newProj.device[2])\r\n self.newProj.portCount = maskRet[0]\r\n self.newProj.dmaCount = maskRet[1]\r\n self.newProj.tsiVersion = maskRet[2]\r\n self.newProj.add_all_hal(self.localSDK.halList)\r\n\r\n self.widgetList[31].step(1)\r\n self.widgetList[31].update_idletasks()\r\n\r\n sourcePath = self.newProj.workSpace\r\n else:\r\n sourcePath = self.newProj.sdkPath + '/' + self.newProj.parent.getDirectoryStructureHelper().getUserLinkedExamplesPath(self.newProj.board[1])\r\n \r\n self.newProj.workSpace = sourcePath\r\n cloneCheck = self.newProj.proj_clone(cloneName, self)\r\n if cloneCheck != True:\r\n if cloneCheck == False:\r\n tkMessageBox.showinfo(Texts.PROJECT_EXIST_HEADER, Texts.PROJECT_EXIST_TEXT)\r\n return\r\n else:\r\n tkMessageBox.showinfo(\"Error!\",\"An exception has occured, please restart the tool.\")\r\n sys.exit()\r\n return\r\n\r\n self.widgetList[31].step(5)\r\n self.widgetList[31].update_idletasks()\r\n\r\n kT.debug_log('Clone')\r\n else:\r\n ## Build a new project from scratch\r\n if len(self.newProj.toolChain) < 1:\r\n tkMessageBox.showinfo(\"No Toolchain Selected\",\\\r\n \"Select a toolchain to generate a project.\")\r\n return\r\n\r\n if self.newProj.rtos != 'bm':\r\n if self.newProj.rtos == 'mqx':\r\n if (self.newProj.device[1][1:] in MQX_DEVICES):\r\n pass\r\n else:\r\n tkMessageBox.showinfo(\"Invalid selection\",\\\r\n \"RTOS support not available at this time for \"+ self.newProj.board[3] + \".\")\r\n return\r\n else:\r\n #FIXME Radka add special detection based on RAM size (from manifest) for KSDK 2.0\r\n if not self.localSDK.isNewVersion():\r\n if self.newProj.device[1][1:] in RTOS_DEVICES:\r\n pass\r\n else:\r\n tkMessageBox.showinfo(\"Invalid selection\", \"RTOS support not available at this time for \"+ self.newProj.board[3] + \".\")\r\n return\r\n if self.isValidConfig.get():\r\n kT.debug_log('New')\r\n\r\n if not os.path.isdir(self.newProj.workSpace):\r\n os.mkdir(self.newProj.workSpace)\r\n\r\n # Generate main.c, main.h and hardware_init.c if needed\r\n sourcePath = self.newProj.workSpace + \\\r\n ('' if self.newProj.workSpace[-1:] == '/' else '/') + \\\r\n self.newProj.name\r\n\r\n if not os.path.isdir(sourcePath):\r\n os.mkdir(sourcePath)\r\n else:\r\n tkMessageBox.showinfo(\"Project already exists!\", \"Rename your project.\")\r\n return\r\n\r\n # Add all drivers for this device\r\n if self.newProj.libList[0] != 'hal':\r\n self.localSDK.get_drivers()\r\n maskRet = kT.mask_features(kTool.KsdkTools(), self.newProj.sdkPath, self.newProj.sdkVer, \\\r\n self.localSDK.drvList, self.newProj.device[1], self.newProj.device[2])\r\n self.newProj.portCount = maskRet[0]\r\n self.newProj.dmaCount = maskRet[1]\r\n self.newProj.tsiVersion = maskRet[2]\r\n self.newProj.add_all_drv(self.localSDK.drvList)\r\n else:\r\n self.localSDK.get_hal()\r\n maskRet = kT.mask_features(kTool.KsdkTools(), self.newProj.sdkPath, self.newProj.sdkVer, \\\r\n 
self.localSDK.halList, self.newProj.device[1], self.newProj.device[2])\r\n self.newProj.portCount = maskRet[0]\r\n self.newProj.dmaCount = maskRet[1]\r\n self.newProj.tsiVersion = maskRet[2]\r\n self.newProj.add_all_hal(self.localSDK.halList)\r\n ##print self.newProj.halList\r\n\r\n #Get all include paths lists into one list\r\n includeList = []\r\n projList = self.newProj.drvList if self.newProj.libList[0] != 'hal' else self.newProj.halList\r\n index = 0\r\n isPresent = False\r\n while index < len(projList):\r\n count = 0\r\n while count < len(projList[index][2]):\r\n isPresent = False\r\n newPath = str(\\\r\n projList[index][2][count]\\\r\n )\r\n if len(includeList) > 0:\r\n listIndex = 0\r\n while listIndex < len(includeList):\r\n if newPath == includeList[int(listIndex) - 1]:\r\n isPresent = True\r\n listIndex += 1\r\n if not isPresent:\r\n includeList.append(newPath)\r\n count += 1\r\n index += 1\r\n\r\n self.newProj.make_main_file(sourcePath, includeList)\r\n\r\n ## Copy over BSP files\r\n if self.newProj.useBSP:\r\n if not os.path.isdir(sourcePath + '/board'):\r\n os.mkdir(sourcePath + '/board')\r\n bspDir = self.newProj.sdkPath + '/examples/' + self.newProj.board[1]\r\n bspList = kT.list_files(bspDir)\r\n for f in bspList:\r\n if f[-2:] == '.c':\r\n shutil.copyfile(bspDir + '/' + f, sourcePath + '/board/' + f)\r\n if f[-2:] == '.h':\r\n shutil.copyfile(bspDir + '/' + f, sourcePath + '/board/' + f)\r\n self.newProj.make_hw_file(sourcePath)\r\n\r\n if self.newProj.isLinked == False:\r\n #print 'Standalone'\r\n if self.newProj.libList[0] != 'hal':\r\n self.localSDK.get_hal()\r\n maskRet = kT.mask_features(kTool.KsdkTools(), self.newProj.sdkPath, self.newProj.sdkVer, \\\r\n self.localSDK.halList, self.newProj.device[1], self.newProj.device[2])\r\n self.newProj.portCount = maskRet[0]\r\n self.newProj.dmaCount = maskRet[1]\r\n self.newProj.tsiVersion = maskRet[2]\r\n self.newProj.add_all_hal(self.localSDK.halList)\r\n self.newProj.copy_device_components(sourcePath, self)\r\n\r\n self.widgetList[31].step(5)\r\n self.widgetList[31].update_idletasks()\r\n\r\n index = 0\r\n\r\n while index < len(self.newProj.toolChain):\r\n if 'iar' in self.newProj.toolChain[index][1]:\r\n if self.localSDK.isNewVersion():\r\n newIar = kIarNew.KsdkIarNew(self.newProj)\r\n else:\r\n newIar = kIar.KsdkIar(self.newProj)\r\n newIar.gen_ewp(self.newProj)\r\n newIar.gen_eww(self.newProj)\r\n if 'kds' in self.newProj.toolChain[index][1]:\r\n if self.localSDK.isNewVersion():\r\n newKds = kKdsNew.KsdkKdsNew(self.newProj)\r\n else:\r\n newKds = kKds.KsdkKds(self.newProj)\r\n newKds.gen_cproject(self.newProj)\r\n newKds.gen_project(self.newProj)\r\n newKds.gen_working_set(self.newProj)\r\n newKds.gen_debug(self.newProj)\r\n if 'mdk' in self.newProj.toolChain[index][1]:\r\n if self.localSDK.isNewVersion():\r\n newMdk = kMdkNew.KsdkMdkNew(self.newProj)\r\n else:\r\n newMdk = kMdk.KsdkMdk(self.newProj)\r\n newMdk.gen_proj(self.newProj)\r\n newMdk.gen_wkspace(self.newProj)\r\n if 'atl' in self.newProj.toolChain[index][1]:\r\n if self.localSDK.isNewVersion():\r\n newAtl = kAtlNew.KsdkAtlNew(self.newProj)\r\n else:\r\n newAtl = kAtl.KsdkAtl(self.newProj)\r\n newAtl.gen_cproject(self.newProj)\r\n newAtl.gen_project(self.newProj)\r\n newAtl.gen_debug(self.newProj)\r\n newAtl.gen_settings(self.newProj)\r\n index += 1\r\n else:\r\n kT.debug_log('Not valid project.')\r\n return\r\n\r\n self.widgetList[31].step(5)\r\n self.widgetList[31].update_idletasks()\r\n\r\n #Create window to show USER that project has been generated and 
where it is.\r\n popGen = Toplevel()\r\n if self.newProj.osType == 'Windows':\r\n winH = 100 * WIN_SCALE\r\n winW = 600 * WIN_SCALE\r\n elif self.newProj.osType == 'Darwin':\r\n if platform.mac_ver()[0][:5] == '10.10':\r\n winH = 100\r\n winW = 600\r\n elif platform.mac_ver()[0][:5] == '10.11':\r\n winH = 100\r\n winW = 660\r\n else:\r\n winH = 100\r\n winW = 600\r\n popGen.config(height=winH, width=winW)\r\n popGen.grid()\r\n if self.newProj.osType == 'Linux':\r\n img = Image(\"photo\", data=kImg.boardImages['kds_icon.gif']) # Use the .gif in Linux\r\n popGen.tk.call('wm', 'iconphoto', popGen._w, img)\r\n popGen.title(\"Project created\")\r\n popGen.geometry('%dx%d+%d+%d' % (winW, winH, master.winfo_x() + 20, master.winfo_y() + 20))\r\n #FIXME Radka window about location of generated project is too small, its width should be based on its content \r\n popGen.resizable(width=TRUE, height=FALSE)\r\n popGen.configure(background='#E7E7E7')\r\n\r\n #Text for window\r\n genString = 'Your project was created in the following location:\\n'\r\n pathString = ''\r\n pathString += sourcePath if not clonedProjectRoot else clonedProjectRoot\r\n genString += pathString\r\n genString += '\\nPress the button below to open project location folder.'\r\n genTxt = Label(popGen, text=genString, justify=LEFT)\r\n genTxt.grid(row=0, column=0, columnspan=2, padx=5, pady=5)\r\n\r\n #Create button to open project folder\r\n ## IF we are in windows, we need to replace all '/' with '\\\\'\r\n tempString = pathString[:]\r\n pathString = ''\r\n if self.newProj.osType == 'Windows':\r\n pathString = kT.string_replace(tempString, '/', '\\\\')\r\n else:\r\n pathString = tempString[:]\r\n\r\n kT.debug_log('This is the path string: ' + pathString)\r\n\r\n genButton = Button(popGen, text='Open Project Folder', command=lambda: self.view_project(pathString, popGen))\r\n genButton.grid(row=2, column=0, sticky=W, padx=5, pady=5)\r\n\r\n self.newProj.clean_up()\r\n\r\n self.widgetList[31].step(5)\r\n self.widgetList[31].update_idletasks()\r\n\r\n # support automation test\r\n self.pop_gen = popGen\r\n\r\n return", "def update_timer(self):\n # Keep working\n if self.current_status == 0 and self.timer.time_left:\n time_left = seconds_to_minutes(self.timer.time_left)\n time_str = 'Pomodoro4linux - %02d:%02d' % (time_left)\n\n self.status_icon.set_tooltip(time_str)\n\n # Go get some coffee\n elif self.current_status == 0 and not self.timer.time_left:\n if self.break_count < self.timer.max_break_count:\n self.image.set_from_file(REST_ICON)\n self.break_count += 1\n self.warn_coffee_break()\n else:\n self.image.set_from_file(LONG_REST_ICON)\n self.break_count = 0\n self.warn_long_break()\n\n # Keep breaking\n elif self.current_status == 1 and self.timer.time_left:\n self._set_icon()\n time_left = seconds_to_minutes(self.timer.time_left)\n if self.break_count == 0:\n label_str = 'Long Break\\nRest for %02d:%02d minutes.' % \\\n (time_left)\n else:\n label_str = 'Coffee Break\\nRest for %02d:%02d minutes. 
(%d/%d)' % \\\n (time_left[0],time_left[1],self.break_count,self.timer.max_break_count)\n self.dialog.set_markup(label_str)\n\n # Come back to work, lazy boy\n elif self.current_status == 1 and not self.timer.time_left:\n label_str = 'You should be working now!'\n self.image.set_from_file(WORK_ICON)\n self.dialog.set_markup(label_str)\n self.pause_timer()\n self.current_status = 0\n self.timer.time_left = self.timer.work_time\n\n return True", "def __gitBisectReset(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisect(self.project.getProjectPath(), \"reset\") or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()" ]
[ "0.6487655", "0.6308911", "0.6178959", "0.6015678", "0.59574705", "0.59144545", "0.58194315", "0.5614993", "0.54217035", "0.53814393", "0.53416014", "0.5321008", "0.5291773", "0.5270808", "0.5252898", "0.5235397", "0.52225703", "0.51972866", "0.5182955", "0.5182299", "0.51771", "0.5168812", "0.51545185", "0.5148401", "0.5140571", "0.5108239", "0.51044935", "0.5087547", "0.5086424", "0.5068756", "0.50666976", "0.50494945", "0.5040012", "0.5039188", "0.5035745", "0.5026767", "0.5016265", "0.5010421", "0.5004731", "0.50032187", "0.49813756", "0.4978875", "0.49609083", "0.49541578", "0.49501407", "0.49336448", "0.4929752", "0.49213463", "0.49165127", "0.49164507", "0.49147862", "0.49062994", "0.49045965", "0.48942575", "0.4887006", "0.4879196", "0.48787585", "0.4868843", "0.4858207", "0.48580015", "0.4853032", "0.48322466", "0.48149708", "0.48018366", "0.47986573", "0.4795921", "0.47936454", "0.47929257", "0.47721967", "0.4766979", "0.47638413", "0.47583959", "0.47583959", "0.4754978", "0.47396642", "0.47390515", "0.47347957", "0.47317174", "0.47287518", "0.47220403", "0.47195715", "0.47057477", "0.4702338", "0.47013754", "0.4700254", "0.4695926", "0.46930856", "0.46905768", "0.46901423", "0.46874118", "0.4686407", "0.46816242", "0.46814293", "0.46761677", "0.46711975", "0.46684954", "0.46679938", "0.46652687", "0.46633577", "0.46593186" ]
0.56734455
7
Returns a request handler class that redirects to supplied `url`
def redirect_handler_factory(): class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_GET(self): self.send_response(301) domain = self.headers['host'] if ':' in domain: domain = domain.split(':')[0] self.send_header('Location', "https://" + domain + self.path) self.end_headers() return RedirectHandler
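A minimal, runnable Python 3 sketch of the document snippet above, assuming it is served via the standard http.server module (the snippet itself targets Python 2's SimpleHTTPServer; the port number and __main__ wiring here are illustrative assumptions, not part of the dataset record):

import http.server

def redirect_handler_factory():
    """Build a handler class whose GETs 301-redirect to HTTPS on the same host."""
    class RedirectHandler(http.server.SimpleHTTPRequestHandler):
        def do_GET(self):
            self.send_response(301)
            # Drop any ":port" suffix before building the Location header.
            domain = self.headers['Host'].split(':')[0]
            self.send_header('Location', 'https://' + domain + self.path)
            self.end_headers()
    return RedirectHandler

if __name__ == '__main__':
    # Assumed wiring: listen on port 8080 and redirect every GET to HTTPS.
    with http.server.HTTPServer(('', 8080), redirect_handler_factory()) as httpd:
        httpd.serve_forever()

Returning the class from a factory is the idiomatic way to parameterize http.server handlers, since the server instantiates the handler class itself for every incoming request.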
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_handler_factory(url):\n class RedirectHandler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(302)\n self.send_header('Location', url)\n self.end_headers()\n\n return RedirectHandler", "def redirect(url):", "def __call__(self, cls):\n handler = type('handler', (Handler,), dict(cls.__dict__))\n return handler(self._url_pattern)", "def redirect(self, url):\n raise RequestRedirect(url)", "def __init__(self, url, **kwargs):\n super(Redirect, self).__init__(**kwargs)\n self.value = url", "def urlfor( request, *args, **kwargs ):", "def _make_ssh_forward_handler_class(self, remote_address_):\n class Handler(_ForwardHandler):\n remote_address = remote_address_\n ssh_transport = self._transport\n logger = self.logger\n return Handler", "def _redirect(self, url):\n logger.debug('Redirecting to URL %s', url)\n segments = urllib.parse.urlparse(url)\n\n host = segments.netloc\n if host != self._host:\n self.new_connection(host)\n\n relurl = urllib.parse.urlunparse(('', '') + segments[2:])\n try:\n self._raw_get(relurl)\n except http.client.HTTPException as e:\n logger.debug('Got exception: %s.', e)\n raise DDGConnectionError(\"Failed to get '%s'.\" % url)", "def get_redirect_handler_for_site(site, request):\n\n view = queryMultiAdapter((site, request), name=\"redirect_handler\")\n if view:\n return view\n\n # Check if we have a redirect handler script in the site root\n if \"redirect_handler\" in site:\n return site[\"redirect_handler\"]\n\n return None", "def redirect(url, code=302):\n exc = status_map[code]\n raise exc(location=url).exception", "def get(url):\n url = add_slash(url)\n\n def _(func):\n re_url = re.compile(\"^%s$\" % url)\n REQUEST_MAPPINGS['GET'].append((re_url, url, func))\n return func\n return _", "def requestredirect(function):\n def wrapped(self, id, *args):\n try:\n server = self.db.requests.get_imaging_server(id)\n except exceptions.NotFound:\n raise web.notfound()\n if server != config.get('server', 'fqdn'):\n raise web.found(\"http://%s%s\" % (server, web.ctx.path))\n return function(self, id, *args)\n return wrapped", "def post(url):\n url = add_slash(url)\n\n def _(func):\n re_url = re.compile(\"^%s$\" % url)\n REQUEST_MAPPINGS['POST'].append((re_url, url, func))\n return func\n return _", "def __init__(self, url, proxy=None, **kwargs):\n self.proxy = proxy\n self.query_params = urllib.parse.urlencode(kwargs)\n self.url = url if not self.query_params else f\"{url}?{self.query_params}\"\n logger.info(\"UrllibHandler initialized: url=%s, proxy=%s\", self.url, self.proxy)", "def get_request_handler(self):\n if not hasattr(self, '_oauth_handler'):\n handler_class = self.get_handler_class()\n server = self.get_server()\n self._oauth_handler = handler_class(server)\n return self._oauth_handler", "def getRedirectedURL(url):\n try:\n cj = cookielib.CookieJar()\n cp = urllib2.HTTPCookieProcessor(cj)\n opener = urllib2.build_opener(cp)\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n url = url.replace('^https', 'http')\n request = urllib2.Request(url)\n u = opener.open(request)\n redirected_url = u.geturl()\n return redirected_url\n except urllib2.URLError:\n print \"URLError: %s\" % url\n return None\n except (urllib2.HTTPError, BadStatusLine, InvalidURL):\n print \"HTTPError: %s\" % (url)\n return None\n except (socket.timeout):\n print \"Timeout: %s\" % url\n return None\n except Exception as e:\n print \"Error: %s\" % url\n print e\n return None", "def urlopen(url):\n logging.info(\"urlopen %s\", url)\n \n try:\n return 
_urlopen(url)\n except ProxyError, e:\n logging.error(\"%s - %s\", str(e), url)\n response = ProxyHTTPResponse(url, None, method=\"GET\")\n response.error_bad_gateway()\n return response", "def handler(req):\n name = gethandlername(req.uri)\n if name == \"dispatcher\":\n raise404(\"Can't display the dispatcher\")\n handlerfunc = gethandlerfunc(name)\n return handlerfunc(req)", "def _find_url_handler(self, req):\n # First try - lookup in explicit (non parameterized URLs)\n if req.path in self.explicit_url_map:\n return self.explicit_url_map[req.path]\n # Second try - strip last path segment and lookup in another map\n idx = req.path.rfind(b'/') + 1\n path2 = req.path[:idx]\n if len(path2) > 0 and path2 in self.parameterized_url_map:\n # Save parameter into request\n req._param = req.path[idx:].decode()\n return self.parameterized_url_map[path2]\n\n if self.catch_all_handler:\n return self.catch_all_handler\n\n # No handler found\n return (None, None)", "def make_request(url):\n logger = logging.getLogger(\"http_request\")\n\n ff_agent = \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0)\" + \\\n \"Gecko/20100101 Firefox/35.0\"\n req = Request(url, headers={\"User-Agent\": ff_agent})\n\n logger.info(\"Sending request to %s\", url)\n try:\n response = urlopen(req)\n except URLError as e:\n if hasattr(e, \"reason\"):\n logger.error(\"Failed to reach a server. URL: %s Reason: %s\",\n (url, e.reason))\n elif hasattr(e, \"code\"):\n logger.error(\"The server couldn't fulfill the request. \" +\n \"URL: %s Error code: %s\", url, e.code)\n else:\n logger.error(\"Unknown URLError while making request. \" +\n \"URL: %s Error: %s\", url, str(e))\n return\n except Exception as e:\n logger.error(\"Unknown Exception while making request. \" +\n \"URL: %s Exception: %s\", url, str(e))\n return\n\n response_url = response.geturl()\n if response_url != url:\n logging.warn(\"Response url does not match requested url. \" +\n \"Request: %s Response %s\", url, response_url)\n\n try:\n page = response.read()\n except Exception as e:\n logger.error(\"Failed to read page from response. URL: %s Error: %s\",\n (url, str(e)))\n return\n\n logger.info(\"Successfully fetched page %s\", url)\n return page", "def redirect_request(self, req, fp, code, msg, headers, newurl):\n m = req.get_method()\n if (code in (301, 302, 303, 307) and m in (\"GET\", \"HEAD\")\n or code in (301, 302, 303) and m == \"POST\"):\n # Strictly (according to RFC 2616), 301 or 302 in response\n # to a POST MUST NOT cause a redirection without confirmation\n # from the user (of urllib2, in this case). In practice,\n # essentially all clients do redirect in this case, so we\n # do the same.\n # be conciliant with URIs containing a space\n newurl = newurl.replace(' ', '%20')\n newheaders = dict((k,v) for k,v in req.headers.items()\n if k.lower() not in (\"content-length\", \"content-type\")\n )\n return Request2(newurl,\n headers=newheaders,\n origin_req_host=req.get_origin_req_host(),\n unverifiable=True,\n method=\"GET\" if code == 303 else m)\n else:\n raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)", "def dispatcher(handlers):\n\n # Transforms wsgi_env, start_response args into request\n @wrappers.Request.application\n def dispatch(request):\n \"\"\"Handle one request.\"\"\"\n for url_re, app in handlers:\n matcher = re.match(url_re, request.path)\n if matcher and matcher.end() == len(request.path):\n if app is not None:\n # Send a response via the app specified in the handler.\n return app\n else:\n # The import must have failed. 
This will have been logged\n # at import time. Send a 500 error response.\n return response_for_error(httplib.INTERNAL_SERVER_ERROR)\n logging.error('No handler found for %s', request.path)\n return response_for_error(httplib.NOT_FOUND)\n\n return dispatch", "def __call__(self, environ, start_response):\n path_info = environ['PATH_INFO']\n for key, redirect in self.redirects.items():\n if self.match(key, path_info):\n environ['PATH_INFO'] = redirect\n return self(environ, start_response)\n else:\n path, cut, prefix = self.first_path_segment(path_info)\n root = path[:cut]\n rest = path[cut:]\n if root in self.routes:\n environ['PATH_INFO'] = rest\n #XXX shouldn't we += to SCRIPT_NAME?\n environ['SCRIPT_NAME'] = prefix + root\n app = self.routes[root]\n else:\n app = webob.exc.HTTPNotFound()\n return app(environ, start_response)", "def get_redirect(self, url):\n self._load_redirects()\n if url not in self.redirects:\n return url\n return self.redirects[url]", "def _request_handler_factory(custom_param):\n\n def factory(*args):\n return _RequestHandler(custom_param, *args)\n\n return factory", "def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)", "def processUrl(self, url: str) -> dict:\n site = self.sf.urlFQDN(url)\n cookies = None\n\n # Filter out certain file types (if user chooses to)\n if list(filter(lambda ext: url.lower().split('?')[0].endswith('.' + ext.lower()), self.opts['filterfiles'])):\n # self.debug(f\"Ignoring URL with filtered file extension: {link}\")\n return None\n\n if site in self.siteCookies:\n self.debug(f\"Restoring cookies for {site}: {self.siteCookies[site]}\")\n cookies = self.siteCookies[site]\n\n # Fetch the contents of the supplied URL\n fetched = self.sf.fetchUrl(\n url,\n cookies=cookies,\n timeout=self.opts['_fetchtimeout'],\n useragent=self.opts['_useragent'],\n sizeLimit=10000000,\n verify=False\n )\n self.fetchedPages[url] = True\n\n if not fetched:\n return None\n\n # Track cookies a site has sent, then send the back in subsquent requests\n if self.opts['usecookies'] and fetched['headers'] is not None:\n if fetched['headers'].get('Set-Cookie'):\n self.siteCookies[site] = fetched['headers'].get('Set-Cookie')\n self.debug(f\"Saving cookies for {site}: {self.siteCookies[site]}\")\n\n if url not in self.urlEvents:\n # TODO: be more descriptive\n self.error(\"Something strange happened - shouldn't get here: url not in self.urlEvents\")\n self.urlEvents[url] = None\n\n # Notify modules about the content obtained\n self.contentNotify(url, fetched, self.urlEvents[url])\n\n real_url = fetched['realurl']\n if real_url and real_url != url:\n # self.debug(f\"Redirect of {url} to {real_url}\")\n # Store the content for the redirect so that it isn't fetched again\n self.fetchedPages[real_url] = True\n # Notify modules about the new link\n self.urlEvents[real_url] = self.linkNotify(real_url, self.urlEvents[url])\n url = real_url # override the URL if we had a redirect\n\n data = fetched['content']\n\n if not data:\n return None\n\n if isinstance(data, bytes):\n data = data.decode('utf-8', errors='replace')\n\n # Extract links from the content\n links = SpiderFootHelpers.extractLinksFromHtml(\n url,\n data,\n self.getTarget().getNames()\n )\n\n if not links:\n self.debug(f\"No links found at {url}\")\n return None\n\n # Notify modules about the links found\n # Aside from the first URL, this will be the first time a new\n # URL is spotted.\n for link in links:\n if not self.opts['reportduplicates']:\n if link in self.urlEvents:\n continue\n # Supply 
the SpiderFootEvent of the parent URL as the parent\n self.urlEvents[link] = self.linkNotify(link, self.urlEvents[url])\n\n self.debug(f\"Links found from parsing: {links.keys()}\")\n return links", "def json_redirect(url):\n return json_response(isRedirect=True, redirectTo=url)", "def redirect(url, code=None):\r\n if code is None:\r\n code = 303 if request.get('SERVER_PROTOCOL') == \"HTTP/1.1\" else 302\r\n location = urljoin(request.url, url)\r\n raise HTTPResponse(\"\", status=code, header=dict(Location=location))", "def follow_redirects(self, url):\n try:\n return requests.get(url).url\n except requests.RequestException:\n return None", "def setup_config_handler(url, access_key, secret_key, secure, debug):\n global config_handler\n if not config_handler:\n config_handler = ConfigHandler(url, access_key, secret_key, secure, debug)", "def redirect(self, url):\n # todo: use Flask's redirect support\n seen_urls = {url}\n from_url = url\n while True:\n to_url = self.get(from_url)\n if to_url is None:\n break\n if to_url in seen_urls:\n raise RedirectException('Saw redirect loop with key {0}'.format(url))\n from_url = to_url\n return from_url", "def _create_normal_request(self, url):\r\n request = self.factory.get(url)\r\n request.user = AnonymousUser()\r\n middleware = SessionMiddleware()\r\n middleware.process_request(request)\r\n request.session.save()\r\n MakoMiddleware().process_request(request)\r\n return request", "def handle_request(request, targetUrl):\n config = ucb_deployment_site.getConfig()\n connection = cspace.connection.create_connection(config, request.user)\n (targetUrl, data, statusCode) = connection.make_get_request(targetUrl)\n\n if statusCode == 200:\n result = HttpResponse(data, mimetype='application/xml')\n elif statusCode == 404:\n raise Http404\n elif statusCode == 401:\n logout(request)\n result = redirect(cspace.LOGIN_URL_REDIRECT % request.path)\n else:\n result = HttpResponse(\"HTTP request error: %d.\" % statusCode)\n\n return result", "def redirect_func(request, tiny_url):\n if tiny_url:\n try:\n url_obj = UrlMap.objects.get(short_url=tiny_url)\n return redirect(url_obj.original_url)\n except Exception as e:\n return render(request, 'shortifyUrl/index.html',\n {'some_data': 'Could not find matching URL in DB, Exception : {}'.format(e)})", "def register_url(url, handler, name=None, kwargs=None):\n if name is None and kwargs is None:\n app_config.urls.append((url, handler))\n return\n\n if name is None:\n app_config.urls.append((url, handler, kwargs))\n return\n\n app_config.urls.append((url, handler, kwargs, name))", "def compat_middleware_factory(klass):\n class compatwrapper(klass):\n def process_response(self, req, resp):\n if not hasattr(resp, 'streaming'):\n return klass.process_response(self, req, resp)\n return resp\n return compatwrapper", "def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. 
This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response", "def http_response(url, requests: HTTPRequests):\n loc = parse_url(url)\n result = Result(url=url)\n\n try:\n start = timeit.default_timer()\n\n result = _http_request(loc, requests)\n result.latency = '{:2.3}'.format(timeit.default_timer() - start)\n\n if 400 <= result.status < 500:\n return result\n\n # if response code is a HTTP redirect then follow it recursively\n if result.status in (301, 302, 303, 307, 308):\n # if URL in Location (or location) is a relative URL, ie starts\n # with a /, then reconstruct the new URL using the current one's\n # scheme and host\n if 'Location' in result.headers:\n new_url = result.headers.get('Location')\n elif 'location' in result.headers:\n new_url = result.headers.get('location')\n\n requests.redirect_count += 1\n\n if not new_url:\n result.desc = 'Redirect location not set'\n elif new_url == result.url:\n result.desc = 'URL redirecting to itself'\n elif requests.redirect_count == REDIRECT_LIMIT:\n result.desc = 'To many redirects'\n else:\n if new_url.startswith('/'):\n new_url = '{}://{}{}'.format(\n loc.scheme, loc.netloc, new_url)\n result.redirect = http_response(new_url, requests)\n\n if result.content and requests.parse_xml:\n requests.parse_xml = False\n sitemap = urls_from_xml(result.content)\n result.sitemap_urls = []\n for s_url in sitemap:\n # some sites include the sitemap's url in the sitemap\n if s_url == result.url:\n continue\n result.sitemap_urls.append(http_response(s_url, requests))\n\n except socket.gaierror:\n result.desc = 'Could not resolve'\n except (TimeoutError, socket.timeout):\n result.desc = 'Operation timed out'\n except http.client.RemoteDisconnected as exc:\n result.desc = str(exc)\n except http.client.InvalidURL:\n result.desc = 'Invalid URL'\n except (ConnectionRefusedError, ConnectionResetError) as exc:\n result.desc = exc.strerror\n except ssl.SSLCertVerificationError as exc:\n result.desc = exc.verify_message\n except ssl.SSLError:\n result.desc = 'SSL is misconfigured'\n return result", "def route(cls, url, method='GET'):\n def route_decorator(func):\n item = (url, method, func)\n cls._docoratedRouteHandlers.append(item)\n return func\n return route_decorator", "def redirect(uri):\n response = HttpResponse('', status=302)\n response['Location'] = uri\n return response", "def forward_to(id):\n\n db = init_connection_engine()\n\n if id == 'short_URL':\n return redirect(url_for('index'))\n else:\n # Looking up the URL by its ID in the DB.\n try:\n # Using a with statement ensures that the connection is always released\n # back into the pool at the end of statement (even if an error occurs).\n with db.connect() as conn:\n lookup_url = \"SELECT url_data FROM url_list WHERE url_id='\" + id + \"';\"\n target_url = conn.execute(lookup_url).fetchone()\n # If target URL is not found.\n if not target_url:\n flash('Not found')\n return redirect(url_for('index'))\n # If something goes wrong.\n except:\n flash('Something went wrong')\n return redirect(url_for('index'))\n\n return redirect(target_url[0])", "def launch_request_handler(handler_input):\n return launch_request(handler_input)", "def url(regex, view, kwargs=None, name=None, 
prefix='', decorators=None,\n middleware_classes=None):\n if not (decorators or middleware_classes):\n try:\n return urls.url(regex, view, kwargs, name)\n except TypeError: # Django<1.10\n return urls.url(regex, view, kwargs, name, prefix)\n r = _url(regex, view, kwargs, name, prefix)\n r.decorators = get_decorator_tuple(decorators, middleware_classes)\n return r", "def _urlopen(url):\n headers = config.get(\"extra_headers\",{})\n headers['User-Agent'] = config.user_agent\n\n type, host, selector = split_type_host(url)\n\n if type.lower() == \"https\":\n conn = ProxyHTTPSConnection(host, url=url)\n else:\n conn = ProxyHTTPConnection(host, url=url)\n\n conn.request(\"GET\", selector, headers=headers)\n return conn.getresponse()", "def redirect_to_url(request, short_url):\n try:\n url = Url.objects.get(short_url=short_url)\n except Url.DoesNotExist:\n raise Http404()\n else:\n return HttpResponseRedirect(url.url)", "def url_opener():\n from ssl import SSLError\n from socket import timeout\n\n try:\n from urllib.request import urlopen\n except ImportError:\n from urllib2 import urlopen # suppress(import-error)\n\n def _urlopen(*args, **kwargs):\n \"\"\"Open url, but set the timeout to 30 and retry a few times.\"\"\"\n kwargs[\"timeout\"] = kwargs.get(\"timeout\", None) or 30\n\n if kwargs.get(\"retrycount\"):\n retrycount = (kwargs[\"retrycount\"] + 1)\n del kwargs[\"retrycount\"]\n else:\n retrycount = 100\n\n errors = list()\n\n while retrycount != 0:\n try:\n return urlopen(*args, **kwargs)\n except (url_error(), SSLError, timeout) as error:\n errors.append(error)\n retrycount -= 1\n\n errors_string = \" \\n\".join([repr(e) for e in errors])\n raise url_error()(u\"\"\"Failed to open URL {0}, \"\"\"\n u\"\"\"exceeded max retries {1}. \"\"\"\n u\"\"\" Errors [{2}]\\n\"\"\".format(args[0],\n retrycount,\n errors_string))\n\n return _urlopen", "def POST(url, mime_type='text/html'):\n def_app = DefaultHttpRequestHandler()\n return def_app.RequestMap(url, methods=['POST'], produces=mime_type)", "def __init__(self, url_handler=None):\r\n\r\n self.url_handler = url_handler if url_handler else self._default_url_handler\r\n\r\n self.username_mapping = {}", "def handler(self):\n\t\treturn self.handle_request", "def __init__(self, url, redirectChain=[], serverEncoding=None,\n HTTPignore=[]):\n self.url = url\n self.serverEncoding = serverEncoding\n\n fake_ua_config = config.fake_user_agent_default.get(\n 'weblinkchecker', False)\n if fake_ua_config and isinstance(fake_ua_config, str):\n user_agent = fake_ua_config\n elif fake_ua_config:\n user_agent = comms.http.fake_user_agent()\n else:\n user_agent = comms.http.user_agent()\n self.header = {\n 'user-agent': user_agent,\n 'Accept': 'text/xml,application/xml,application/xhtml+xml,'\n 'text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',\n 'Accept-Language': 'de-de,de;q=0.8,en-us;q=0.5,en;q=0.3',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',\n 'Keep-Alive': '30',\n 'Connection': 'keep-alive',\n }\n self.redirectChain = redirectChain + [url]\n self.changeUrl(url)\n self.HTTPignore = HTTPignore", "def route_view(request, code):\n try:\n instance = get_object_or_404(ShortUrl, url_code=code)\n return redirect(instance.long_url, permanent=True)\n except Http404:\n return redirect('/', permanent=True)", "def handle(req):\n #Set destination host and port\n destination_port = 443\n destination_host = req #TODO:\n out = initf(destination_host, destination_port)\n\n return out", "def mapping(mapping_path):\n def wrapper(target):\n if issubclass(target, 
tornado.web.RequestHandler):\n if mapping_path in page_dict:\n raise KeyError(\"KeyError: page '%s' is already registered.\" % mapping_path)\n setattr(target, '__url__', mapping_path)\n return page_dict.setdefault(mapping_path, target)\n elif issubclass(target, tornado.web.UIModule):\n if mapping_path in module_dict:\n raise KeyError(\"KeyError: module '%s' is already registered.\" % mapping_path)\n setattr(target, '__url__', mapping_path)\n return module_dict.setdefault(mapping_path, target)\n else:\n raise TypeError(\"TypeError: unknown type '%s' registered.\" % repr(target))\n return wrapper", "def __init__(self, redirect_url, headers=None, content=None):\n super().__init__(redirect_url, headers=headers, content=content)", "def __init__(self, redirect_url, headers=None, content=None):\n super().__init__(redirect_url, headers=headers, content=content)\n self.headers.append((\"Location\", redirect_url))", "def GET(url, mime_type='text/html'):\n def_app = DefaultHttpRequestHandler()\n return def_app.RequestMap(url, methods=['GET'], produces=mime_type)", "def init_from_url(cls, url):\n init_kwargs = cls._validate_init_kwargs(url)\n return cls(**init_kwargs)", "def __init__(self, url):\n self.url = url\n self.admin_url = os.path.join(url, \"__admin\")\n self.admin_mapping_url = os.path.join(self.admin_url, \"mappings\")\n self.mapping_reset_url = os.path.join(self.admin_mapping_url, 'reset')\n self.requests_url = \"%s/requests\" % self.admin_url", "def add_route(self, url, f, **kwargs):\n if url == '' or '?' in url:\n raise ValueError('Invalid URL')\n # Initial params for route\n params = {'methods': ['GET'],\n 'save_headers': [],\n 'max_body_size': 1024,\n 'allowed_access_control_headers': '*',\n 'allowed_access_control_origins': '*',\n }\n params.update(kwargs)\n params['allowed_access_control_methods'] = ', '.join(params['methods'])\n # Convert methods/headers to bytestring\n params['methods'] = [x.encode() for x in params['methods']]\n params['save_headers'] = [x.encode() for x in params['save_headers']]\n # If URL has a parameter\n if url.endswith('>'):\n idx = url.rfind('<')\n path = url[:idx]\n idx += 1\n param = url[idx:-1]\n if path.encode() in self.parameterized_url_map:\n raise ValueError('URL exists')\n params['_param_name'] = param\n self.parameterized_url_map[path.encode()] = (f, params)\n\n if url.encode() in self.explicit_url_map:\n raise ValueError('URL exists')\n self.explicit_url_map[url.encode()] = (f, params)", "def request(self, **request):\r\n environ = {\r\n 'HTTP_COOKIE': self.cookies,\r\n 'PATH_INFO': '/',\r\n 'QUERY_STRING': '',\r\n 'REQUEST_METHOD': 'GET',\r\n 'SCRIPT_NAME': '',\r\n 'SERVER_NAME': 'testserver',\r\n 'SERVER_PORT': 80,\r\n 'SERVER_PROTOCOL': 'HTTP/1.1',\r\n }\r\n environ.update(self.defaults)\r\n environ.update(request)\r\n request = WSGIRequest(environ)\r\n\r\n handler = BaseHandler()\r\n handler.load_middleware()\r\n for middleware_method in handler._request_middleware:\r\n if middleware_method(request):\r\n raise Exception(\"Couldn't create request object - \"\r\n \"request middleware returned a response\")\r\n\r\n return request", "def friendly_uri(environ, start_response):\n _setup_friendly_environ(environ)\n return get_tiddler(environ, start_response)", "def redirect(cls, location, status_code=302):\n if '\\x0d' in location or '\\x0a' in location:\n raise ValueError('invalid redirect URL')\n return cls(status_code=status_code, headers={'Location': location})", "def __call__(self, *args, **kwargs):\n\t\treturn 
self.handler()(self.request(kwargs))", "def _redirect_implementation(request, model, b36_encoded_pk):\n endpoint = get_object_or_404(model, pk=base36_to_int(b36_encoded_pk))\n shorturl_redirect.send(sender=model, instance=endpoint, user=request.user)\n return endpoint.url", "def _create_ssl_request(self, url):\r\n request = self.factory.get(url)\r\n request.META['SSL_CLIENT_S_DN'] = self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL)\r\n request.user = AnonymousUser()\r\n middleware = SessionMiddleware()\r\n middleware.process_request(request)\r\n request.session.save()\r\n MakoMiddleware().process_request(request)\r\n return request", "def _request(self, url: str) -> http.client.HTTPResponse:\n self.request = urllib.request.Request(\n url,\n headers={'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X)'})\n try:\n return urllib.request.urlopen(self.request, timeout=10)\n except Exception as e:\n # print(e)\n # exit(-1)\n print(e, url)\n raise e", "def make_new_handler(self, *args, **kwargs):", "def parse(cls, url, *, map_instance=True):\n lgr.debug(\"Parsing url %s\", url)\n\n # Loop through known url regexes and stop as soon as one is matching\n match = None\n for regex, settings in cls.known_urls.items():\n match = re.match(regex, url)\n if not match:\n continue\n handle_redirect = settings.get(\"handle_redirect\", False)\n if handle_redirect:\n assert handle_redirect in (\"pass\", \"only\")\n new_url = cls.follow_redirect(url)\n if new_url != url:\n return cls.parse(new_url)\n if handle_redirect == \"pass\":\n # We used to issue warning in such cases, but may be it got implemented\n # now via reverse proxy and we had added a new regex? let's just\n # continue with a debug msg\n lgr.debug(\"Redirection did not happen for %s\", url)\n else:\n raise RuntimeError(\n f\"{url} did not redirect to another location which dandi client would\"\n f\" know how to handle.\"\n )\n elif settings.get(\"map_instance\"):\n if map_instance:\n server, *_ = cls.parse(url, map_instance=False)\n if settings[\"map_instance\"] not in known_instances:\n raise ValueError(\n \"Unknown instance {}. 
Known are: {}\".format(\n settings[\"map_instance\"], \", \".join(known_instances)\n )\n )\n return (known_instances[settings[\"map_instance\"]].girder,) + tuple(\n _\n )\n continue # in this run we ignore an match further\n else:\n break\n\n if not match:\n known_regexes = \"\\n - \".join([\"\"] + list(cls.known_urls))\n # TODO: may be make use of etelemetry and report if newer client\n # which might know is available?\n raise UnknownURLError(\n f\"We do not know how to map URL {url} to girder.\\n\"\n f\"Regular expressions for known setups:\"\n f\"{known_regexes}\"\n )\n\n groups = match.groupdict()\n girder_server = cls.map_to_girder.get(\n groups[\"server\"].rstrip(\"/\"), groups[\"server\"]\n )\n if not girder_server.endswith(\"/\"):\n girder_server += \"/\" # we expected '/' to be there so let it be\n\n if \"multiitem\" not in groups:\n # we must be all set\n asset_ids = [groups[\"id\"]]\n asset_type = groups[\"asset_type\"]\n asset_type = cls.map_asset_types.get(asset_type, asset_type)\n else:\n # we need to split/parse them and return a list\n asset_ids = [i.split(\"+\")[1] for i in groups[\"multiitem\"].split(\"/\") if i]\n asset_type = \"item\"\n ret = girder_server, asset_type, asset_ids\n lgr.debug(\"Parsed into %s\", ret)\n return ret", "def _get_target():\n from webdispatch.mixins import URLMapperMixin\n return URLMapperMixin", "def get_page(self, url, *args, **kwds):\n contextFactory = None\n scheme, host, port, path = parse(url)\n data = kwds.get('postdata', None)\n self._method = method = kwds.get('method', 'GET')\n self.request_headers = self._headers(kwds.get('headers', {}))\n if (self.body_producer is None) and (data is not None):\n self.body_producer = FileBodyProducer(StringIO(data))\n if scheme == \"https\":\n proxy_endpoint = os.environ.get(\"https_proxy\")\n if proxy_endpoint:\n proxy_url = urlparse.urlparse(proxy_endpoint)\n endpoint = TCP4ClientEndpoint(self.reactor, proxy_url.hostname, proxy_url.port)\n agent = ProxyAgent(endpoint)\n else:\n if self.endpoint.ssl_hostname_verification:\n contextFactory = WebVerifyingContextFactory(host)\n else:\n contextFactory = WebClientContextFactory()\n agent = Agent(self.reactor, contextFactory)\n self.client.url = url\n d = agent.request(method, url, self.request_headers,\n self.body_producer)\n else:\n proxy_endpoint = os.environ.get(\"http_proxy\")\n if proxy_endpoint:\n proxy_url = urlparse.urlparse(proxy_endpoint)\n endpoint = TCP4ClientEndpoint(self.reactor, proxy_url.hostname, proxy_url.port)\n agent = ProxyAgent(endpoint)\n else:\n agent = Agent(self.reactor)\n d = agent.request(method, url, self.request_headers,\n self.body_producer)\n d.addCallback(self._handle_response)\n return d", "def crawl_redirect_page(self, target_url):\n target_redirect_url = \"\"\n\n try:\n response = requests.get(url=target_url)\n response.encoding = \"utf-8\"\n html = etree.HTML(response.text)\n script_str = html.xpath(\"/html/body/script[@type='application/ld+json']/text()\")[0]\n\n entity_json = json.loads(script_str)\n\n if \"url\" in entity_json:\n url = entity_json[\"url\"]\n target_redirect_url = url.replace(\"\\\\\", \"\")\n\n except:\n print(\"crawl redirected error\")\n\n # print(target_url, own_target_url)\n\n return target_redirect_url", "def _route(self, request, url):\n operationIndex = url[:-1].rfind('/')\n processorPath = url[:operationIndex]\n processor = self.api_processor_map.get(processorPath.lower()) \n operation = url[operationIndex+1:].rstrip('/').lower()\n \n http_methods, is_admin, is_cron = 
self.api_const.get_api_operation_perms(operation)\n \n if is_cron and users.get_current_user() is not None and not users.is_current_user_admin() :\n raise self.api_error.ApiError(self.api_error.API_ERROR_ADMIN_OPERATION, operation)\n \n if is_admin and not is_cron and not users.is_current_user_admin():\n raise self.api_error.ApiError(self.api_error.API_ERROR_ADMIN_OPERATION, operation)\n \n if request.method not in http_methods:\n raise self.api_error.ApiError(self.api_error.API_ERROR_INVALID_HTTP_METHOD, request.method, operation)\n \n if is_cron :\n context.get_context().set_login_required(False)\n \n return self._process(request, processor(), operation)", "def get_url_with_redirect(url, redirect_url):\n if redirect_url:\n url = url + '?' + urlencode({settings.REDIRECT_FIELD_NAME: redirect_url})\n\n return url", "def redirected_to_url(url):\r\n def was_redirected(client, response, testcase):\r\n status(302)(client, response, testcase)\r\n testcase.assertEqual(\r\n response['Location'],\r\n url\r\n )\r\n return was_redirected", "def redirect(self, route):\n\n self.redirect_url = route\n return self", "def getPage(url, contextFactory=None, *args, **kwargs):\n scheme, host, port, path = client._parse(url)\n factory = client.HTTPClientFactory(url, *args, **kwargs)\n if scheme == 'https':\n if contextFactory is None:\n raise RuntimeError, 'must provide a contextFactory'\n conn = reactor.connectSSL(host, port, factory, contextFactory)\n else:\n conn = reactor.connectTCP(host, port, factory)\n\n return factory", "def find_handler(url):\n for handler in __all__:\n # Get the symbol for handler\n mod = globals()[handler]\n # Ask handler if it can handle the url\n if getattr(mod, \"can_handle\")(url):\n return mod\n return None", "def get_request_handler(self, headers):\n api_version = self.get_api_version(headers)\n\n if api_version == '1.0.0':\n request_handler = V1_0_0_HelperHandler()\n else:\n request_handler = V1_0_0_HelperHandler()\n\n return request_handler", "def redirect(self, url):\n self.setResponseCode(responsecode.FOUND)\n self.setHeader(\"location\", url)", "def my_url(url):\n if USE_HTTPS:\n return url_for(url, _scheme=\"https\", _external=True)\n else:\n return url_for(url)", "def captive(f):\n\n def wrapper(self, request, *args, **kwargs):\n return captiveHandler(request) or f(self, request, *args, **kwargs)\n functools.update_wrapper(wrapper, f)\n return wrapper", "def get_redirect_url(self, *args, **kwargs):\n redirect = kwargs['route']\n self.permanent = redirect.permanent\n return redirect.target.url", "def redirectPage() -> Response:\n # pass in the function name\n return redirect(url_for('view.loadMainPage'))", "def set_handler(cls, hnd, route):\n cls.hnd = hnd\n cls.request = hnd.request\n cls.response = hnd.response\n cls.params = route\n if cls.context:\n del cls.context\n cls.context = Context()", "def get_full_url(request_handler, path):\n pr = urlparse(request_handler.request.url)\n return '%s://%s%s' % (pr.scheme, pr.netloc, path)", "def wrap_route(self, cbl: typing.Callable, *args, **kwargs) -> Route:\n rtt = Route(cbl, *args, **kwargs)\n return rtt", "def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False):\n raise NotImplementedError(\"Runtime needs to provide handler_url()\")", "def _make_opener(self, ref_url):\n cookiejar = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))\n if ref_url:\n opener.addheaders.append(('Referer', ref_url))\n return opener, cookiejar", "def get_request_handler(self, 
headers):\n api_version = self.get_api_version(headers)\n\n if api_version == '1.0.0':\n request_handler = V1_0_0_UserHandler()\n else:\n request_handler = V1_0_0_UserHandler()\n\n return request_handler", "def get_or_create_url(url, session=session, model=URL):\n\n instance = session.query(model).filter_by(**{'text': url}).first()\n if instance:\n return instance\n else:\n instance = create_url(url)\n return instance", "def _get(self, url):\n return self._request(url)", "def create_request(url, headers, attempts, request_type, data=None):\n request_func = getattr(requests, request_type)\n kwargs = {\"url\": url, \"headers\": headers}\n if request_type == \"post\" or request_type == \"patch\":\n kwargs[\"json\"] = data\n try:\n req = request_func(**kwargs)\n status_code = req.status_code\n time.sleep(1)\n while status_code >= 400 and attempts < 5:\n req = request_func(**kwargs)\n status_code = req.status_code\n attempts += 1\n time.sleep(1)\n return req\n except Exception as e:\n print(\"[ERROR] There was an error with the request, details:\")\n print(e)\n return None", "def redirects_to(response, url):\n is_redirect = response.status_code == 302\n parsed_url = urlparse(response.get('Location'))\n is_url = parsed_url.path == url\n\n return is_redirect and is_url", "def _handle_request(self, method, url, handler):\n if not(method in self.handlers):\n handler.set_status(405) # Method Not Allowed\n handler.write({})\n return\n for (path, fn) in self.handlers[method].items():\n if re.match(path, url):\n fn(url, handler)\n return\n handler.set_status(404) # Not Found\n handler.write({})", "def __init__(self, url):\n self._headers = {}\n \n parsed_url = urlparse.urlparse(url)\n if parsed_url.scheme and not parsed_url.netloc:\n # If we have a scheme but no netloc, someone's entered\n # a URL like 'foo.com:123'. Add an http://to the\n # start, and reparse.\n url = 'http://' + url\n parsed_url = urlparse.urlparse(url)\n \n if not parsed_url.scheme:\n # If no scheme was provided, then the url parsing\n # won't have worked. Reparse.\n scheme = 'http'\n url = '%s://%s' % (scheme, url)\n parsed_url = urlparse.urlparse(url)\n else:\n scheme = parsed_url.scheme\n\n if parsed_url.netloc.find(':') < 0:\n if scheme == 'http':\n netloc = parsed_url.netloc + ':80'\n else:\n netloc = parsed_url.netloc + ':443'\n else:\n # Already had an explicit port\n netloc = parsed_url.netloc\n \n # Normalise\n self.url = urlparse.urlunparse((scheme, netloc, parsed_url.path,\n parsed_url.params, parsed_url.query, parsed_url.fragment))\n self.parsed_url = urlparse.urlparse(self.url)", "def route(self, rule, **options):\n def decorator(f):\n self.add_url_rule(rule, f, **options)\n return f\n return decorator", "def wrap(self, handler):\n\t\tdef wrapper(request:Request)->Response:\n\t\t\tself.__store.clear()\n\t\t\treturn handler(request)\n\t\treturn wrapper", "def GetHandlerForHttpRequest(request):\n\n matcher = http_routing.HTTP_ROUTING_MAP.bind(\n \"%s:%s\" % (request.environ[\"SERVER_NAME\"],\n request.environ[\"SERVER_PORT\"]))\n try:\n match = matcher.match(request.path, request.method)\n except werkzeug_exceptions.NotFound:\n raise api_call_handlers.ApiCallHandlerNotFoundError(\n \"No API handler was found for (%s) %s\" % (request.path,\n request.method))\n\n handler_cls, route_args = match\n return (handler_cls(), route_args)", "def class_based_view(class_obj):\n def _instantiate_view_class(request, *args, **kwargs):\n return class_obj()(request, *args, **kwargs)\n return _instantiate_view_class" ]
[ "0.8006581", "0.63602716", "0.60251427", "0.5762764", "0.562808", "0.5593964", "0.5454222", "0.5417794", "0.54076666", "0.5355651", "0.5345712", "0.53292465", "0.53158814", "0.53054017", "0.526144", "0.5258285", "0.52532136", "0.5251986", "0.52408147", "0.5233149", "0.5226429", "0.51969796", "0.5193553", "0.51904196", "0.51902217", "0.5186876", "0.5181511", "0.51453596", "0.5138872", "0.5126173", "0.51110524", "0.5109886", "0.51008636", "0.5097166", "0.50944185", "0.50732344", "0.5055551", "0.50194055", "0.5014667", "0.49928212", "0.49673447", "0.4966501", "0.49654728", "0.49479133", "0.49337384", "0.49240083", "0.4915506", "0.49050832", "0.4904702", "0.4902076", "0.48963276", "0.48888507", "0.48776776", "0.4876311", "0.48738763", "0.48631248", "0.48561084", "0.48560393", "0.48508847", "0.48452365", "0.4836857", "0.48311633", "0.4824454", "0.48236835", "0.48234525", "0.48160172", "0.48010242", "0.48001933", "0.47915852", "0.4787122", "0.47803035", "0.47759563", "0.47751826", "0.4763718", "0.47609082", "0.47511142", "0.47495306", "0.47339886", "0.47281322", "0.4725803", "0.4721819", "0.47157162", "0.47144842", "0.47118258", "0.47085834", "0.470483", "0.47002575", "0.46772447", "0.46763498", "0.46665293", "0.4650692", "0.46482733", "0.4648013", "0.46459174", "0.4643291", "0.46394214", "0.46393722", "0.463638", "0.4633263", "0.46282336" ]
0.6883462
1
loop and copy serial>console
def reader(self):
    try:
        line = ''
        while self.alive:
            data = self.serial.read(1)
            if data == '\r':
                continue
            line += data
            if data == '\n':
                self.log.print_distant(datetime.now().strftime("%d/%m/%Y %H:%M:%S> "))
                if line.startswith('ALARM:'):
                    self.log.alert(line)
                elif line.startswith('EVENT:') or line.startswith('INFO'):
                    self.log.warn(line)
                else:
                    self.log.print_distant(line)
                self.parse(line.strip())
                line = ''
            sys.stdout.flush()
    except serial.SerialException:
        self.alive = False
        # would be nice if the console reader could be interrupted at this
        # point...
        raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_terminal(self):\n if (self.is_connected):\n self.mySerialConnection.do_serial()", "def programflow(serial_port):\n print (\"\")\n print (\"Program flow: \")\n print (\" 1. Connect to the Arduino on port: \" + serial_port)\n print (\" 2. Start polling for commands\")\n print (\" 3. Start writing incoming data to the terminal\")\n print (\" repeat forever... \")\n print (\"\")", "def debug(self) -> None:\n \n # print(bytes([3]))\n # self.ser.write(bytes([x for x in range(256)]))\n \n for byte in [[x] for x in range(256)]:\n print(\"Loading -> {}\".format(byte))\n self.ser.write(bytes(byte))\n time.sleep(0.1)", "def console():\n start_console()", "def run_interactive_shell(serial_port_name = None):\r\n\t\r\n\tprint (\"- \"*14)\r\n\tprint (\" Qontrol Interactive Shell\")\r\n\tprint (\"- \"*14+\"\\n\")\r\n\t\r\n\tbaudrate = 115200\r\n\t\r\n\tdef tty_supports_color():\r\n\t\t\"\"\"\r\n\t\tReturns True if the running system's terminal supports color, and False\r\n\t\totherwise. From django.core.management.color.supports_color.\r\n\t\t\"\"\"\r\n\t\t\r\n\t\tplat = sys.platform\r\n\r\n\t\tif plat == \"win32\":\r\n\t\t\treturn False\r\n\t\telse:\r\n\t\t\tsupported_platform = plat != 'Pocket PC' and (plat != 'win32' or\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t 'ANSICON' in os.environ)\r\n\t\t# isatty is not always implemented, #6223.\r\n\t\t\tis_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\r\n\t\t\treturn supported_platform and is_a_tty\r\n\t\t\t\r\n\r\n\tif tty_supports_color():\r\n\t\tnormal_text = \"\\033[0m\"\r\n\t\tin_text = \"\\033[33;1m\"\r\n\t\tout_text = \"\\033[36;1m\"\r\n\t\temph_text = \"\\033[97;1m\"\r\n\telse:\r\n\t\tnormal_text = \"\"\r\n\t\tin_text = \"\"\r\n\t\tout_text = \"\"\r\n\t\temph_text = \"\"\r\n\t\r\n\t# List available serial ports\r\n\t# Separate ports that are probably Qontrol devices from those that are probably not\r\n\tports_of_interest = list(list_ports.grep('.*usbserial-FT[A-Z0-9].*'))\r\n\tports_other = [port for port in list(list_ports.grep('.*')) \r\n\t\t\t\t\t\t\t\t\tif port not in ports_of_interest]\r\n\tports = ports_of_interest + ports_other\r\n\tn_ports = len(ports)\r\n\tprint (\"Available ports:\")\r\n\ti = 0\r\n\tfor port in ports_of_interest:\r\n\t\tprint (\" {:}#{:2} - {:15}{:}\".format(emph_text, i, str(port), normal_text))\r\n\t\ti += 1\r\n\tfor port in ports_other:\r\n\t\tprint (\" #{:2} - {:15}\".format(i, str(port)))\r\n\t\ti += 1\r\n\t\r\n\t# Ask user which port to target\r\n\tif serial_port_name is None:\r\n\t\tfor i in range(3):\r\n\t\t\trequested_port_str = input(\"\\nWhich would you like to communicate with? 
#\")\r\n\t\t\ttry:\r\n\t\t\t\trequested_port_index = int(requested_port_str)\r\n\t\t\t\tif requested_port_index > n_ports:\r\n\t\t\t\t\traise RuntimeError()\r\n\t\t\t\tbreak\r\n\t\t\texcept:\r\n\t\t\t\tprint (\"Port index '{:}' not recognised.\".format(requested_port_str))\r\n\t\t\r\n\t\tfor i,port in enumerate(ports):\r\n\t\t\tif i == requested_port_index:\r\n\t\t\t\tbreak\r\n\telse:\r\n\t\tfor port in ports:\r\n\t\t\tif port.device == serial_port_name:\r\n\t\t\t\tbreak\r\n\t\r\n\t\r\n\tport = serial.Serial(port.device, baudrate, timeout = 0)\r\n\t\r\n\t\r\n\t# Multithread the user and hardware monitoring\r\n\timport threading, copy, collections\r\n\r\n\tclass WatcherThread(threading.Thread):\r\n\r\n\t\tdef __init__(self, stream, name='keyboard-input-thread'):\r\n\t\t\tself.stream = stream\r\n\t\t\tself.buffer = fifo(maxlen = 8) # Unlikely to ever need > 1\r\n\t\t\tsuper(WatcherThread, self).__init__(name=name, daemon=True)\r\n\t\t\tself.stop_flag = False\r\n\t\t\tself.start()\r\n\r\n\t\tdef run(self):\r\n\t\t\twhile True:\r\n\t\t\t\tr = self.stream.readline()\r\n\t\t\t\tif r:\r\n\t\t\t\t\tif type(r) is bytes:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tself.buffer.appendleft(r.decode('ascii'))\r\n\t\t\t\t\t\texcept UnicodeDecodeError:\r\n\t\t\t\t\t\t\timport binascii\r\n\t\t\t\t\t\t\tself.buffer.appendleft(str(binascii.hexlify(r)))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.buffer.appendleft(r)\r\n\t\t\t\tif self.stop_flag:\r\n\t\t\t\t\tbreak\r\n\t\t\r\n\t\tdef has_data(self):\r\n\t\t\treturn (len(self.buffer) > 0)\r\n\t\t\r\n\t\tdef pop(self):\r\n\t\t\treturn self.buffer.pop()\r\n\t\t\r\n\t\tdef stop(self):\r\n\t\t\tself.stop_flag = True\r\n\r\n\t# Start threads\r\n\tuser_watcher = WatcherThread(sys.stdin)\r\n\thardware_watcher = WatcherThread(port)\r\n\t\r\n\tprint (\"\\nEntering interactive mode. 
Use Ctrl+C/stop/quit/exit to finish.\\n\")\r\n\tprint (\"- \"*14+'\\n')\r\n\tsys.stdout.write(out_text + \" > \" + normal_text)\r\n\tsys.stdout.flush()\r\n\tcmd = \"\"\r\n\tresp = \"\"\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\t\r\n\t\t\t# Handle commands from user\r\n\t\t\tif user_watcher.has_data():\r\n\t\t\t\tcmd = user_watcher.pop().strip()\r\n\t\t\t\t\r\n\t\t\t\t# Safe words\r\n\t\t\t\tif cmd in ['quit', 'exit', 'stop']:\r\n\t\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\t\tcmd = cmd + '\\n'\r\n\t\t\t\tport.write(cmd.encode('ascii'))\r\n\t\t\t\t\r\n\t\t\t\t\r\n# \t\t\t\tsys.stdout.write(\"\\r\"+\" \"*40+\"\\r\")\r\n# \t\t\t\tsys.stdout.write('> ' + cmd.strip() + \"\\r\\n\")\r\n\t\t\t\tsys.stdout.write(out_text + \" > \" + normal_text)\r\n\t\t\t\tsys.stdout.flush()\r\n\t\t\t\r\n\t\t\t# Handle response from hardware\r\n\t\t\tif hardware_watcher.has_data():\r\n\t\t\t\tresp = hardware_watcher.pop()\r\n\t\t\t\t\r\n\t\t\t\tresp = resp.strip()\r\n\t\t\t\tsys.stdout.write(\"\\r\"+\" \"*40+\"\\r\")\r\n\t\t\t\tsys.stdout.write(in_text + \" < \" + normal_text + resp + \"\\r\\n\")\r\n\t\t\t\tsys.stdout.write(out_text + \" > \" + normal_text)\r\n\t\t\t\tsys.stdout.flush()\r\n\t\r\n\texcept KeyboardInterrupt:\r\n\t\tprint(\"\\n\")\r\n\t\r\n\t# Kill our threaded friends\r\n\ttry:\r\n\t\tuser_watcher._stop()\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\thardware_watcher._stop()\r\n\texcept:\r\n\t\tpass\r\n\t\r\n\tprint (\"- \"*14+'\\n')\r\n\t\r\n\tprint (\"Interactive shell closed.\")", "def run(self):\n self.read_from_serial()", "def load(device, baud, program):\n serialport = serial.Serial(device, baud, timeout=0)\n c = fdpexpect.fdspawn(serialport.fd)\n #c.logfile_read = sys.stdout\n # synch with the command prompt\n c.sendline('')\n waitprompt(c)\n # do stuff\n if(program.find('\\n')>=0):\n #program is many lines\n text = program.split('\\n')\n for line in text:\n line = line.strip()\n print(line)\n if (len(line) > 0) and (line[0] != '#'):\n c.sendline(line)\n waitprompt(c)\n print(\"waiting for prompt after twinkle\")\n else:\n #program is one line\n if (len(program) > 0) and (program[0] != '#'):\n c.sendline(program)\n waitprompt(c)\n c.close()\n print(\"Done\")\n return None", "def dump_data(ser, meta, args):\n ser.reset_input_buffer()\n ser.reset_output_buffer()\n\n command = b\"TEXT.DUMP\\r\"\n rx = \"\"\n ntry = 0\n while not rx or (rx.split()[-1] != \"data?\"):\n rx = send_cmd(ser, command, args.debug)\n # sys.stderr.write(rx)\n ntry += 1\n if ntry > 3:\n LOGGER.warning(\"Wrong response to dump command ({})\".format(command))\n return 0\n\n command = b\"Y\"\n rx = \"\"\n ntry = 0\n while not rx or (rx.split()[-1] != \"ready\"):\n rx = send_cmd(ser, command, args.debug)\n # sys.stderr.write(rx)\n ntry += 1\n if ntry > 3:\n LOGGER.warning(\"Wrong response to dump command ({})\".format(command))\n return 0\n\n b = b\"\\r\"\n n = ser.write(b)\n if args.debug:\n LOGGER.debug(\"{} byte ({}) written to port\\n\".format(n, repr(b)))\n time.sleep(0.05)\n\n dumpst = time.time()\n suff = \"\"\n if meta.badclock:\n suff = \"-badclock\"\n\n fname = \"{}/{}sb.{}{}\".format(args.path, meta.modserial, args.calday, suff)\n fh = open(fname, \"w\")\n\n fraw = \"\"\n rxline = 1\n try:\n while rxline:\n rxline = ser.readline()\n if rxline:\n sys.stdout.write(rxline.decode(errors=\"replace\"))\n fout = crlfpat.sub(linend, rxline.decode(errors=\"replace\"))\n if cafepat.search(fout):\n meta.cafe = True\n fh.write(fout)\n except KeyboardInterrupt:\n interrupt = b\"\\x03\"\n send_cmd(ser, interrupt, args.debug)\n # time.sleep(1.0)\n # 
rxline = 1\n # while rxline:\n # rxline = ser.readline()\n # if rxline:\n # sys.stdout.write(rxline.decode(errors='replace'))\n # fout = crlfpat.sub(linend, rxline.decode(errors='replace'))\n # fh.write(fout)\n ser.reset_input_buffer()\n fh.close()\n fsize = os.stat(fname).st_size\n frename = fname + \"-abort\"\n os.rename(fname, frename)\n sys.stderr.write(\"\\n\\n\")\n LOGGER.warning(\"Download aborted: wrote {} bytes to {}\".format(fsize, frename))\n return 0\n\n fh.close()\n\n if meta.cafe:\n frename = fname + \"-cafe\"\n os.rename(fname, frename)\n fname = frename\n\n dumpend = time.time()\n etsec = dumpend - dumpst\n dtet = datetime(1900, 1, 1, 0, 0, 0) + timedelta(seconds=etsec)\n\n fsize = os.stat(fname).st_size\n sys.stderr.write(\"\\n\\n\")\n if meta.badclock or meta.cafe:\n LOGGER.warning(\"Wrote {} bytes to {}\".format(fsize, fname))\n else:\n LOGGER.info(\"Wrote {} bytes to {}\".format(fsize, fname))\n LOGGER.info(\n \"Dumped {} records in {} (hh:mm:ss)\".format(meta.ndumprec, dtet.strftime(etfmt))\n )\n\n FLOGGER.info(\"Wrote {} bytes to {}\".format(fsize, fname))\n FLOGGER.info(\n \"Dumped {} records in {} (hh:mm:ss)\".format(meta.ndumprec, dtet.strftime(etfmt))\n )\n\n return fsize", "def startComProcess(self):\n # if statement prevents attempting to open serial if already in use\n # And prevents creation of multiple log files\n if self.ser is None or self.ser.closed:\n self.ser = serial.Serial(self.COMPort.get(), 115200)\n self.logFile = open(self.fileLocation.get(), 'w')\n root.after(1, self.getData)", "def read_and_print(serial):\r\n resp = read_buffer(serial)\r\n if resp != \"\":\r\n print(resp)", "def raw(serial):\n p = solo.client.find(serial)\n while True:\n r = p.get_rng(255)\n sys.stdout.buffer.write(r)", "def raw(serial):\n p = solo.client.find(serial)\n while True:\n r = p.get_rng(255)\n sys.stdout.buffer.write(r)", "def console():\r\n while True:\r\n interpret_command(input(\"POM> \"))", "def test_echo_console():\n port = os.ctermid() # default to console\n\n try: # check to see if running in external console\n fd = os.open(port, os.O_NONBLOCK | os.O_RDWR | os.O_NOCTTY)\n except OSError as ex:\n # maybe complain here\n return # not in external console\n else:\n os.close(fd) # cleanup\n\n\n tock = 0.03125\n ticks = 16\n limit = 0.0\n # limit = ticks * tock\n doist = doing.Doist(tock=tock, real=True, limit=limit)\n assert doist.tyme == 0.0 # on next cycle\n assert doist.tock == tock == 0.03125\n assert doist.real == True\n assert doist.limit == 0.0\n # assert doist.limit == limit == 0.5\n assert doist.doers == []\n\n console = serialing.Console()\n echoer = doing.EchoConsoleDoer(console=console)\n\n doers = [echoer]\n doist.do(doers=doers)\n # assert doist.tyme == limit\n assert console.opened == False\n\n\n \"\"\"End Test \"\"\"", "def writer(self):\n menu_active = False\n try:\n while self.alive:\n try:\n char = self.console.getkey()\n except KeyboardInterrupt:\n char = '\\x03'\n\n if menu_active:\n # Menu character again/exit char -> send itself\n if char in self.config.menu_char:\n self.serial.write(char) # send character\n elif char in self.config.exit_char:\n self.stop()\n break # exit app\n elif char in 'hH?': # h, H, ? 
-> Show help\n sys.stderr.write(self.get_help_text())\n elif char in self.config.photo_char:\n ENV.send_image_f = \"Asked by console\"\n else:\n sys.stderr.write('--- unknown menu character %s ---\\n' %\n char)\n menu_active = False\n elif char in self.config.menu_char: # next char will be for menu\n menu_active = True\n elif char == '\\n' or ord(char) == 10:\n sys.stderr.write('\\n')\n else:\n self.serial.write(char) # send character\n except:\n self.alive = False\n raise", "def genout(self):\n ch = self.buffer_output()\n while ch:\n print(ch, end='')\n ch = self.buffer_output()", "def readlines():\n while 1:\n line = nb_server.stdout.readline().decode(\"utf-8\").strip()\n if line:\n print(line)", "def connect(self, mach) -> channel.Channel:\n self.console_uart = self.servo_get_tty()\n return mach.open_channel(\"picocom\", \"-q\", \"-b\", \"115200\",\n self.console_uart)", "def Mode_Console(self):\r\n if self.Connected == False:\r\n raise IOError(\"The serial port is disconnected\")\r\n self.Reset_Input()\r\n self.Reset_Output()\r\n self.Write_Byte(0x00)\r\n self.Write_Byte(0x0F)\r\n self.Mode = BPMode.Console", "def serial(self):", "def dumb_tty(port):\n\tUART.setup(\"UART1\")\n\thlsd = pyhypnolsd.command_mode.from_port(port)\n\n\tget_command = True\n\twhile (get_command):\n\t\t# Get input\n\t\tcommand = raw_input(\"> \")\n\n\t\t# Check for exit\n\t\tif (command == \"exit\"):\n\t\t\tget_command = False\n\t\t\tcontinue\n\n\t\t# Send command, let it print output\n\t\thlsd.send_command(command, True)\n\n\t# Close remaining connections\n\thlsd.close()", "def read_stdout(self, dt):\n\n self.temp_stdout += self.temp_output\n self.ids[\"txt_code_output\"].text = self.temp_output", "def serial(self, may_block=True):\n pass", "def send_cmd(ser, command, debug=0):\n ser.reset_input_buffer()\n ser.reset_output_buffer()\n\n for b in serial.iterbytes(command):\n n = ser.write(b)\n if debug:\n LOGGER.debug(\"{} byte ({}) written to port\".format(n, b))\n time.sleep(0.1)\n\n out = \"\"\n rx = 1\n while rx:\n rx = ser.read(ser.in_waiting or 1)\n if rx:\n out += rx.decode(errors=\"replace\")\n if debug:\n LOGGER.debug(rx)\n\n return out", "def main(self) :\n\n # Close the serial port in case the previous run didn't closed it properly\n self.serialPortDWM1001_1.close()\n self.serialPortDWM1001_2.close()\n \n # Sleep for one sec\n time.sleep(1)\n \n # Open serial port\n self.serialPortDWM1001_1.open()\n self.serialPortDWM1001_2.open()\n\n # Check if the serial port is opened\n if(self.serialPortDWM1001_1.isOpen()):\n rospy.loginfo(\"Port `O` opened: {}\".format(self.serialPortDWM1001_1.name) )\n else:\n rospy.logerr(\"Can't open port `O`: {}\".format(self.serialPortDWM1001_1.name))\n \n if(self.serialPortDWM1001_2.isOpen()):\n rospy.loginfo(\"Port `P` opened: {}\".format(self.serialPortDWM1001_2.name) )\n else:\n rospy.logerr(\"Can't open port `P`: {}\".format(self.serialPortDWM1001_2.name))\n\n try: \n tags = ['A', 'B', 'C', 'D']\n # Counting the msgs received and processed to check the system status\n msg_portO_cnt = [0,0,0,0]\n msg_portP_cnt = [0,0,0,0]\n while not rospy.is_shutdown():\n self.serialPortDWM1001_1.write(\"S\".encode())\n # just read everything from serial port \n for _ in range(4): \n serialReadLine = self.serialPortDWM1001_1.read_until()\n # print(serialReadLine)\n if not serialReadLine:\n rospy.logwarn(\"Port O: Read a empty line from Serial Port\")\n continue\n self.publishTagPositions('O',serialReadLine)\n msg_portO_cnt[tags.index(serialReadLine[9])] += 1\n self.rate.sleep()\n\n 
self.serialPortDWM1001_2.write(\"S\".encode()) \n for _ in range(4): \n serialReadLine = self.serialPortDWM1001_2.read_until()\n #print(serialReadLine)\n if not serialReadLine:\n rospy.logwarn(\"Port P: Read a empty line from Serial Port\")\n continue\n self.publishTagPositions('P',serialReadLine) \n msg_portP_cnt[tags.index(serialReadLine[9])] += 1 \n self.rate.sleep()\n\n if sum(msg_portO_cnt) > 100 and self.verbose :\n rospy.loginfo(\"All OK. Done O-> {}, P-> {} times !\".format(msg_portO_cnt, msg_portP_cnt))\n msg_portO_cnt = [0,0,0,0]\n msg_portP_cnt = [0,0,0,0]\n\n finally:\n rospy.loginfo(\"Quitting, and sending reset command to dev board\")", "def open_serial(self):\n self.port = serial.Serial(\n self.device,\n baudrate=SERIAL_BAUD,\n timeout=5.0,\n bytesize=serial.EIGHTBITS,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n xonxoff=False,\n rtscts=False,\n dsrdtr=False)\n\t\n self.port.flushInput()\n self.port.flushOutput()", "def cmd_execute_io(*a):\n for cmd in a:\n cmd = cmd + \" > tmp.out\"\n os.system(cmd)\n with open(\"tmp.out\") as f:\n # print(\"\".join(x for x in f.readlines()))\n a = \"\".join(x for x in f.readlines())\n b = \"\".join(\"\\\\x\" + a[x : x + 2] for x in range(0, len(a), 2))\n print('buffer += b\"' + b + '\"')\n os.system(\"rm -f tmp.out\")", "def postloop() -> None:\n print(\"\\n\")", "def app_principal_led():\n \"\"\"\n import serial\n ser = serial.Serial(0) # open first serial port\n print ser.portstr # check which port was really used\n ser.write(\"hello\") # write a string\n ser.close() # close port \n \"\"\"\n\n\n start = mpa.ModuloPyArduino()\n p, v = start.config_arduino()\n con = start.set_conection(p, v)\n\n\n print \"\\n Status of conection: \", con\n if con != 0:\n start.serial_loop_app(con, 1)\n else:\n pass\n\n con.close()", "def spin(self):\n\n for x in self.spinchars:\n self.string = self.msg + \">> \" + x + \"\\r\"\n self.out.write(self.string.encode('utf-8'))\n self.out.flush()\n time.sleep(self.waittime)", "def send_serial_command(data):\n print(data)\n serial_command = data\n SERIAL_PARENT.send(serial_command)\n OUTGOING.append(serial_command)", "def Run(port):\n\tport.write(\"R\");", "def continuous_shell_reader(self):\n\n while not self.thread_stop.is_set():\n out = self.shell_reader()\n\n if not out == \"\":\n print(\"IPC: Received: {}\".format(out))", "def daemon_code():\n import serial_driver as sd\n drv = sd.SerialDriver(force=True)\n drv.run() # no return", "def startSerial(self):\n self.ser.setPort(self.SERIAL_PORT)\n self.ser.open()", "def data_available(self):\n\n self.run = True\n self.serial.reset_input_buffer()\n while self.run:\n if self.serial.in_waiting:\n data: str = self.serial.readline().decode(\"utf-8\")\n data = data.replace(\">>>\", \"\").lstrip()\n\n if len(data) > 0:\n self.output_text.config(state=NORMAL)\n self.output_text.insert(END, data)\n self.output_text.see(END)\n self.output_text.config(state=DISABLED)\n else:\n time.sleep(0.1)", "def encode(port,baudrate,n_pir,template,winsize,destructive):\n template_filename=template+\"%02d\"\n if destructive:\n for n in range(n_pir):\n with open(template_filename%(n+1),'wb') as f:\n f.write()\n try:\n t1=time.time()\n click.echo(\"[ ] Serial port\")\n ser=serial.Serial(port,baudrate)\n tt1=time.localtime()[:6]\n click.echo(\"Start time: %04d-%02d-%02d %02d:%02d:%02d.\"%tt1)\n click.echo(\"\\r[*] Serial port\")\n click.echo(\"[*] Reading stream\")\n # Wait 1.5 second for garbage to get out serial\n while time.time()-t1<1.5:\n ser.readline()\n except 
serial.SerialException:\n click.echo(\"\\r[-] Serial connection error.\")\n return\n\n # Write to multiple files (one per pir)\n while True:\n current_time=datetime.now()\n n_reads=0\n summing_array=numpy.zeros(n_pir)\n end_loop=current_time+timedelta(seconds=winsize)\n while current_time<end_loop:\n try:\n in_serial=ser.readline()\n if in_serial==b'':\n continue\n cleaned_serial=[int(x,2) for x in in_serial.strip().split(b'\\t')]\n if len(cleaned_serial)!=n_pir:\n continue\n click.echo(cleaned_serial)\n for n in range(n_pir):\n summing_array[n]+=cleaned_serial[n]\n n_reads+=1\n current_time=datetime.now()\n except (KeyboardInterrupt,SystemExit):\n t_end=time.localtime(time.time())[:6]\n click.echo(\"\\n[C] Exiting\")\n click.echo(\"[-] Serial connection ended at %04d-%02d-%02d %02d-%02d-%02d\"%t_end)\n return\n except ValueError:\n continue\n\n # Write values to file\n bin_start=int(time.mktime(current_time.timetuple()))\n for n in range(n_pir):\n with open(template_filename%(n+1),'ab') as f:\n float_avg=summing_array[n]/n_reads\n out_string=struct.pack('=If',bin_start,float_avg)\n f.write(out_string)\n\n\n\n # For Epoch time, the minimum bit length to represent the seconds is 31bits --> brings us to 2038\n # Use 32 bits == 4 bytes for time representation as bytes is the smallest size to write in using Py", "def main():\n print(\"Scanning for Jiobits. Please wait...\")\n connection = ci_bluetooth.BTConnectedDevice()\n potential_connections = connection.scan_for_bt_devices(ci_bluetooth.BTDeviceTypes.ANY)\n selected_index = raw_input(\"select a device index from the list above > \")\n selected_index = int(selected_index.strip())\n selected_device = potential_connections[selected_index-1]\n if(selected_device is not None):\n connection.connect(selected_device.addr)\n print(\"Connection complete\")\n command = \"\"\n while 1:\n signal.signal(signal.SIGINT, catch_ctrl_C)\n if isData():\n while isData():\n if (command.find('loop') != -1):\n a,b,c = command.split(' ')\n for n in int(b):\n c += sys.stdin.readline()\n else:\n command += sys.stdin.readline()\n if '\\n' in command:\n command = command.strip()\n connection.send_console_cmd(command)\n command = \"\"\n connection.peripheral.waitForNotifications(1)\n data_str = connection.delegate.print_bare_clear_console()", "def _writeloop(self):\r\n while self._ll_alive:\r\n ## Add a thread lock\r\n if not self._uart_tx_queue.empty():\r\n data = self._uart_tx_queue.get()\r\n #clear the response list before send the command\r\n #self._uart_rx_queue.clear()\r\n #self.log.debug(\"Uart send cmd:\",data)\r\n #time.sleep(0.01)\r", "def init_com(self):\r\n self.__ser = serial.Serial(\r\n self.__dev_no, self.__baudrate, timeout=self.__timeout)\r\n\r\n # Stop the Continious Stream, avoid error\r\n self.__ser.write(self.__api.esc_cmd())\r\n self.__ser.write(self.__api.devid_cmd())\r\n tmp = self.__ser.readline().decode()\r\n\r\n # Get Dev ID\r\n if \"ID= \" in tmp:\r\n self.__api.devid = tmp.split(\"ID= \")[1].replace(\"\\r\", \"\")\r\n rospy.loginfo(self.__api.devid)\r\n\r\n init_cmds = [self.__api.factory_settings_cmd, self.__api.format_cmd(self.__format),\r\n self.__api.sample_rate_cmd(100), self.__api.continuous_stream_cmd]\r\n\r\n for cmd in init_cmds:\r\n self.__ser.write(self.__api.write_enable_cmd)\r\n rospy.loginfo(self.__ser.readline().decode())\r\n time.sleep(self.init_sleep)\r\n rospy.loginfo(cmd)\r\n self.__ser.write(cmd)\r\n if cmd != self.__api.continuous_stream_cmd:\r\n rospy.loginfo(self.__ser.readline().decode())\r\n 
time.sleep(self.init_sleep)\r\n return True\r\n return False", "def spin(self):\n\n for x in self.spinchars:\n self.string = self.msg + x + \"\\r\"\n self.out.write(self.string)\n self.out.flush()\n time.sleep(self.waittime)", "def console(self):\n fricas_console()", "def _connect(self):\n while self._com is None:\n try:\n self._com = serial.Serial()\n self._com.baudrate = self.BAUDRATE\n self._com.port = self.port\n self._com.setDTR(False)\n self._com.timeout = self.TIMEOUT\n self._com.open()\n # Wait for arduino to reset\n time.sleep(2)\n except serial.SerialException as e:\n print \"Unable to connect to Arduinio, retrying...\"\n self._com = None\n time.sleep(5)", "def __init__(self):\n self.ser = serial.Serial('/dev/ttyUSB3',9600)\n collect_readings = False\n self.colours = []\n self.max_readings = 50 #maximum number of readings to use", "def write_command(serial, comm, verbose = False, dt = None):\r\n if verbose and comm != \"\":\r\n if dt is None:\r\n print(\"{} \\t\\t-> {}\".format(comm, serial.port))\r\n else:\r\n print(\"{} \\t\\t-> {} at {:2.3f} ms\".format(comm, serial.port, dt))\r\n serial.write(comm.encode())", "def __write_command(serial_port, command):\n line_ending = \"\\r\\n\"\n ctrl_c_cmd = \"\\x03\" + line_ending\n\n # Clear any existing text by sending a CTRL-C\n # command and waiting for a prompt\n serial_port.write(ctrl_c_cmd.encode(\"utf-8\"))\n Cambrionix.__get_response(serial_port)\n\n if not command.endswith(line_ending):\n command += line_ending\n\n for char in command:\n serial_port.write(char.encode(\"utf-8\"))\n if command.startswith(\"reboot\") and char == \"\\r\":\n break\n\n while True:\n ready = select.select([serial_port], [], [], 25)[0]\n if ready:\n if serial_port.read(1).decode(\"utf-8\") == char:\n break\n else:\n raise errors.DeviceError(\"Device cambrionix write command failed. 
\"\n \"Read timeout on serial port: {} \"\n \"while writing command: {}\".format(\n serial_port, command))", "def test_console_driver(duthost):\n out = duthost.shell('ls /dev/ttyUSB*', module_ignore_errors=True)['stdout']\n ttys = set(out.split())\n pytest_assert(len(ttys) > 0, \"No virtual tty devices been created by console driver\")\n\n out = list(duthost.console_facts()[\"ansible_facts\"][\"console_facts\"][\"lines\"].keys())\n for i in range(0, len(out)):\n expected_virtual_tty = \"/dev/ttyUSB{}\".format(i)\n pytest_assert(\n expected_virtual_tty in ttys,\n \"Expected virtual tty device [{}] not found.\".format(expected_virtual_tty))", "def __init__(self, port, debugLogFile):\n \n self.port = port\n self.dbF = debugLogFile\n debugRow = ''\n buf = []\n\n # Open port, nominal baudrate = 19200, TC required 9600 though\n self.ser = serial.Serial(self.port, baudrate=9600, timeout=1)\n time.sleep(1) # Give serial port time to set up\n \n # Clearing serial buffer\n while self.ser.inWaiting() > 0:\n ch = self.ser.read(1) #Read 1 BYTE\n buf.append(ch)\n debugRow = ''.join(buf) \n print debugRow\n self.dbF.writerow([debugRow])", "def scan_serial(self,event):\n if str(platform.system()) == 'Windows':\n tempList = ['COM%s' % (i + 1) for i in range(256)]\n elif str(platform.system()) == 'Linux':\n tempList = glob.glob('/dev/tty[A-Za-z]*')\n elif str(platform.system()) == 'Darwin':\n tempList = glob.glob('/dev/tty.*')\n else:\n raise EnvironmentError(\"Unsupported platform\")\n\n results = []\n for a_port in tempList:\n try:\n s = serial.Serial(a_port)\n s.close()\n results.append(a_port)\n except serial.SerialException:\n pass\n return results", "def write_display(self):\n for i, value in enumerate(self.buffer):\n self.bus.write_byte_data(self.address, i, value)", "async def copier_recorder(\r\n self,\r\n ) -> None:\r\n if not self.process:\r\n raise Exception(\"missing process; was this called inside a with statement?\")\r\n\r\n assert (\r\n self.process.stdout is not None\r\n ), \"process must be opened with stdout=PIPE and stderr=STDOUT\"\r\n\r\n async with self.process.stdout, self.printer_send_channel, self.notifier_send_channel:\r\n async for chunk in self.process.stdout:\r\n # print(f\"seen chunk: '{chunk!r}'\", flush=True) # debug\r\n self.stdout += chunk\r\n await self.printer_send_channel.send(chunk)\r\n\r\n # send notification\r\n # if it's full, that's fine: if expect() is run, it'll see\r\n # there's a \"pending\" notification and check stdout, then wait\r\n # for another notification\r\n try:\r\n self.notifier_send_channel.send_nowait(b\"\")\r\n except trio.WouldBlock:\r\n pass\r\n except trio.BrokenResourceError as err:\r\n print(f\"cause '{err.__cause__}'\")\r\n raise err", "def _sendingCommand(self): \n\n while True:\n self.tello.send_command('command') \n time.sleep(5)", "def start_console(self):\n return", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def _reader(self):\n while self._alive:\n self._paused = False\n if self._interface:\n self._interface.serial_resumed()\n time.sleep(4)\n self._paused = True\n if self._interface:\n self._interface.serial_paused()\n time.sleep(2)", "def shortcut(self):\n if not options.quiet: sys.stderr.write(\">> shortcut enter\\n\")\n self.alive = 
True\n self.thread_read = threading.Thread(target=self.reader)\n self.thread_read.setDaemon(True)\n self.thread_read.setName('serial->socket')\n self.thread_read.start()\n self.socket.settimeout(1)\n self.writer()\n if not options.quiet: sys.stderr.write(\">> shortcut exit\\n\")", "def write( shell, data ):\n #print 'cmd: ' + data\n global waiting\n os.write( shell.stdin.fileno(), data )\n waiting = True", "def workerThread1(self):\n while self.running:\n sleep(READ_DELAY)\n\n self.gui.do_terminal() \n\n #self.queue.put(self.gui.readSerialByte) # this didn't\n #self.gui.readSerialByte() # this works", "def sn(self):\n\t\tstring = []\n\t\tresp = [0x00]\n\t\tself.spi.transfer([0x10], [0x00], 1)\n\t\ttime.sleep(9e-3)\n\t\tfor i in range(60):\n\t\t\tself.spi.transfer([0x00], resp, 1)\n\t\t\tstring.append(chr(resp[0]))\n\t\ttime.sleep(0.1)\n\t\treturn ''.join(string).strip()", "def start(self):\n #TODO add thread checking, should only be 1 thread per serial interface\n self.connected = True\n t1 = Thread(target=self._read_cardiochip) \n t1.daemon = True\n t1.start()\n print \"Started CardioChip reader\"", "async def console_writer(payload: ConsumerPayload):\n print(f\"console writer: {payload}\")", "def send_command(self, command):\n self.enable_serial_port(self.port)\n time.sleep(.2)\n self.serial_com.write(command.encode() + b'\\r\\n')\n time.sleep(.2)", "def run(self):\n for i in range(self.nreps):\n print self.ch,", "def run(outs, ins_filter='/dev/ttyUSB.*', newport=lambda conn: None, write_queue=None):\r\n data_queue = multiprocessing.Queue()\r\n\r\n multiprocessing.Process(\r\n target=writer,\r\n args=(data_queue, write_queue, outs)\r\n ).start()\r\n\r\n readers = {}\r\n\r\n while True:\r\n\r\n for (path, _, _) in serial.tools.list_ports.grep(ins_filter):\r\n\r\n if path not in readers.keys() or not readers[path].is_alive():\r\n\r\n readers[path] = multiprocessing.Process(\r\n target=reader, args=(data_queue, path, newport)\r\n )\r\n readers[path].start()", "def initialize_mainframe(self, c):\n dev = self.selectedDevice(c)\n yield dev.write_line(\"*RST\\n\")\n self.wait_for_completion(c)\n yield dev.write_line(\"VERB 127\\n\")\n self.wait_for_completion(c)\n yield dev.write_line(\"CEOI ON\\n\")\n self.wait_for_completion(c)\n yield dev.write_line(\"EOIX ON\\n\")\n self.wait_for_completion(c)\n yield dev.write_line(\"TERM D,LF\\n\")\n self.wait_for_completion(c)\n \n for ii in range(0, 12):\n yield dev.write_line(\"BAUD \"+str(ii)+\",9600\\n\")\n self.wait_for_completion(c)\n \n yield dev.write_line(\"FLSH\\n\")\n self.wait_for_completion(c)\n yield dev.write_line(\"SRST\\n\")\n self.wait_for_completion(c)\n returnValue(True)", "def turn_output_on(self):\n self.instr.write('RF1')\n time.sleep(self.sleep_time)", "def run(self):\n self._go_on = True\n if self._serport is not None:\n if self._serport.isOpen():\n # Flush buffers\n self._serport.flushInput()\n self._serport.flushOutput()\n serbuf = []\n # Listen for incoming serial data\n while self._go_on:\n try:\n # Read single byte (non blocking function)\n ch = self._serport.read()\n if len(ch) > 0: \n # End of serial packet?\n if ch == '\\r' or ((ch == '(') and (len(serbuf) > 0)):\n strBuf = \"\".join(serbuf)\n serbuf = []\n \n # Enable for debug only\n if self._verbose == True:\n print \"Rved: \" + strBuf\n \n # Notify reception\n if self.serial_received is not None:\n try:\n self.serial_received(strBuf)\n except StationException as ex:\n ex.display()\n elif ch != '\\n':\n # Append char at the end of the buffer (list)\n 
serbuf.append(ch)\n else:\n time.sleep(0.01)\n except serial.SerialException:\n raise StationException(\"Serial port \" + self.portname + \" not available\")\n except OSError:\n raise StationException(str(sys.exc_type) + \": \" + str(sys.exc_info()))\n \n # Anything to be sent? \n #self._send_lock.acquire()\n if not self._strtosend.empty():\n if time.time() - self.last_transmission_time > SerialPort.txdelay:\n strpacket = self._strtosend.get() \n # Send serial packet\n self._serport.write(strpacket) \n # Update time stamp\n self.last_transmission_time = time.time() \n # Enable for debug only\n if self._verbose == True:\n print \"Sent: \" + strpacket\n #self._send_lock.release()\n else:\n raise StationException(\"Unable to read serial port \" + self.portname + \" since it is not open\")\n else:\n raise StationException(\"Unable to read serial port \" + self.portname + \" since it is not open\")\n print \"Closing serial port...\"", "def send_to_port(self):\r\n time.sleep(2)\r\n # ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r", "async def read_console(self):\n while self.proc is not None and self.proc.poll() is None:\n line = await self.loop.run_in_executor(None, self.proc.stdout.readline) # Async readline\n # Parse the command output and get the time in epoch format\n match = re.match(r'\\[([0-9]{2}):([0-9]{2}):([0-9]{2})\\] \\[([^][]*)\\]: (.*)$', line.decode())\n if match is None:\n return\n h, m, s, log, text = match.groups()\n local = time.localtime()\n if h == 23 and local.tm_hour == 0: # In case a line from 23:59 gets parsed at 00:00\n local = time.localtime(time.time()-3600)\n log_t = list(local)\n log_t[3:6] = map(int, (h, m, s))\n log_time = time.mktime(tuple(log_t))\n self.loop.create_task(self.on_line(log_time, log, text))", "def run(self):\n self.startSerial()\n # Wait about five seconds before doing anything\n time.sleep(5)\n while True:\n # Check setpoints against all controllers\n self.check_setpoints()\n\n # Issue any new commands as necessary\n self.check_pins()\n\n # Receive the latest Arduino data and process into dictionary\n self.read_arduino_data_and_format_dictionary()\n\n # Clean all of the arduino stuff to avoid incorrect inputs\n with self.lock:\n self.ser.reset_output_buffer()\n with self.lock:\n self.ser.reset_input_buffer()", "def startup_info(serial_port):\n top()\n programflow(serial_port)", "def __init__(self, port):\r\n self.ser = serial.Serial(port=port,\r\n baudrate=9600,\r\n # bytesize=serial.EIGHTBITS,\r\n # parity=serial.PARITY_EVEN,\r\n # stopbits=serial.STOPBITS_ONE,\r\n timeout=0.05)\r\n self.ser_io = io.TextIOWrapper(io.BufferedRWPair(self.ser, self.ser),\r\n newline='\\r',\r\n line_buffering=True)", "def writer(self):\n while self.alive:\n try:\n if controlEvent.isSet() == False:\n self.alive = False\n self.thread_read.join()\n break\n data = self.socket.recv(1024)\n if not data:\n break\n #if self.ser_newline and self.net_newline:\n # do the newline conversion\n # XXX fails for CR+LF in input when it is cut in half at the begin or end of the string\n #data = ser_newline.join(data.split(net_newline))\n # Only send data to serial if it is in active state\n if controlEvent.isSet() == True:\n self.serial.write(data) # get a bunch of bytes and send them\n # the spy shows what's on the serial port, so log it 
after converting newlines\n if self.spy:\n sys.stdout.write(codecs.escape_encode(data)[0])\n sys.stdout.flush()\n except socket.timeout:\n continue\n except socket.error, msg:\n sys.stderr.write('writer socket.error: %s\\n' % msg)\n # probably got disconnected\n break\n except IOError, msg:\n sys.stderr.write('writer IOError: %s\\n' % msg)\n except Exception, msg:\n sys.stderr.write('writer Other Exception: %s\\n' % msg)\n #self.alive = False", "def _stdout_to_flag(self):\n self._is_running.wait()\n while self._is_running.is_set():\n msg = self.stdout_queue.get()\n if msg is None or len(msg) < 1: # It's time to stop\n break\n if msg[0] == \"#\": # It's a signal from the kxkmcard program\n self.onEvent(msg[1:].split(' '))\n else:\n self._log(\"warning\", \"unknown stdout line {0}\".format(msg))", "def copy_file_to_stdout(file_):\n while True:\n block = file_.read(const.BUFFER_SIZE)\n if not block:\n break\n const.STDOUT.write(block)", "def read_from_serial(self):\n output = b''\n time.sleep(self._sleep_time)\n while self._ser.inWaiting() > 0:\n output = output + self._ser.read(1)\n #A default ten powercycle delay means that some measurements may still be processing\n #by the time the read function is called. This slows down the read but ensures that\n #it will finish (per my testing). There is probably a better way to do this. TODO\n time.sleep(0.06)\n return output.decode('utf-8').strip()", "def reader(self):\n while self.alive:\n try:\n if controlEvent.isSet() == False:\n break\n data = self.serial.read(1) # read one, blocking\n n = self.serial.inWaiting() # look if there is more\n if n:\n data = data + self.serial.read(n) # and get as much as possible\n if data:\n # the spy shows what's on the serial port, so log it before converting newlines\n if self.spy:\n sys.stdout.write(codecs.escape_encode(data)[0])\n sys.stdout.flush()\n #if self.ser_newline and self.net_newline:\n # do the newline conversion\n # XXX fails for CR+LF in input when it is cut in half at the begin or end of the string\n #data = net_newline.join(data.split(ser_newline))\n # escape outgoing data when needed (Telnet IAC (0xff) character)\n self._write_lock.acquire()\n try:\n # Only send data to socket if it is in active state\n if controlEvent.isSet() == True:\n self.socket.sendall(data) # send it over TCP\n except Exception, msg:\n sys.stderr.write('reader Socket ERROR IOError: %s\\n' % msg)\n finally:\n self._write_lock.release()\n except IOError, msg:\n sys.stderr.write('reader ERROR IOError: %s\\n' % msg)\n break\n except socket.error, msg:\n sys.stderr.write('reader ERROR socket.error: %s\\n' % msg)\n break\n except Exception, msg:\n sys.stderr.write('reader ERROR Other Exception: %s\\n' % msg)\n break", "def display_content(com,message):\n #message = message.encode('utf-8')\n #message = message.decode('ascii', 'ignore')\n safeMsg = filter(lambda x: x in string.printable, message)\n safeMsg = safeMsg.replace('\\n', ' ')\n print \"ALPHA: \", safeMsg\n try:\n #com = serial.Serial(config.devalpha, 9600, timeout=3)\n #com.close()\n #com.open()\n comstr = config.alpha['display'] + safeMsg + config.alpha['eot']\n com.write(comstr)\n #com.close()\n except serial.SerialException as e:\n logging.warning(\"Serial exception: \"+str(e))", "def run_terminal():\n playing = True\n while playing:\n print(\"$\", end='')\n command = input()\n list_of_inputs = command.split(\" \")\n if list_of_inputs[0] == \"exit\":\n playing = False\n break\n handle_command(list_of_inputs)", "def connect_serial(self):\n if self.ser == 0:\n try:\n 
self.ser = serial.Serial(PORT, BAUDRATE, BYTESIZE, PARITY, STOPBITS, TIMEOUT)\n #self.ser = serial.Serial('/dev/ttyUSB0', 115200, timeout=10)\n except:\n self.textBrowser.append(QtGui.QApplication.translate(\"MainWindow\", \"\\n\\t----Failed to connect to the device----\\n\", None, QtGui.QApplication.UnicodeUTF8))\n return 0\n # this code shouldn't be never executed\n else:\n try:\n self.ser.open()\n except:\n self.textBrowser.append(QtGui.QApplication.translate(\"MainWindow\", \"\\n\\t----Failed to connect to the device----\\n\", None, QtGui.QApplication.UnicodeUTF8))\n return 0\n return 1", "def _flush():\n libtcod.console_flush()", "def run(self):\n while self.running:\n if self.commslock.acquire(False):\n try:\n self.flush()\n finally:\n self.commslock.release()\n sleep(1)", "def send_cmd(self):\n\n cmd = self.repl_input.get().encode()\n self.serial.write(cmd + b\"\\r\")\n self.repl_input.set(\"\")", "def connect(self):\r\n\r\n wBytes = self.ser.write(str('UART Enabled').encode('ascii'))\r\n sleep(0.05)\r\n echoStr = self.ser.read(self.ser.in_waiting)\r\n if echoStr is 'UART Enabled':\r\n print(f'\\t<Connection Succesful: {echoStr}>\\n')", "def dump(self):\n# self.partial_in=\"\"\n# for line in sys.stdin: \n# self.partial_in+=sys.stdin.read(1)\n sys.stdout = sys.__stdout__\n os.system('cls')\n for cb in self.buffers.values():\n cb.dump(sys.stdout)\n sys.stdout = self", "def main(dev=\"/dev/ttyUSB0\", br = 115200, verbose=0):\n bridge = RadiometerDaemon(dev)\n bridge.connect()", "def run(self):\n if has_GUI:\n self.GUI(self.buffer)\n else:\n while True:\n message = input(\"Write your command:\\n\")\n # print(message)\n self.buffer.append(message)", "def output_to_screen(stdout_fd, stderr_fd):\n os.dup2(stdout_fd, 1)\n #os.dup2(stderr_fd, 2)", "def start(self):\n print(\"*\"*20)\n print(\"*\" + \" \"*18 + \"*\")\n print(\"*\" + \" \"*4 + \"Connect 4X\" + \" \"*4 + \"*\")\n print(\"*\" + \" \" * 18 + \"*\")\n print(\"*\" * 20)\n print(\"\\nConsole Version 1.0.0\\n\")\n self.print_menu()\n self.get_input()", "def run(self):\n terminal.open()\n self.terminal_init()\n terminal.refresh()\n\n try:\n self.loop_until_terminal_exits()\n except KeyboardInterrupt:\n pass\n finally:\n terminal.close()", "def tester(request):\n dev = serial.Serial(port=request.config.getoption(\"--tester_port\"),\n baudrate=115200, timeout=0.3, write_timeout=1)\n yield dev\n dev.close()", "def run(self):\n self.cmdloop()", "async def interactive_shell(self) -> None:\n session = PromptSession()\n while True:\n try:\n result = await session.prompt_async(f\"redCisco> \", style=style)\n if not result:\n continue\n await self.command_interpreter(str(result).strip())\n except (EOFError, KeyboardInterrupt):\n break", "def print_serial_ports():\n for port, description, hwid in sorted(comports()):\n print(f\"{port!r}\\n description: {description!r}\\n hwid: {hwid!r}\")", "def loop(self):\n while True:\n self._print_field()\n try:\n cmd = input(PROMPT)\n self._invoke_cmd(cmd)\n except EOFError: # Allows to exit by pressing ⌃D without error\n break", "def sendAuto(self):\n if (self.ser is not None):\n if len(self.cmd)>0:\n try:\n # TODO gestion python 2.0 (str) ou 3.0 (encode)\n cmd = self.cmd[0] + \"\\n\"\n self.ser.write(cmd.encode())\n self.t_TX = time.time()\n if self.mutexCmd.tryLock(100):\n del self.cmd[0]\n self.mutexCmd.unlock()\n else:\n print(\"WARN: cmd not send\")\n print(self.t_TX, cmd[:-1])\n except Exception as e:\n print(\"ERROR:Serial:sendAuto\",e)", "def esp32_console(ctx, port=None):\n if port is None:\n port 
= _esp32_guess_console_port()\n # For now, just use miniterm, idf_monitor.py doesn't play nice with pyftdi for some reason :(\n # _run_idf_script(ctx, '-p {port}'.format(port=port), 'monitor', pty=True)\n ctx.run(\"miniterm.py --raw {port} 115200\".format(port=port), pty=True)", "def main():\n\tports = glob.glob(\"/dev/tty.wchusbserial*\") + glob.glob(\"/dev/tty.usbserial*\") + glob.glob(\"COM3\") + glob.glob(\"COM4\")\n\tBAUDRATE = 9600\n\tchoice = int(input((str(ports) + \" enter numerical index for port: \")))\n\tportname = ports[choice]\n\tport = None\n\tsending_queue = None\n\treceiving_process_on = None\n\treceiving_process = None\n\ttry:\n\t\tsending_queue = multiprocessing.Queue()\n\t\treceiving_process_on = multiprocessing.Value(c_bool,False)\n\t\treceiving_process = multiprocessing.Process(target = communication, args = (portname,BAUDRATE,sending_queue,receiving_process_on))\n\t\treceiving_process.start()\n\t\twhile True:\n\t\t\tword = input(\"Enter a message: \")\n\t\t\tsending_queue.put(create_chunk(word)) #sending 32 bytes to the process queue\n\t\t\t\n\texcept Exception as e:\n\t\tprint(\"ERROR:\", e)\n\tfinally:\n\t\treceiving_process_on.value = False\n\t\tfor i in range(10): #wait for the process to stop\n\t\t\tpass\n\t\tif receiving_process != None:\n\t\t\treceiving_process.join()\n\t\t\n\t\tif sending_queue != None:\n\t\t\tsending_queue.close()", "def main():\n\n #robot = S2Serial( \"/dev/ttyUSB0\" )\n robot = S2Fluke2( \"/dev/rfcomm2\" )\n\n for i in range( 30 ):\n print( \"getIRLeft : \", robot.getIRLeft() )\n print( \"getIRRight: \", robot.getIRRight() )\n print( \"getAllIR : \", robot.getAllIR() )\n print( \"getIrEx(0): \", robot.getIrEx( 0, 128 ) )\n print( \"getIrEx(1): \", robot.getIrEx( 1, 128 ) )\n\n robot.close()", "def run(self):\n while True:\n buf = \"\"\n while len(buf) == 0 or buf[-1] != '\\n':\n if self.ser.available(): buf += self.ser.read()\n else: delay(1) # Avoid pegging CPU\n\n tokens = buf.split(' ')\n s = tokens[0]\n self.lock.acquire()\n try:\n if s == \"PPM\":\n self.ppm = [int(i) for i in tokens[1:]]\n elif s == \"Wind\":\n self.wind = int(tokens[1])\n elif s == \"roll\":\n self.roll = float(tokens[1])\n elif s == \"yaw\":\n self.yaw = float(tokens[1])\n elif s == \"Wpot\":\n self.winch = int(tokens[1])\n elif s == \"Rpot\":\n self.rudder = int(tokens[1])\n except: pass # A cast likely failed\n self.lock.release()" ]
[ "0.6927939", "0.6439883", "0.61899203", "0.6172371", "0.6096557", "0.60786813", "0.6066571", "0.599614", "0.59852797", "0.5948746", "0.5931728", "0.5931728", "0.5916063", "0.589414", "0.5853896", "0.5821321", "0.579844", "0.57957584", "0.57879835", "0.57793695", "0.57518065", "0.5745163", "0.56720823", "0.5660544", "0.5634493", "0.56138426", "0.5601743", "0.55971974", "0.55736184", "0.55724496", "0.5562091", "0.55483073", "0.5539223", "0.55379045", "0.55279523", "0.55276465", "0.5503682", "0.5472408", "0.54657817", "0.5461637", "0.5446613", "0.5445966", "0.54294765", "0.54210037", "0.5419295", "0.5408508", "0.5404231", "0.54040974", "0.53969", "0.537302", "0.5372185", "0.5368968", "0.5362655", "0.5354743", "0.5349698", "0.53409", "0.53277916", "0.5326484", "0.5314511", "0.5310038", "0.53086144", "0.5302623", "0.529577", "0.5291719", "0.5289641", "0.5285507", "0.5282893", "0.52821773", "0.52809066", "0.5278234", "0.5262355", "0.5259132", "0.52566516", "0.5238949", "0.5231415", "0.5221074", "0.5216662", "0.52149636", "0.52132493", "0.52116287", "0.5210025", "0.52080053", "0.5198737", "0.5195527", "0.5191992", "0.5188254", "0.5185589", "0.51809144", "0.5180732", "0.51743495", "0.5167812", "0.51672006", "0.51659954", "0.51646125", "0.5162338", "0.5161718", "0.5160779", "0.5160733", "0.5159551", "0.51542646" ]
0.5474405
37
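The three fields above close out the previous row: the per-negative scores, the positive document's score, and its rank among the candidates. As a sanity check, the rank can be recomputed from the scores, assuming (the dump itself does not say) that the scores are comparable similarities where higher is better and that the rank counts how many negatives outscore the positive:

def recompute_rank(document_score, negative_scores):
    # Scores are stored as strings in this dump, so cast before comparing.
    doc = float(document_score)
    # 0-based position the positive takes among the descending negative scores.
    return sum(1 for s in negative_scores if float(s) > doc)

For the row above, recompute_rank("0.5474405", [...]) returns 37, matching the document_rank field.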
Loop and copy console>serial until the config.exit_char character is found. When config.menu_char is found, interpret the next key locally.
def writer(self):
    menu_active = False
    try:
        while self.alive:
            try:
                char = self.console.getkey()
            except KeyboardInterrupt:
                char = '\x03'
            if menu_active:
                # Menu character again/exit char -> send itself
                if char in self.config.menu_char:
                    self.serial.write(char)  # send character
                elif char in self.config.exit_char:
                    self.stop()
                    break  # exit app
                elif char in 'hH?':  # h, H, ? -> Show help
                    sys.stderr.write(self.get_help_text())
                elif char in self.config.photo_char:
                    ENV.send_image_f = "Asked by console"
                else:
                    sys.stderr.write('--- unknown menu character %s ---\n' % char)
                menu_active = False
            elif char in self.config.menu_char:  # next char will be for menu
                menu_active = True
            elif char == '\n' or ord(char) == 10:
                sys.stderr.write('\n')
            else:
                self.serial.write(char)  # send character
    except:
        self.alive = False
        raise
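For illustration, here is a minimal, self-contained sketch of the same menu-char/exit-char pattern, with a plain string standing in for console keystrokes and a list standing in for the serial port. The pump function, the EXIT_CHAR/MENU_CHAR values, and the demo at the bottom are stand-ins invented for this sketch, not part of the sample above:

import sys

EXIT_CHAR = '\x1d'  # Ctrl+], an illustrative choice
MENU_CHAR = '\x14'  # Ctrl+T, an illustrative choice

def pump(keys, serial_out):
    # Copy keys to serial_out; MENU_CHAR makes the next key a local command,
    # and EXIT_CHAR (given as that local command) ends the loop.
    menu_active = False
    for char in keys:
        if menu_active:
            if char == MENU_CHAR:
                serial_out.append(char)  # menu char twice -> send it through
            elif char == EXIT_CHAR:
                break  # exit the loop
            elif char in 'hH?':
                sys.stderr.write('--- help ---\n')
            else:
                sys.stderr.write('--- unknown menu character %r ---\n' % char)
            menu_active = False
        elif char == MENU_CHAR:
            menu_active = True  # next key is interpreted locally
        else:
            serial_out.append(char)  # everything else goes to serial

out = []
pump('hi' + MENU_CHAR + '?' + MENU_CHAR + EXIT_CHAR + 'never sent', out)
print(''.join(out))  # -> 'hi'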
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
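The metadata declares a triplet objective over (query, document, negatives). A rough sketch of expanding one row of this dump into training triplets follows; the field names come from the dump itself, while the row dict, the function name, and the choice to sample k negatives are assumptions made for illustration:

import random

def row_to_triplets(row, k=4):
    # One (anchor, positive, negative) triplet per sampled negative.
    anchor, positive = row['query'], row['document']
    pool = row['negatives']
    negs = random.sample(pool, k=min(k, len(pool)))
    return [(anchor, positive, neg) for neg in negs]

# Usage with a toy row:
toy = {'query': 'q', 'document': 'd+', 'negatives': ['d1', 'd2', 'd3']}
print(len(row_to_triplets(toy, k=2)))  # -> 2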
[ "def getKey(self):\n while not rospy.is_shutdown():\n tty.setraw(sys.stdin.fileno())\n select.select([sys.stdin], [], [], 0)\n self.key = sys.stdin.read(1)\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)\n time.sleep(.05)", "def run(self):\n global key\n getch = _GetchUnix()\n key = getch()\n while key != \"e\":\n key = getch()\n #time.sleep(0.1)", "def ReadKeys(self):\n\n reg = re.compile(r\"\\w|\\s\")\n chars = \"\"\n while True:\n key = getch()\n keynum = ord(key)\n\n if keynum == 27: #escape\n self.shouldExit = True\n return \"\"\n\n if keynum == 13: #enter\n stdout.write(\"\\n\")\n break\n\n if keynum == 8: #backspace\n chars = chars[:-1]\n stdout.write(key)\n stdout.write(\" \")\n stdout.write(key)\n continue\n\n if reg.match(key): \n chars += key\n stdout.write(key)\n\n return chars", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def cmdKeyboard(self, dev):\n # Slaap één-tiende van een seconde om te voorkomen dat de toetsaanslag <enter>\n # wordt gepakt als het wordt uitgevoerd in een terminal\n time.sleep(0.1)\n\n self.hitsKeyboards[dev] = False\n f = open(self.inputPath + dev, 'rb')\n f.flush()\n while self.live:\n # Lees de toetsaanslag --> Pak de 42ste byte\n self.hitsKeyboards[dev] = f.read(144)[42]\n time.sleep(0.1)", "def get(self):\n fd = stdin.fileno()\n default = termios.tcgetattr(fd)\n\n tty.setraw(stdin.fileno()) # set terminal to raw input mode to get bytes\n\n while True: # read in until a key is pressed\n char = ord(stdin.read(1))\n if char != \"\":\n break\n # logic for keyboard interrupt\n if char == 0x03: # is the input the interrupt code?\n termios.tcsetattr(fd, termios.TCSADRAIN, default)\n raise KeyboardInterrupt\n # logic for when the user hits enter\n elif char == 0x0D or char == 0x0A: # enter is one of these depending on system\n marked = self.mask[self.cursor_location]\n\n # toggle the corresponding spot in the selection mask\n self.mask[self.cursor_location] = not self.mask[self.cursor_location]\n\n current = self.options[self.cursor_location]\n if marked: # if the item was previously selected\n self.selected = list(\n filter(lambda item: item != current, self.selected)\n ) # remove item from selecteed\n else: # if not\n self.selected.append(current) # add it to the list of selected options\n # logic for arrow keys\n # these keypresses are three bytes long\n # the first byte is an escape character\n elif char == 0x1B: # check for escape character\n if ord(stdin.read(1)) == 0x5B: # check for next byte, same for up and down\n last = ord(stdin.read(1))\n if last == 0x42: # up arrow\n # adjust the cursor position, wrapping if reached the end\n self.cursor_location = (self.cursor_location + 1) % len(\n self.options\n )\n elif last == 0x41: # down arrow\n self.cursor_location = (self.cursor_location - 1) % len(\n self.options\n )\n termios.tcsetattr(\n fd, termios.TCSADRAIN, default\n ) # reset the terminal out of raw mode", "def inputloop():\n while True:\n for char in raw_input().decode('utf-8'):\n print script(char)", "def get_key():\n\tinput_key: str = \"\"\n\ttry:\n\t\twhile not False:\n\t\t\twith Raw(sys.stdin):\n\t\t\t\tif not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop 
flag\n\t\t\t\t\tcontinue\n\t\t\t\tinput_key += sys.stdin.read(1) #* Read 1 key safely with blocking on\n\t\t\t\tif input_key == \"\\033\": #* If first character is a escape sequence keep reading\n\t\t\t\t\twith Nonblocking(sys.stdin): #* Set non blocking to prevent read stall\n\t\t\t\t\t\tinput_key += sys.stdin.read(20)\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<\"):\n\t\t\t\t\t\t\t_ = sys.stdin.read(1000)\n\t\t\t\tprint(\"INPUT: \"+input_key.replace(\"\\033\",\"<ESC>\"))\n\t\t\t\tif input_key == \"\\033\" or input_key == \"q\": #* Key is \"escape\" key if only containing \\033\n\t\t\t\t\tbreak\n\t\t\t\telif input_key.startswith((\"\\033[<0;\", \"\\033[<35;\", \"\\033[<64;\", \"\\033[<65;\")): #* Detected mouse event\n\t\t\t\t\ttry:\n\t\t\t\t\t\tprint((int(input_key.split(\";\")[1]), int(input_key.split(\";\")[2].rstrip(\"mM\"))))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<35;\"):\n\t\t\t\t\t\t\tprint(\"mouse Move\") #* Detected mouse move in mouse direct mode\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<64;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll UP\") #* Detected mouse scroll up\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<65;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll DOWN\") #* Detected mouse scroll down\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<0;\") and input_key.endswith(\"m\"):\n\t\t\t\t\t\t\tprint(\"mouse Click Release\") #* Detected mouse click release\n\t\t\t\tinput_key = \"\"\n\texcept Exception as e:\n\t\tprint(f'EXCEPTION: Input thread failed with exception: {e}')", "def run_terminal():\n playing = True\n while playing:\n print(\"$\", end='')\n command = input()\n list_of_inputs = command.split(\" \")\n if list_of_inputs[0] == \"exit\":\n playing = False\n break\n handle_command(list_of_inputs)", "def move_debug(self, environment):\n\n ch2 = sys.stdin.read(1)\n\n if ch2 == \"w\":\n # the up arrow key was pressed\n print(\"up key pressed\")\n\n elif ch2 == \"s\":\n # the down arrow key was pressed\n print(\"down key pressed\")\n\n elif ch2 == \"a\":\n # the left arrow key was pressed\n print(\"left key pressed\")\n\n elif ch2 == \"d\":\n # the right arrow key was pressed\n print(\"right key pressed\")", "def _configure_keyboard(self):\n fd = sys.stdin.fileno()\n self.original_kbd_settings = termios.tcgetattr(fd)\n new = termios.tcgetattr(fd)\n new[3] = new[3] & ~termios.ECHO # lflags\n new[3] = new[3] & ~termios.ICANON # lflags\n new[6][6] = '\\000' # Set VMIN to zero for lookahead only\n termios.tcsetattr(fd, termios.TCSADRAIN, new)", "def console():\r\n while True:\r\n interpret_command(input(\"POM> \"))", "def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == 
\"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1", "def getKey(self):\n tty.setraw(sys.stdin.fileno())\n select.select([sys.stdin], [], [], 0)\n self.key = sys.stdin.read(1)\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)", "def send_enter():\n sys.stdout.write('\\x0D') # send carriage return\n sys.stdout.flush()", "def sync(self):\n available = self.count\n if available > 0:\n available = available + 2\n buf = self.read_keypad(available)\n for raw in buf:\n evt = KeyEvent(_seesaw_key((raw >> 2) & 0x3F), raw & 0x3)\n if (\n evt.number < _NEO_TRELLIS_NUM_KEYS\n and self.callbacks[evt.number] is not None\n ):\n self.callbacks[evt.number](evt)", "def interact(self):\n if not self.connected(): return\n\n try:\n if sys.platform == 'win32':\n import msvcrt\n else:\n import tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n tty.setraw(fd)\n\n self.start_listener()\n self.start_anti_idle_timer()\n\n sys.stdout.write(self.prompt)\n\n pre_ch = b''\n while True:\n if sys.platform == 'win32':\n ch = msvcrt.getch()\n if ch == b'\\xe0':\n ch = b'\\x1b'\n if pre_ch == b'\\x1b':\n if ch == b'K': ch = b'[D' # left arrow\n elif ch == b'M': ch = b'[C' # right arrow\n elif ch == b'H': ch = b'[A' # up arrow\n elif ch == b'P': ch = b'[B' # down arrow\n else:\n ch = sys.stdin.read(1)\n if not ch:\n break\n if not self.connected():\n break\n\n self.write(ch)\n pre_ch = ch\n\n if not self.connected():\n break\n finally:\n if sys.platform != 'win32':\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n self.cancel_anti_idle_timer()", "async def __bufferedReader():\n while True:\n # Get char and then append to prevent a race condition caused by the async await\n charIn = await __terminalState.osSupport.getInputChar()\n\n wasHandled = False\n for key, handlers in __terminalState.inputHandlers.items():\n if key is None or charIn in key:\n for handler in handlers:\n asyncio.get_event_loop().call_soon(handler, charIn)\n wasHandled = True\n\n if not wasHandled:\n __terminalState.inputBuffer += charIn", "def until_not_multi(chars) -> str:\n import sys\n chars = list(chars)\n y = \"\"\n sys.stdout.flush()\n while True:\n i = read_single_keypress()\n _ = sys.stdout.write(i)\n sys.stdout.flush()\n if i not in chars:\n break\n y += i\n return y", "def main(self):\n while self.leave_main_menu:\n print(fr.FR[4], fr.FR[5], fr.FR[6], fr.FR[7])\n self.choice_menu = input(fr.FR[8])\n self.main_menu_input()", "def init_keystrokes(self):\n import x84.bbs.session\n term = x84.bbs.session.getterminal()\n self.keyset['home'].append(term.KEY_HOME)\n self.keyset['end'].append(term.KEY_END)\n self.keyset['pgup'].append(term.KEY_PGUP)\n self.keyset['pgdown'].append(term.KEY_PGDOWN)\n self.keyset['up'].append(term.KEY_UP)\n self.keyset['down'].append(term.KEY_DOWN)\n self.keyset['down'].append(term.KEY_ENTER)\n self.keyset['exit'].append(term.KEY_ESCAPE)", "def enter_raw_repl(self):\n\n debug_indent(\"enter_raw_repl\")\n\n 
time.sleep(0.5) # allow some time for board to reset\n debug(r'self.con.write \"\\r\\x03\\x03\" (Ctrl-C twice)')\n self.con.write(b\"\\r\\x03\\x03\") # ctrl-C twice: interrupt any running program\n\n # flush input (without relying on serial.flushInput())\n n = self.con.inWaiting()\n while n > 0:\n self.con.read(n)\n n = self.con.inWaiting()\n\n if self.con.survives_soft_reset():\n debug(r'self.con.write \"\\r\\x01\" (enter raw REPL)')\n self.con.write(b\"\\r\\x01\") # ctrl-A: enter raw REPL\n data = self.read_until(1, b\"raw REPL; CTRL-B to exit\\r\\n>\", timeout=10)\n\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n>\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 1\")\n\n debug(r'self.con.write \"\\x04\" (soft reset)')\n self.con.write(b\"\\x04\") # ctrl-D: soft reset\n data = self.read_until(1, b\"soft reboot\\r\\n\", timeout=10)\n if not data.endswith(b\"soft reboot\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 2\")\n\n # By splitting this into 2 reads, it allows boot.py to print stuff,\n # which will show up after the soft reboot and before the raw REPL.\n data = self.read_until(1, b\"raw REPL; CTRL-B to exit\\r\\n\", timeout=10)\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 3\")\n\n else:\n\n debug(r'self.con.write \"\\r\\x01\" (enter raw REPL)')\n self.con.write(b\"\\r\\x01\") # ctrl-A: enter raw REPL\n data = self.read_until(0, b\"raw REPL; CTRL-B to exit\\r\\n\", timeout=10)\n\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 4\")\n debug_unindent()", "def _on_key_down(self, event, skip=True):\n # FIXME: This method needs to be broken down in smaller ones.\n current_line_num = self.GetCurrentLine()\n key_code = event.GetKeyCode()\n if key_code in (ord('c'), ord('C')) and event.ControlDown():\n # Capture Control-C\n if self._input_state == 'subprocess':\n if self.debug:\n print >>sys.__stderr__, 'Killing running process'\n if hasattr(self._running_process, 'process'):\n self._running_process.process.kill()\n elif self._input_state == 'buffering':\n if self.debug:\n print >>sys.__stderr__, 'Raising KeyboardInterrupt'\n raise KeyboardInterrupt\n # XXX: We need to make really sure we\n # get back to a prompt.\n elif self._input_state == 'subprocess' and (\n ( key_code <256 and not event.ControlDown() )\n or\n ( key_code in (ord('d'), ord('D')) and\n event.ControlDown())):\n # We are running a process, we redirect keys.\n ConsoleWidget._on_key_down(self, event, skip=skip)\n char = chr(key_code)\n # Deal with some inconsistency in wx keycodes:\n if char == '\\r':\n char = '\\n'\n elif not event.ShiftDown():\n char = char.lower()\n if event.ControlDown() and key_code in (ord('d'), ord('D')):\n char = '\\04'\n self._running_process.process.stdin.write(char)\n self._running_process.process.stdin.flush()\n elif key_code in (ord('('), 57, 53):\n # Calltips\n event.Skip()\n self.do_calltip()\n elif self.AutoCompActive() and not key_code == ord('\\t'):\n event.Skip()\n if key_code in (wx.WXK_BACK, wx.WXK_DELETE):\n wx.CallAfter(self._popup_completion, create=True)\n elif not key_code in (wx.WXK_UP, wx.WXK_DOWN, wx.WXK_LEFT,\n wx.WXK_RIGHT, wx.WXK_ESCAPE):\n wx.CallAfter(self._popup_completion)\n else:\n # Up history\n if key_code == wx.WXK_UP and (\n event.ControlDown() or\n current_line_num == self.current_prompt_line\n 
):\n new_buffer = self.get_history_previous(\n self.input_buffer)\n if new_buffer is not None:\n self.input_buffer = new_buffer\n if self.GetCurrentLine() > self.current_prompt_line:\n # Go to first line, for seemless history up.\n self.GotoPos(self.current_prompt_pos)\n # Down history\n elif key_code == wx.WXK_DOWN and (\n event.ControlDown() or\n current_line_num == self.LineCount -1\n ):\n new_buffer = self.get_history_next()\n if new_buffer is not None:\n self.input_buffer = new_buffer\n # Tab-completion\n elif key_code == ord('\\t'):\n current_line, current_line_num = self.CurLine\n if not re.match(r'^%s\\s*$' % self.continuation_prompt(),\n current_line):\n self.complete_current_input()\n if self.AutoCompActive():\n wx.CallAfter(self._popup_completion, create=True)\n else:\n event.Skip()\n elif key_code == wx.WXK_BACK:\n # If characters where erased, check if we have to\n # remove a line.\n # XXX: What about DEL?\n # FIXME: This logics should be in ConsoleWidget, as it is\n # independant of IPython\n current_line, _ = self.CurLine\n current_pos = self.GetCurrentPos()\n current_line_num = self.LineFromPosition(current_pos)\n current_col = self.GetColumn(current_pos)\n len_prompt = len(self.continuation_prompt())\n if ( current_line.startswith(self.continuation_prompt())\n and current_col == len_prompt):\n new_lines = []\n for line_num, line in enumerate(\n self.input_buffer.split('\\n')):\n if (line_num + self.current_prompt_line ==\n current_line_num):\n new_lines.append(line[len_prompt:])\n else:\n new_lines.append('\\n'+line)\n # The first character is '\\n', due to the above\n # code:\n self.input_buffer = ''.join(new_lines)[1:]\n self.GotoPos(current_pos - 1 - len_prompt)\n else:\n ConsoleWidget._on_key_down(self, event, skip=skip)\n else:\n ConsoleWidget._on_key_down(self, event, skip=skip)", "def read_next_command(self):\n colors = self.colors\n message = 'ddgr (? 
for help)'\n prompt = (colors.prompt + message + colors.reset + ' ') if colors else (message + ': ')\n enter_count = 0\n while True:\n try:\n cmd = input(prompt)\n except EOFError:\n sys.exit(0)\n\n if not cmd:\n enter_count += 1\n if enter_count == 2:\n # Double <enter>\n sys.exit(0)\n else:\n enter_count = 0\n\n cmd = ' '.join(cmd.split())\n if cmd:\n self.cmd = cmd\n break", "def process_key(key):\n print(chr(key))", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in ('\\n','\\r'):\n ch = self.defaultButton[0].lower()\n \n if ch == self.yesMessage[0].lower():\n self.yesButton()\n elif ch == self.noMessage[0].lower():\n self.noButton()\n elif ch == 'c':\n self.cancelButton()\n \n return \"break\"", "def menu():\n menu = 'main'\n while 1:\n if menu == 'main':\n click.echo('Main menu:')\n click.echo(' d: debug menu')\n click.echo(' q: quit')\n char = click.getchar()\n if char == 'd':\n menu = 'debug'\n elif char == 'q':\n menu = 'quit'\n else:\n click.echo('Invalid input')\n elif menu == 'debug':\n click.echo('Debug menu')\n click.echo(' b: back')\n char = click.getchar()\n if char == 'b':\n menu = 'main'\n else:\n click.echo('Invalid input')\n elif menu == 'quit':\n return", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.stdscr.keypad(False)\n self.stdscr.nodelay(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()", "def wait_key():\n result = None\n if os.name == 'nt':\n result = input(\"Press Enter to continue...\")\n else:\n import termios\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n try:\n result = sys.stdin.read(1)\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\n return result", "def send_cmd(self):\n\n cmd = self.repl_input.get().encode()\n self.serial.write(cmd + b\"\\r\")\n self.repl_input.set(\"\")", "def shortcut(self):\n if not options.quiet: sys.stderr.write(\">> shortcut enter\\n\")\n self.alive = True\n self.thread_read = threading.Thread(target=self.reader)\n self.thread_read.setDaemon(True)\n self.thread_read.setName('serial->socket')\n self.thread_read.start()\n self.socket.settimeout(1)\n self.writer()\n if not options.quiet: sys.stderr.write(\">> shortcut exit\\n\")", "def pause(self):\n \n print \"Press Enter to continue...\"\n waiting = True\n \n while waiting:\n if msvcrt.getch() == '\\r': waiting = False", "def cmdloop(self):\n completer = Completer(self._get_all_commands())\n try:\n while self._loop:\n if os.isatty(sys.stdin.fileno()):\n with patch_stdout():\n cmd = self._session.prompt(self.prompt, completer=completer, style=self.prompt_style)\n else:\n cmd = input(self.prompt)\n\n self.onecmd(cmd)\n except EOFError:\n pass", "def whait_for_keys_press(prompt, key1, key2, key3, key4):\n print(prompt)\n while True:\n Key_pressed = curses.wrapper(main)\n #if Key_pressed != (-1): print(Key_pressed) # displays number of key\n if Key_pressed == key1:\n break\n if Key_pressed == key2:\n break\n if Key_pressed == key3:\n break \n if Key_pressed == key4:\n break \n time.sleep(0.1)\n return Key_pressed", "def run(self):\r\n self.running = True\r\n while(self.running):\r\n line = self.ser.readline()\r\n if(line != ''):\r\n self.command = None\r\n self.data = None\r\n\t #line = line[:-4]\r\n temp = line.split(\":\")\r\n\t\t#print(temp)\r\n if(len(temp) > 1):\r\n self.command = temp[0]\r\n self.data = temp[1]\r\n else:\r\n self.command = 
temp[0]\r\n\t\t#print(self.command)\r\n if(self.command is not None):\r\n\t\t self.command_dict.get(str(self.command), lambda: 'Invalid')()", "def def_char(self, offset, data):\n self.send((\"\\x1b\\x26\\x01%c%c\\x05\") % ((offset&0xff), (offset&0xff)))\n time.sleep(0.01)\n for i in data:\n self.send((\"%c\")%i)", "def data_available(self):\n\n self.run = True\n self.serial.reset_input_buffer()\n while self.run:\n if self.serial.in_waiting:\n data: str = self.serial.readline().decode(\"utf-8\")\n data = data.replace(\">>>\", \"\").lstrip()\n\n if len(data) > 0:\n self.output_text.config(state=NORMAL)\n self.output_text.insert(END, data)\n self.output_text.see(END)\n self.output_text.config(state=DISABLED)\n else:\n time.sleep(0.1)", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in ('y','\\n','\\r'):\n self.yesButton()\n elif ch == 'n':\n self.noButton()\n \n return \"break\"", "def loop(self):\n while True:\n self._print_field()\n try:\n cmd = input(PROMPT)\n self._invoke_cmd(cmd)\n except EOFError: # Allows to exit by pressing ⌃D without error\n break", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in (self.text[0].lower(),'\\n','\\r'):\n self.okButton()\n \n return \"break\"", "def get_input(self):\n result = None\n\n try:\n while True:\n result = self.console.read_for_condition(prompt=\">>> \", condition=self.is_valid_input)\n\n if result is not None:\n break\n except KeyboardInterrupt:\n quit()\n\n # run command for next condition\n self.game_branch[result]()", "def display():\n screen.addch(head[0],head[1],'x')", "def load(device, baud, program):\n serialport = serial.Serial(device, baud, timeout=0)\n c = fdpexpect.fdspawn(serialport.fd)\n #c.logfile_read = sys.stdout\n # synch with the command prompt\n c.sendline('')\n waitprompt(c)\n # do stuff\n if(program.find('\\n')>=0):\n #program is many lines\n text = program.split('\\n')\n for line in text:\n line = line.strip()\n print(line)\n if (len(line) > 0) and (line[0] != '#'):\n c.sendline(line)\n waitprompt(c)\n print(\"waiting for prompt after twinkle\")\n else:\n #program is one line\n if (len(program) > 0) and (program[0] != '#'):\n c.sendline(program)\n waitprompt(c)\n c.close()\n print(\"Done\")\n return None", "def main():\n with FullscreenWindow() as window:\n print('Press escape to exit')\n with Input() as input_generator:\n a = FSArray(window.height, window.width)\n for c in input_generator:\n if c == '<ESC>':\n break\n elif c == '<SPACE>':\n a = FSArray(window.height, window.width)\n else:\n row = random.choice(range(window.height))\n column = random.choice(range(window.width-len(repr(c))))\n a[row, column:column+len(repr(c))] = [repr(c)]\n window.render_to_terminal(a)", "def __write_command(serial_port, command):\n line_ending = \"\\r\\n\"\n ctrl_c_cmd = \"\\x03\" + line_ending\n\n # Clear any existing text by sending a CTRL-C\n # command and waiting for a prompt\n serial_port.write(ctrl_c_cmd.encode(\"utf-8\"))\n Cambrionix.__get_response(serial_port)\n\n if not command.endswith(line_ending):\n command += line_ending\n\n for char in command:\n serial_port.write(char.encode(\"utf-8\"))\n if command.startswith(\"reboot\") and char == \"\\r\":\n break\n\n while True:\n ready = select.select([serial_port], [], [], 25)[0]\n if ready:\n if serial_port.read(1).decode(\"utf-8\") == char:\n break\n else:\n raise errors.DeviceError(\"Device cambrionix write command failed. 
\"\n \"Read timeout on serial port: {} \"\n \"while writing command: {}\".format(\n serial_port, command))", "def read_keypad(self, num):\n\n while num > len(self._current_events):\n self._current_events.append(0xFF)\n return self._current_events[:num]", "def menu_selection(prompt, dispatch_dict):\n while True:\n response = input(prompt).lower()\n if dispatch_dict[response]() == 'quit':\n break", "def dumb_tty(port):\n\tUART.setup(\"UART1\")\n\thlsd = pyhypnolsd.command_mode.from_port(port)\n\n\tget_command = True\n\twhile (get_command):\n\t\t# Get input\n\t\tcommand = raw_input(\"> \")\n\n\t\t# Check for exit\n\t\tif (command == \"exit\"):\n\t\t\tget_command = False\n\t\t\tcontinue\n\n\t\t# Send command, let it print output\n\t\thlsd.send_command(command, True)\n\n\t# Close remaining connections\n\thlsd.close()", "def return_menu(self):\n while True:\n number = pyip.inputNum(\"0. Back to the main menu: \")\n if number == 0:\n # Clean up the console\n self.clear_console()\n # back to the main menu\n self.run()\n else:\n print('Press the number zero to go back')", "def run_interactive_shell(serial_port_name = None):\r\n\t\r\n\tprint (\"- \"*14)\r\n\tprint (\" Qontrol Interactive Shell\")\r\n\tprint (\"- \"*14+\"\\n\")\r\n\t\r\n\tbaudrate = 115200\r\n\t\r\n\tdef tty_supports_color():\r\n\t\t\"\"\"\r\n\t\tReturns True if the running system's terminal supports color, and False\r\n\t\totherwise. From django.core.management.color.supports_color.\r\n\t\t\"\"\"\r\n\t\t\r\n\t\tplat = sys.platform\r\n\r\n\t\tif plat == \"win32\":\r\n\t\t\treturn False\r\n\t\telse:\r\n\t\t\tsupported_platform = plat != 'Pocket PC' and (plat != 'win32' or\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t 'ANSICON' in os.environ)\r\n\t\t# isatty is not always implemented, #6223.\r\n\t\t\tis_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()\r\n\t\t\treturn supported_platform and is_a_tty\r\n\t\t\t\r\n\r\n\tif tty_supports_color():\r\n\t\tnormal_text = \"\\033[0m\"\r\n\t\tin_text = \"\\033[33;1m\"\r\n\t\tout_text = \"\\033[36;1m\"\r\n\t\temph_text = \"\\033[97;1m\"\r\n\telse:\r\n\t\tnormal_text = \"\"\r\n\t\tin_text = \"\"\r\n\t\tout_text = \"\"\r\n\t\temph_text = \"\"\r\n\t\r\n\t# List available serial ports\r\n\t# Separate ports that are probably Qontrol devices from those that are probably not\r\n\tports_of_interest = list(list_ports.grep('.*usbserial-FT[A-Z0-9].*'))\r\n\tports_other = [port for port in list(list_ports.grep('.*')) \r\n\t\t\t\t\t\t\t\t\tif port not in ports_of_interest]\r\n\tports = ports_of_interest + ports_other\r\n\tn_ports = len(ports)\r\n\tprint (\"Available ports:\")\r\n\ti = 0\r\n\tfor port in ports_of_interest:\r\n\t\tprint (\" {:}#{:2} - {:15}{:}\".format(emph_text, i, str(port), normal_text))\r\n\t\ti += 1\r\n\tfor port in ports_other:\r\n\t\tprint (\" #{:2} - {:15}\".format(i, str(port)))\r\n\t\ti += 1\r\n\t\r\n\t# Ask user which port to target\r\n\tif serial_port_name is None:\r\n\t\tfor i in range(3):\r\n\t\t\trequested_port_str = input(\"\\nWhich would you like to communicate with? 
#\")\r\n\t\t\ttry:\r\n\t\t\t\trequested_port_index = int(requested_port_str)\r\n\t\t\t\tif requested_port_index > n_ports:\r\n\t\t\t\t\traise RuntimeError()\r\n\t\t\t\tbreak\r\n\t\t\texcept:\r\n\t\t\t\tprint (\"Port index '{:}' not recognised.\".format(requested_port_str))\r\n\t\t\r\n\t\tfor i,port in enumerate(ports):\r\n\t\t\tif i == requested_port_index:\r\n\t\t\t\tbreak\r\n\telse:\r\n\t\tfor port in ports:\r\n\t\t\tif port.device == serial_port_name:\r\n\t\t\t\tbreak\r\n\t\r\n\t\r\n\tport = serial.Serial(port.device, baudrate, timeout = 0)\r\n\t\r\n\t\r\n\t# Multithread the user and hardware monitoring\r\n\timport threading, copy, collections\r\n\r\n\tclass WatcherThread(threading.Thread):\r\n\r\n\t\tdef __init__(self, stream, name='keyboard-input-thread'):\r\n\t\t\tself.stream = stream\r\n\t\t\tself.buffer = fifo(maxlen = 8) # Unlikely to ever need > 1\r\n\t\t\tsuper(WatcherThread, self).__init__(name=name, daemon=True)\r\n\t\t\tself.stop_flag = False\r\n\t\t\tself.start()\r\n\r\n\t\tdef run(self):\r\n\t\t\twhile True:\r\n\t\t\t\tr = self.stream.readline()\r\n\t\t\t\tif r:\r\n\t\t\t\t\tif type(r) is bytes:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tself.buffer.appendleft(r.decode('ascii'))\r\n\t\t\t\t\t\texcept UnicodeDecodeError:\r\n\t\t\t\t\t\t\timport binascii\r\n\t\t\t\t\t\t\tself.buffer.appendleft(str(binascii.hexlify(r)))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tself.buffer.appendleft(r)\r\n\t\t\t\tif self.stop_flag:\r\n\t\t\t\t\tbreak\r\n\t\t\r\n\t\tdef has_data(self):\r\n\t\t\treturn (len(self.buffer) > 0)\r\n\t\t\r\n\t\tdef pop(self):\r\n\t\t\treturn self.buffer.pop()\r\n\t\t\r\n\t\tdef stop(self):\r\n\t\t\tself.stop_flag = True\r\n\r\n\t# Start threads\r\n\tuser_watcher = WatcherThread(sys.stdin)\r\n\thardware_watcher = WatcherThread(port)\r\n\t\r\n\tprint (\"\\nEntering interactive mode. Use Ctrl+C/stop/quit/exit to finish.\\n\")\r\n\tprint (\"- \"*14+'\\n')\r\n\tsys.stdout.write(out_text + \" > \" + normal_text)\r\n\tsys.stdout.flush()\r\n\tcmd = \"\"\r\n\tresp = \"\"\r\n\ttry:\r\n\t\twhile True:\r\n\t\t\t\r\n\t\t\t# Handle commands from user\r\n\t\t\tif user_watcher.has_data():\r\n\t\t\t\tcmd = user_watcher.pop().strip()\r\n\t\t\t\t\r\n\t\t\t\t# Safe words\r\n\t\t\t\tif cmd in ['quit', 'exit', 'stop']:\r\n\t\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\t\tcmd = cmd + '\\n'\r\n\t\t\t\tport.write(cmd.encode('ascii'))\r\n\t\t\t\t\r\n\t\t\t\t\r\n# \t\t\t\tsys.stdout.write(\"\\r\"+\" \"*40+\"\\r\")\r\n# \t\t\t\tsys.stdout.write('> ' + cmd.strip() + \"\\r\\n\")\r\n\t\t\t\tsys.stdout.write(out_text + \" > \" + normal_text)\r\n\t\t\t\tsys.stdout.flush()\r\n\t\t\t\r\n\t\t\t# Handle response from hardware\r\n\t\t\tif hardware_watcher.has_data():\r\n\t\t\t\tresp = hardware_watcher.pop()\r\n\t\t\t\t\r\n\t\t\t\tresp = resp.strip()\r\n\t\t\t\tsys.stdout.write(\"\\r\"+\" \"*40+\"\\r\")\r\n\t\t\t\tsys.stdout.write(in_text + \" < \" + normal_text + resp + \"\\r\\n\")\r\n\t\t\t\tsys.stdout.write(out_text + \" > \" + normal_text)\r\n\t\t\t\tsys.stdout.flush()\r\n\t\r\n\texcept KeyboardInterrupt:\r\n\t\tprint(\"\\n\")\r\n\t\r\n\t# Kill our threaded friends\r\n\ttry:\r\n\t\tuser_watcher._stop()\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\thardware_watcher._stop()\r\n\texcept:\r\n\t\tpass\r\n\t\r\n\tprint (\"- \"*14+'\\n')\r\n\t\r\n\tprint (\"Interactive shell closed.\")", "def on(key):\n\n global keys, esc_count, REPEAT_NUMBER, csv_name\n\n # caps, shift, etc. 
aren't automatically registered as strings\n if type(key) == Key:\n keys[esc_count].append((str(key), time.perf_counter(), \"pressed\"))\n else:\n keys[esc_count].append((key, time.perf_counter(), \"pressed\"))\n\n if key == Key.esc:\n esc_count = esc_count + 1\n print(esc_count)\n if esc_count >= REPEAT_NUMBER:\n print(\"\\n\\n\", keys, \"\\n\\n\")\n write_to_csv(keys, csv_name)\n print(\"wrote to mummy_data.csv\")\n return False", "def _readchar() -> str: # pragma: no cover\n if not sys.stdin.isatty():\n return sys.stdin.read(1)\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n new_settings = termios.tcgetattr(fd)\n new_settings[3] = new_settings[3] & ~termios.ECHO & ~termios.ICANON\n termios.tcsetattr(fd, termios.TCSANOW, new_settings)\n try:\n char = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return char", "def _on_key_up(self, event, skip=True):\n if event.GetKeyCode() in (59, ord('.')):\n # Intercepting '.'\n event.Skip()\n wx.CallAfter(self._popup_completion, create=True)\n else:\n ConsoleWidget._on_key_up(self, event, skip=skip)\n # Make sure the continuation_prompts are always followed by a\n # whitespace\n new_lines = []\n if self._input_state == 'readline':\n position = self.GetCurrentPos()\n continuation_prompt = self.continuation_prompt()[:-1]\n for line in self.input_buffer.split('\\n'):\n if not line == continuation_prompt:\n new_lines.append(line)\n self.input_buffer = '\\n'.join(new_lines)\n self.GotoPos(position)", "def _D(stdscr):\n curses.nocbreak()\n stdscr.keypad(0)\n curses.echo()\n curses.endwin()\n import pdb; pdb.set_trace()", "def main():\n key = DICEKey()\n key.start()", "def on_feed_key(self, key_press):\n if key_press.key in {Keys.Escape, Keys.ControlC}:\n echo(carriage_return=True)\n raise Abort()\n if key_press.key == Keys.Backspace:\n if self.current_command_pos > 0:\n self.current_command_pos -= 1\n return key_press\n ret = None\n if key_press.key != Keys.CPRResponse:\n if self.current_command_pos < len(self.current_command):\n current_key = self.current_command_key\n ret = KeyPress(current_key)\n increment = min(\n [self.speed, len(self.current_command) - self.current_command_pos]\n )\n self.current_command_pos += increment\n else:\n # Command is finished, wait for Enter\n if key_press.key != Keys.Enter:\n return None\n self.current_command_index += 1\n self.current_command_pos = 0\n ret = key_press\n return ret", "def mainloops(stdscr):\n def signal_handler(sig, frame):\n \"\"\"\n What to do in case of KeyboardInterrupt\n\n Writes times to session file\n (saving file interaction to the end saves time during frames.)\n \"\"\"\n raise ExitException()\n\n signal.signal(signal.SIGINT, signal_handler)\n\n try:\n curses.curs_set(0) # hide cursor (I have my own)\n except Exception:\n pass\n stdscr.nodelay(True) # makes stdscr.getch() non-blocking\n\n canvas = Canvas(curses.LINES - 1, curses.COLS - 1)\n cursor = Cursor(canvas)\n\n display_text(stdscr, TITLE_ART)\n\n # sessions are groups of solves, stored in files in ~/.cl-timer\n # if this is a new session, create a new file, if not, use an existing one.\n\n session_name_input = InputLine(canvas, 'session name: ')\n session = MutableString(ask_for_input(stdscr, canvas, session_name_input, cursor))\n \n times = []\n ao5s = []\n ao12s = []\n scrambles = []\n session_file = \"\"\n\n session_file = MutableString(f'{HOME}/.cl-timer/{session.string}')\n if not isfile(session_file.string):\n with open(session_file.string, 'w+') as f:\n pass\n \n with 
open(session_file.string, 'r') as f:\n time_lines = [line.split('\\t') for line in f.read().split('\\n')]\n\n if [''] in time_lines:\n time_lines.remove([''])\n\n for line in time_lines:\n times.append(line[0])\n ao5s.append(line[1])\n ao12s.append(line[2])\n scrambles.append(line[3])\n\n settings_file = MutableString(f'{session_file.string}-settings.json')\n if not isfile(settings_file.string):\n with open(settings_file.string, 'w+') as f:\n json.dump(settings, f)\n \n with open(settings_file.string, 'r') as f:\n for key, value in json.load(f).items():\n settings[key] = value\n\n display_text(stdscr, DISCLAIMER)\n\n def add_time(t):\n \"\"\"\n Add new solve with time of `t`\n \"\"\"\n\n times.append(t)\n\n # update number display to show real time\n number_display.time = t\n number_display.update()\n\n # generate new scramble and update scramble_image\n new_scramble = generate_scramble(int(settings['puzzle']),\n int(settings['scramble-length']))\n scrambles.append(new_scramble)\n scramble_image.clear()\n scramble_image.chars = char(new_scramble)\n\n ao5, ao12 = update_stats()\n\n with open(session_file.string, 'a') as f:\n if len(times) == 1:\n f.write(f'{add_zero(t)}\\t{ao5}\\t{ao12}\\t{new_scramble}')\n else:\n f.write(f'\\n{add_zero(t)}\\t{ao5}\\t{ao12}\\t{new_scramble}')\n \n def calculate_average(solve, length):\n \"\"\"\n Returns average of `length` during `solve`\n\n Looks through times list and finds last `length` solves before `solve`\n Excludes best and worst times, and returns average of the rest.\n \"\"\"\n if len(times[:solve]) < length:\n # `length` solves haven't been done yet.\n return ''\n else:\n latest_average = times[solve - length:] # list of last `length` solves\n latest_average, _ = convert_to_float(latest_average, \"average\")\n if len(latest_average) < (length - 1):\n return 'DNF'\n if len(latest_average) == length:\n latest_average.remove(max(latest_average))\n latest_average.remove(min(latest_average))\n\n # calculate average and add zero if it doesn't go to 100ths place.\n \n return add_zero(round(sum(latest_average) / len(latest_average), 2))\n\n def get_session_mean():\n \"\"\"\n Returns mean of all solves in session\n \"\"\"\n try:\n float_times, len_times = convert_to_float(times, 'average')\n return add_zero(round(sum(float_times) / len_times, 2))\n except ZeroDivisionError:\n return \"\"\n\n def get_best_average(length):\n \"\"\"\n Returns best average of `length` in session\n \"\"\"\n try:\n if length == 5:\n best = add_zero(min([i for i in ao5s if i != '']))\n elif length == 12:\n best = add_zero(min([i for i in ao12s if i != '']))\n except ValueError:\n return \"\"\n return best\n\n def get_best_time():\n try:\n converted_times, _ = convert_to_float(times, 'single')\n float_times = [float(t[:-1]) if isinstance(t, str) else t for t in converted_times]\n best = converted_times[float_times.index(min(float_times))]\n if isinstance(best, float):\n return add_zero(best)\n except ValueError as e:\n return \"\"\n return best\n\n def get_worst_time():\n try:\n converted_times, _ = convert_to_float(times, 'single')\n float_times = [float(t[:-1]) if isinstance(t, str) else t for t in converted_times]\n worst = converted_times[float_times.index(max(float_times))]\n if isinstance(worst, float):\n return add_zero(worst)\n except ValueError as e:\n return \"\"\n return worst\n\n def update_stats():\n ao5 = calculate_average(len(times), 5)\n ao5s.append(ao5)\n ao5_image.chars = char(f'AO5: {ao5}')\n ao12 = calculate_average(len(times), 12)\n ao12s.append(ao12)\n 
ao12_image.chars = char(f'AO12: {ao12}')\n best_ao5 = get_best_average(5)\n best_ao5_image.chars = char(f'Best AO5: {best_ao5}')\n best_ao12 = get_best_average(12)\n best_ao12_image.chars = char(f'Best AO12: {best_ao12}')\n best_time = get_best_time()\n best_time_image.chars = char(f'Best time: {best_time}')\n worst_time = get_worst_time()\n worst_time_image.chars = char(f'Worst time: {worst_time}')\n\n len_successes = 0\n for t in times:\n if not ((isinstance(t, str)) and (t[:3] == 'DNF')):\n len_successes += 1\n\n number_of_times_image.chars = char(f'Number of Times: {len_successes}/{len(times)}')\n session_mean = get_session_mean()\n session_mean_image.chars = char(f'Session Mean: {session_mean}')\n\n return ao5, ao12\n \n session_name_image = Image(canvas, 0, 0, char(session.string))\n scramble_image = Scramble(canvas, 0, 2, char(\n generate_scramble(int(settings['puzzle']),\n int(settings['scramble-length']))))\n scramble_image.render()\n\n number_display = NumberDisplay(canvas, 15, 7)\n timer_background = Image(canvas, 0, 5, char(TIMER_BACKGROUND))\n\n ao5_image = CoverUpImage(canvas, 51, 6, char(f'AO5: {calculate_average(len(times), 5)}'))\n ao12_image = CoverUpImage(canvas, 51, 7, char(f'AO12: {calculate_average(len(times), 12)}'))\n best_ao5_image = CoverUpImage(canvas, 51, 8, char(f'Best AO5: {get_best_average(5)}'))\n best_ao12_image = CoverUpImage(canvas, 51, 9, char(f'Best AO12: {get_best_average(12)}'))\n best_time_image = CoverUpImage(canvas, 51, 10, char(f'Best time: {get_best_time()}'))\n worst_time_image = CoverUpImage(canvas, 51, 11, char(f'Worst time: {get_worst_time()}'))\n\n len_successes = 0\n for t in times:\n if not ((isinstance(t, str)) and (t[:3] == 'DNF')):\n len_successes += 1\n number_of_times_image = CoverUpImage(canvas, 51, 12, char(f'Number of Times: {len_successes}/{len(times)}'))\n \n session_mean_image = CoverUpImage(canvas, 51, 13, char(f'Session Mean: {get_session_mean()}'))\n\n if isfile(f'{HOME}/.cl-timer_rc'):\n with open(f'{HOME}/.cl-timer_rc', 'r') as f:\n rc_commands = f.read().strip().split('\\n')\n if '' in rc_commands:\n rc_commands.remove('')\n for command in rc_commands:\n try:\n command_line(canvas, stdscr, settings, scramble_image, settings_file, session_file, times, ao5s, ao12s,\n scrambles, session, session_name_image, update_stats, add_time, calculate_average, aliases,\n True, command)\n except CommandSyntaxError:\n pass\n else:\n with open(f'{HOME}/.cl-timer_rc', 'w+') as f:\n pass\n \n ao5_image.render()\n ao12_image.render()\n best_ao5_image.render()\n best_ao12_image.render()\n best_time_image.render()\n worst_time_image.render()\n number_of_times_image.render()\n session_mean_image.render()\n\n timer_running = False\n delay = 0 # how far behind the program is\n spacebar_pressed = False\n last_25_keys = [-1 for _ in range(25)]\n\n solve_start_time = 0\n frame = 0\n while True:\n\n # to make sure each frame is exactly 0.01 secs\n start_time = time.time()\n\n key = stdscr.getch()\n\n if key == 58: # :\n try:\n command_line(canvas, stdscr, settings, scramble_image,\n settings_file, session_file, times, ao5s,\n ao12s, scrambles, session, session_name_image,\n update_stats, add_time, calculate_average, aliases)\n except CommandSyntaxError:\n pass\n continue\n\n if not timer_running:\n if key == 32:\n solve_start_time = time.time()\n last_25_keys.append(key)\n last_25_keys.pop(0)\n\n if not timer_running:\n\n if spacebar_pressed:\n if 32 in last_25_keys:\n time.sleep(0.01)\n continue\n else:\n spacebar_pressed = False\n\n timer_running = 
True\n number_display.reset()\n\n else:\n if key == 32: # spacebar\n spacebar_pressed = True\n\n else:\n if key == 32:\n frame = 0\n timer_running = False\n\n t = round(time.time() - solve_start_time, 2)\n\n add_time(t)\n \n\n session_name_image.render()\n \n timer_background.render()\n number_display.render()\n\n stdscr.clear()\n stdscr.addstr(canvas.display)\n stdscr.refresh()\n\n if timer_running:\n number_display.time = time.time() - solve_start_time\n number_display.update()\n\n # take away from sleep time the amount that will get us back on track\n duration = time.time() - start_time\n if (duration + delay) > 0.01:\n # can't make it back to on-time right now\n # by not sleeping, we have saved (0.01 - duration) seconds\n delay -= 0.01 - duration\n else:\n time.sleep(0.01 - (duration + delay))\n \n frame += 1", "def wait_for_keypress(self):\n register = (self.opcode & 0xF00) >> 8\n key_pressed = False\n while not key_pressed:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n inv_keys = {v: k for k, v in config.keys.items()}\n try:\n self.registers[register] = inv_keys[chr(event.key)]\n key_pressed = True\n except KeyError:\n pass\n\n logger.info(\"Stored key {} into V{}\".format(\n self.registers[register],\n register))", "def _get_code_command_linux():\n print('Use arrows (or \\'E\\', \\'S\\', \\'W\\',' +\\\n '\\'N\\' + a number) to move or \\'q\\' to give up.')\n return get_char_code.get()", "def clean_exit(self):\n if self.client:\n if self.client.key:\n self.client.send(\"!exit\")\n self.client.cli = None\n self.stdscr.keypad(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()", "def getInput(self):\n\t\tkeyPressed = self.screen.getch()\n\t\tif keyPressed == 113:\t\t# <escape>\n\t\t\tself.terminate()\n\t\t\tself.running = False\n\t\treturn keyPressed \t\t# return key for (possible) further action in calling program", "def text_entry(self):\n\n allowed_sequences = set(['KEY_ENTER', 'KEY_ESCAPE', 'KEY_DELETE'])\n\n sys.stdout.write('Enter text (<Esc> to abort) : ')\n sys.stdout.flush()\n\n # Track start column to ensure user doesn't backspace too far\n start_column = self.term.get_location()[1]\n cur_column = start_column\n choice = ''\n with self.term.cbreak():\n val = ''\n while val != 'KEY_ENTER' and val != 'KEY_ESCAPE':\n val = self.term.inkey()\n if not val:\n continue\n elif val.is_sequence:\n val = val.name\n if val not in allowed_sequences:\n continue\n\n if val == 'KEY_ENTER':\n break\n elif val == 'KEY_ESCAPE':\n pass\n elif val == 'KEY_DELETE':\n if cur_column > start_column:\n sys.stdout.write(u'\\b \\b')\n cur_column -= 1\n choice = choice[:-1]\n else:\n choice = choice + val\n sys.stdout.write(val)\n cur_column += 1\n sys.stdout.flush()\n\n # Clear to beginning of line\n self.set_input(choice)\n self.set_sound_stage(choice)\n sys.stdout.write(self.term.clear_bol)\n sys.stdout.write(self.term.move(self.term.height, 0))\n sys.stdout.flush()", "def main():\n print(\"Welcome to the simple encryption tool!\")\n\n while True:\n print(\"\\nPlease choose an option [e/d/a/q]:\\n\"\n \" e) Encrypt some text\\n\"\n \" d) Decrypt some text\\n\"\n \" a) Automatically decrypt English text\\n\"\n \" q) Quit\")\n \n option = str(input(\"> \"))\n\n if option == \"e\":\n text = input(\"Please enter some text to encrypt: \")\n offset = int(input(\"Please enter a shift offset (1-25): \"))\n print_encrypt_or_decrypt(option, offset, text)\n \n elif option == \"d\":\n text = input(\"Please enter some text to decrypt: 
\")\n offset = int(input(\"Please enter a shift offset (1-25): \"))\n print_encrypt_or_decrypt(option, offset, text)\n \n elif option == \"a\":\n encrypted_text = input(\"Please enter some encrypted text: \")\n possible_offset = find_encryption_offsets(encrypted_text)\n decrypt_message = print_possible_offset(possible_offset)\n if decrypt_message == True:\n print(\"Decrypted message:\", decrypt(encrypted_text, possible_offset[0]))\n \n elif option == \"q\":\n print(\"Bye!\")\n break\n\n else:\n print(\"Invalid command\")", "def Loop(self):\n self.coshell.SetModesCallback(self.SetModes)\n while True:\n try:\n text = self.Prompt()\n if text is None:\n break\n self.Run(text) # paradoxically ignored - coshell maintains $?\n except EOFError:\n # ctrl-d\n if not self.coshell.ignore_eof:\n break\n except KeyboardInterrupt:\n # ignore ctrl-c\n pass\n except interactive_coshell.CoshellExitError:\n break", "def prepare_command(self, arg=None):\n\n self.textwin.print_blank(0)\n self.textwin.win.addch(0, 0, ':')\n self.textwin.win.chgat(0, 1, 1, curses.A_STANDOUT)\n\n self.inp = True\n self.command_event.set()", "def getch():\n file_desc = sys.stdin.fileno()\n old_settings = termios.tcgetattr(file_desc)\n try:\n tty.setraw(sys.stdin.fileno())\n char = sys.stdin.read(1)\n finally:\n termios.tcsetattr(file_desc, termios.TCSADRAIN, old_settings)\n return char", "def process_next_char(self): \n self.current_position += 1\n if self.current_position >= len(self.code_input):\n '''End of file since the position is equal to or greater than the input's position'''\n self.current_char = '\\0' #EOF\n print('end of line')\n self.current_char = self.code_input[self.current_position]", "def _getch_linux(prompt):\n print(prompt, end=\"\")\n sys.stdout.flush()\n fd = sys.stdin.fileno()\n old = termios.tcgetattr(fd)\n new = termios.tcgetattr(fd)\n new[3] = new[3] & ~termios.ICANON & ~termios.ECHO\n new[6][termios.VMIN] = 1\n new[6][termios.VTIME] = 0\n termios.tcsetattr(fd, termios.TCSANOW, new)\n char = None\n try:\n char = os.read(fd, 1)\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, old)\n print(char)\n return char", "def keypress(self, event):\n events = {\n '1': lambda: self.slot.set(1),\n '2': lambda: self.slot.set(2),\n '6': lambda: self.digits.set(6),\n '8': lambda: self.digits.set(8),\n }\n try:\n events[event.keysym]()\n except KeyError:\n pass\n if event.keysym in ('1', '2', 'Return', 'Enter'):\n self.get_totp()\n self.root.wm_withdraw()", "def execute_pause(self):\n print(self.empty_lines + self.double_tab + \"to continue press any key..\\r\")\n self.getch()", "def shell(s_socket):\r\n shellname = \"powershell\"\r\n bytes_value = to_bytes(len(shellname), 4, 'little')\r\n s_socket.send('o' + bytes_value + shellname)\r\n value = raw_input(shellname + \"#> \")\r\n while True:\r\n bytes_value = to_bytes(len(value), 4, 'little')\r\n s_socket.send('s' + bytes_value + value)\r\n print(s_socket.recv(20000))\r\n\r\n if 'exit' in value:\r\n break\r\n\r\n value = raw_input(shellname + \"#> \")", "def keyPressEvent(self, event):\n self.Serial.send_keystroke(event.text())", "def interactive_run(self):\r\n while True:\r\n try:\r\n #self.display_mang.print_instructions()\r\n input_string = input()\r\n if input_string == \"exit\":\r\n break\r\n self.input_mang.process_input(input_string)\r\n except Exception as e:\r\n print(e)", "def looping(self):\n\n pretty_print(\"To Exit enter: 101\", \":\")\n pretty_print(\"To continue press any number key:\", \":\")\n decision = get_int_input()\n\n if decision == 101:\n 
self.again = False", "async def run_command(device, command):\n print(\"Waiting for button presses ...\")\n async for event in device.async_read_loop():\n if EV_KEY == event.type:\n key_event = evdev.KeyEvent(event)\n if evdev.KeyEvent.key_down == key_event.keystate:\n os.system(command)", "def receive_command(self):\n if self.serial.is_open:\n return self.serial.read(999).decode('utf-8')\n self.serial.flush()", "def int_21H_1(self):\r\n\r\n ascii_char = self.GUI.get_key_value() # ten do w wczytania\r\n self.registers['AX'].move_into(ascii_char, 0, is_int=True) # zapisanie kodu ascii do AXL\r", "def mode_crypt_cramershoup(stdscr, message=None):\n loop = True\n cursor = 0\n while loop:\n show_key_choices(stdscr, cursor, message)\n key = stdscr.getkey()\n loop = False\n cs = CramerShoup()\n if key == '1' or (key == '\\n' and cursor == 1):\n key_size = choose_keys_size(stdscr)# choose the size of key [256,512,1024]\n stdscr.clear()\n stdscr.addstr(\"Création des clés de chiffrement ...\\n\\n\")\n stdscr.refresh()\n cs.generate_keys(key_size)\n stdscr.addstr(\"Vos clés ont été générés dans keys/\\n\")\n stdscr.refresh()\n napms(2000)\n mode_crypt_cramershoup(stdscr, \"Les clés ont été générés\\n\")\n \n elif key == '2' or (key == '\\n' and cursor == 2):\n # chiffre avec la clé privé (la clé privé contient la clé publique)\n key_file_name = input_user(stdscr, \"Veuiller entrer l'enplacement de la clé public. Ctrl + G pour confirmer\")\n try:\n cs.read_key(key_file_name)\n except IOError:\n # cannot open the file\n mode_crypt_cramershoup(stdscr, \"Impossible de lire la clé dans le fichier {}\".format(key_file_name))\n return\n file_name = input_user(stdscr, \"Clé chargé avec succès.\\n Veuillez entrer le nom du fichier à chiffrer\")\n try:\n file = open(file_name)\n file.close()\n except IOError:\n mode_crypt_cramershoup(stdscr, \"Impossible d'ouvrir le fichier {}\".format(file_name))\n return\n # si le fichier est un pgm, on laisse le choix à l'utilisateur\n pgm = False\n if re.match('.+\\.pgm.*', file_name) is not None:\n pgm = choix_mode_PGM(stdscr)\n \n # on chiffre le fichier\n stdscr.clear()\n stdscr.addstr(\"En cours de chiffrement ...\\n\")\n stdscr.refresh()\n wrap = None\n if pgm:\n wrap = PGMEncrypter(file_name, cs, cs.bit_size//(2*8), file_name + \".crypted\", 4*cs.bit_size//8)\n else:\n wrap = BlockFileEncrypter(file_name, cs, cs.bit_size//(2*8), file_name + \".crypted\", 4*cs.bit_size//8)\n wrap.crypt_to_out()\n stdscr.addstr(\"Votre fichier {} a été chiffré :) !\".format(file_name), curses.color_pair(3))\n stdscr.refresh()\n napms(1000)\n menu(stdscr)\n elif key == 'm' or (key == '\\n' and cursor == 3):\n menu(stdscr)\n elif key == 'KEY_UP' and cursor > 1:\n cursor -= 1\n loop = True\n elif key == 'KEY_DOWN' and cursor < 3:\n cursor += 1\n loop = True\n else:\n loop = True", "def start(self):\n self.print_separator()\n self.stdscr.box()\n\n box = curses.newwin(4, self.maxx-8, self.pad, self.pad)\n box.addstr(1,1,\"hello\")\n while self.running:\n # Enter the main program loop\n key = self.stdscr.getkey()\n for fn in [self.stdscr.clear,\n lambda: self.handle_key(key),\n self.update_xy,\n self.print_pattern,\n self.print_separator,\n self.stdscr.box,\n self.generate_menu_items,\n self.print_menu_items,\n self.print_current_selection,\n self.stdscr.refresh]:\n fn()", "def key_handler(self):\n \n self.pressed = waitKey(1) & 255 #wait for keypress for 10 ms\n if self.pressed == 27: #exit program on 'esc'\n print \"exiting...\"\n self.camera.cam.release()\n exit()\n \n for key in 
self.key_controls.keys():\n if chr(self.pressed) == key:\n self.key_controls[key]()", "def main(self):\n while 1:\n events = get_gamepad()\n for event in events:\n\n if(event.ev_type == \"Absolute\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.absolute_switch[ self.map[GAMEPAD][event.code] ](event.state)\n\n\n if(event.ev_type == \"Key\" ):\n\n if event.code in self.map[GAMEPAD].keys():\n self.btn_switch[ self.map[GAMEPAD][event.code] ](self.map[GAMEPAD][event.code], event.state)\n \n\n\n\n #print(event.ev_type, event.code, event.state)", "def test_keyboard_characters(self):\n pass", "def _get_code_command_windows():\n while 1:\n print('Use \\'E\\', \\'S\\', \\'W\\', \\'N\\'' +\\\n '[+ 1-9] to move. Or \\'q\\' to give up.')\n hitkeys = input()\n if len(hitkeys) > 0:\n char_ = hitkeys[0].upper()\n if char_ in 'ESNW':\n if len(hitkeys) == 2:\n num_ = hitkeys[1]\n if num_ in '123456789':\n return char_ + num_\n else:\n return char_ + '1'\n elif char_ == 'Q':\n return 'end'", "def transfer_menu():\n print(\"What type of transfer do you want to use?\")\n for key in sorted(TRANSFER_MENU_SELECTIONS):\n print(\"[%s] %s\" % (key, TRANSFER_MENU_SELECTIONS[key]))\n choice = raw_input(\"> \")\n while choice not in list(TRANSFER_MENU_SELECTIONS.keys()):\n choice = raw_input(\"> \")\n return choice", "def key_wait():\n while 1:\n for event in get():\n if event.type == 'KEYDOWN':\n return event\n if event.type == 'QUIT':\n # convert QUIT into alt+F4\n return KeyDown('F4', '', True, False, True, False, False)\n _time.sleep(.001)", "def main_menu() -> None:\n option_list = (\"1\", \"quest\", \"2\", \"inventory\", \"3\", \"shop\", \"4\", \"stats\", \"5\", \"load\", \"save\",\n \"6\", *exit_list, \"code\")\n\n print(MenuSprites.main_menu)\n\n while (selection := input(\">\").lower()) not in option_list:\n print(f\"Invalid selection: {selection}\")\n\n with suppress(ValueError):\n selection = int(selection)\n\n if selection in [1, \"quest\"]:\n start_game(_inv=inv)\n return main_menu()\n\n elif selection in [2, \"inventory\"]:\n inv.display.inventory_display()\n return main_menu()\n\n elif selection in [3, \"shop\"]:\n ShopMenu(inv)\n\n elif selection in [4, \"stats\"]:\n inv.display.stats_display(in_loop=False)\n return main_menu()\n\n elif selection in [5, \"save\", \"load\"]:\n\n if selection not in [\"save\", \"load\"]:\n while selection := input(\"Load or save a character file?:\\n\").lower() not in (\"save\", \"load\"):\n print(\"Invalid selection\")\n\n if selection == \"save\":\n inv.save()\n return main_menu()\n\n elif selection == \"load\":\n inv.load()\n return main_menu()\n\n elif selection in [6, *exit_list]:\n quit()\n\n elif selection == \"code\":\n with open(\"DevCode.txt\", 'r') as f:\n code = str(f.read())\n\n inp = input(\"Enter code\")\n\n if inp == code:\n inv.state.dev_mode = True\n\n return main_menu()", "def main_menu():\n os.system('cls||clear')\n choosing = True\n print(game_menu)\n while choosing:\n option = input('Pick an option: ')\n if option == '1':\n choosing = False\n os.system('cls||clear')\n game_loop()\n elif option == '2':\n choosing = False\n add_word()\n elif option == '3':\n choosing = False\n else:\n print('Invalid option! 
Try again.')", "def key_C(buf, input_line, cur, count):\n weechat.command(\"\", \"/input delete_end_of_line\")\n set_mode(\"INSERT\")", "def test_wait_for_key_pressed(self, cpu):\n keys_to_test = [None] + range(0x0, 0xF+1)\n for key in keys_to_test:\n cpu.program_counter = 0\n cpu.opcode = 0xF00A\n cpu.V_register[0] = 0\n cpu.keyboard.key_down = key\n cpu.wait_for_key_pressed()\n cpu.program_counter += 2\n if key is None:\n assert(cpu.V_register[0] == 0)\n assert(cpu.program_counter == 0)\n else:\n assert(cpu.program_counter == 2)\n assert(cpu.V_register[0] == key)", "def presetRead():\n\n global preset_sw \n \n while True:\n for i in range(6):\n preset_sw[i][1] = GPIO.input(preset_sw[i][0])\n sleep (1)", "def main():\n switch_dict = {\"1\": thanks, \"2\": run_report, \"3\": create_thank_you_all, \"4\": exit_program}\n while True:\n response = input(menu)\n if response in switch_dict:\n switch_dict[response]()\n else:\n print(\"Not a valid option!\")", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def send_keys(*input_seq):\n for chunk in input_seq:\n if isinstance(chunk, int):\n send_key_event(chunk)\n yield timeout(10)\n elif isinstance(chunk, str):\n for c in chunk:\n if c == \"\\n\":\n send_key_event(GDK_RETURN)\n else:\n send_key_event(ord(c))\n yield timeout(10)", "def do_terminal(self):\n if (self.is_connected):\n self.mySerialConnection.do_serial()", "def cb_key_combo_default(data, signal, signal_data):\n global esc_pressed, vi_buffer, cmd_text\n\n # If Esc was pressed, strip the Esc part from the pressed keys.\n # Example: user presses Esc followed by i. This is detected as \"\\x01[i\",\n # but we only want to handle \"i\".\n keys = signal_data\n if esc_pressed or esc_pressed == -2:\n if keys.startswith(\"\\x01[\" * esc_pressed):\n # Multiples of 3 seem to \"cancel\" themselves,\n # e.g. Esc-Esc-Esc-Alt-j-11 is detected as \"\\x01[\\x01[\\x01\"\n # followed by \"\\x01[j11\" (two different signals).\n if signal_data == \"\\x01[\" * 3:\n esc_pressed = -1 # `cb_check_esc()` will increment it to 0.\n else:\n esc_pressed = 0\n # This can happen if a valid combination is started but interrupted\n # with Esc, such as Ctrl-W→Esc→w which would send two signals:\n # \"\\x01W\\x01[\" then \"\\x01W\\x01[w\".\n # In that case, we still need to handle the next signal (\"\\x01W\\x01[w\")\n # so we use the special value \"-2\".\n else:\n esc_pressed = -2\n keys = keys.split(\"\\x01[\")[-1] # Remove the \"Esc\" part(s).\n # Ctrl-Space.\n elif keys == \"\\x01@\":\n set_mode(\"NORMAL\")\n return weechat.WEECHAT_RC_OK_EAT\n\n # Nothing to do here.\n if mode == \"INSERT\":\n return weechat.WEECHAT_RC_OK\n\n # We're in Replace mode — allow \"normal\" key presses (e.g. \"a\") and\n # overwrite the next character with them, but let the other key presses\n # pass normally (e.g. backspace, arrow keys, etc).\n if mode == \"REPLACE\":\n if len(keys) == 1:\n weechat.command(\"\", \"/input delete_next_char\")\n elif keys == \"\\x01?\":\n weechat.command(\"\", \"/input move_previous_char\")\n return weechat.WEECHAT_RC_OK_EAT\n return weechat.WEECHAT_RC_OK\n\n # We're catching keys! Only \"normal\" key presses interest us (e.g. \"a\"),\n # not complex ones (e.g. 
backspace).\n if len(keys) == 1 and catching_keys_data['amount']:\n catching_keys_data['keys'] += keys\n catching_keys_data['amount'] -= 1\n # Done catching keys, execute the callback.\n if catching_keys_data['amount'] == 0:\n globals()[catching_keys_data['callback']]()\n vi_buffer = \"\"\n weechat.bar_item_update(\"vi_buffer\")\n return weechat.WEECHAT_RC_OK_EAT\n\n # We're in command-line mode.\n if cmd_text:\n # Backspace key.\n if keys == \"\\x01?\":\n # Remove the last character from our command line.\n cmd_text = list(cmd_text)\n del cmd_text[-1]\n cmd_text = \"\".join(cmd_text)\n # Return key.\n elif keys == \"\\x01M\":\n weechat.hook_timer(1, 0, 1, \"cb_exec_cmd\", cmd_text)\n cmd_text = \"\"\n # Input.\n elif len(keys) == 1:\n cmd_text += keys\n # Update (and maybe hide) the bar item.\n weechat.bar_item_update(\"cmd_text\")\n if not cmd_text:\n weechat.command(\"\", \"/bar hide vi_cmd\")\n return weechat.WEECHAT_RC_OK_EAT\n # Enter command mode.\n elif keys == \":\":\n cmd_text += \":\"\n weechat.command(\"\", \"/bar show vi_cmd\")\n weechat.bar_item_update(\"cmd_text\")\n return weechat.WEECHAT_RC_OK_EAT\n\n # Add key to the buffer.\n vi_buffer += keys\n weechat.bar_item_update(\"vi_buffer\")\n if not vi_buffer:\n return weechat.WEECHAT_RC_OK\n\n # Check if the keys have a (partial or full) match. If so, also get the\n # keys without the count. (These are the actual keys we should handle.)\n # After that, `vi_buffer` is only used for display purposes — only\n # `vi_keys` is checked for all the handling.\n # If no matches are found, the keys buffer is cleared.\n matched, vi_keys, count = get_keys_and_count(vi_buffer)\n if not matched:\n vi_buffer = \"\"\n return weechat.WEECHAT_RC_OK_EAT\n\n buf = weechat.current_buffer()\n input_line = weechat.buffer_get_string(buf, \"input\")\n cur = weechat.buffer_get_integer(buf, \"input_pos\")\n\n # It's a key. If the corresponding value is a string, we assume it's a\n # WeeChat command. Otherwise, it's a method we'll call.\n if vi_keys in VI_KEYS:\n if isinstance(VI_KEYS[vi_keys], str):\n for _ in range(max(count, 1)):\n # This is to avoid crashing WeeChat on script reloads/unloads,\n # because no hooks must still be running when a script is\n # reloaded or unloaded.\n if VI_KEYS[vi_keys] == \"/input return\":\n return weechat.WEECHAT_RC_OK\n weechat.command(\"\", VI_KEYS[vi_keys])\n current_cur = weechat.buffer_get_integer(buf, \"input_pos\")\n set_cur(buf, input_line, current_cur)\n else:\n VI_KEYS[vi_keys](buf, input_line, cur, count)\n # It's a motion (e.g. \"w\") — call `motion_X()` where X is the motion, then\n # set the cursor's position to what that function returned.\n elif vi_keys in VI_MOTIONS:\n if vi_keys in SPECIAL_CHARS:\n func = \"motion_%s\" % SPECIAL_CHARS[vi_keys]\n else:\n func = \"motion_%s\" % vi_keys\n end, _, _ = globals()[func](input_line, cur, count)\n set_cur(buf, input_line, end)\n # It's an operator + motion (e.g. \"dw\") — call `motion_X()` (where X is\n # the motion), then we call `operator_Y()` (where Y is the operator)\n # with the position `motion_X()` returned. 
`operator_Y()` should then\n # handle changing the input line.\n elif (len(vi_keys) > 1 and\n vi_keys[0] in VI_OPERATORS and\n vi_keys[1:] in VI_MOTIONS):\n if vi_keys[1:] in SPECIAL_CHARS:\n func = \"motion_%s\" % SPECIAL_CHARS[vi_keys[1:]]\n else:\n func = \"motion_%s\" % vi_keys[1:]\n pos, overwrite, catching = globals()[func](input_line, cur, count)\n # If it's a catching motion, we don't want to call the operator just\n # yet -- this code will run again when the motion is complete, at which\n # point we will.\n if not catching:\n oper = \"operator_%s\" % vi_keys[0]\n globals()[oper](buf, input_line, cur, pos, overwrite)\n # The combo isn't completed yet (e.g. just \"d\").\n else:\n return weechat.WEECHAT_RC_OK_EAT\n\n # We've already handled the key combo, so clear the keys buffer.\n if not catching_keys_data['amount']:\n vi_buffer = \"\"\n weechat.bar_item_update(\"vi_buffer\")\n return weechat.WEECHAT_RC_OK_EAT", "def _cmdloop(self):\n # An almost perfect copy from Cmd; however, the pseudo_raw_input portion\n # has been split out so that it can be called separately\n if self.use_rawinput and self.completekey:\n try:\n import readline\n self.old_completer = readline.get_completer()\n readline.set_completer(self.complete)\n readline.parse_and_bind(self.completekey + \": complete\")\n except ImportError:\n pass\n stop = None\n try:\n while not stop:\n if self.cmdqueue:\n line = self.cmdqueue.pop(0)\n else:\n line = self.pseudo_raw_input(self.prompt)\n if self.echo and isinstance(self.stdin, file):\n self.stdout.write(line + '\\n')\n stop = self.onecmd_plus_hooks(line)\n finally:\n if self.use_rawinput and self.completekey:\n try:\n import readline\n readline.set_completer(self.old_completer)\n except ImportError:\n pass\n return stop", "def pause_handler(term):\n inp = None\n while inp not in (\"p\", \"P\", \"q\", \"Q\"):\n print(term.home + term.clear + term.move_y(term.height // 2))\n print(term.black_on_white(term.center(\"press P to continue.\")))\n\n inp = term.inkey(timeout=10)", "def _getch_windows(prompt):\n print(prompt, end=\"\")\n key = msvcrt.getch()\n if ord(key) == 224:\n key = msvcrt.getch()\n return key\n print(key.decode())\n return key.decode()", "def _read_keypad(self):\n pressed = set(self.keypad.pressed_keys)\n # default : not pressed => EDGE_HIGH\n self._key_edges = [self.EDGE_HIGH] * _NEO_TRELLIS_NUM_KEYS\n for k in pressed:\n self._key_edges[k] = self.EDGE_LOW\n for k in pressed - self._current_press:\n self._key_edges[k] = self.EDGE_RISING\n for k in self._current_press - pressed:\n self._key_edges[k] = self.EDGE_FALLING\n\n self._current_press = pressed\n self._current_events = bytearray()\n\n for k in range(_NEO_TRELLIS_NUM_KEYS):\n if (self._events[k] >> self._key_edges[k]) & 0x1:\n raw_evt = (_to_seesaw_key(k) << 2) | self._key_edges[k]\n self._current_events.append(raw_evt)", "def run_prank():\n\n swap_command = 'xmodmap -e \"keycode {code} = {key}\"'\n get_command = 'xmodmap -pk | grep \"({key})\"'\n\n alphanum = string.lowercase + string.digits\n key1 = random.choice(alphanum)\n key2 = random.choice(alphanum)\n\n get_code1 = subprocess.Popen(get_command.format(key=key1), shell=True, stdout=subprocess.PIPE)\n get_code2 = subprocess.Popen(get_command.format(key=key2), shell=True, stdout=subprocess.PIPE)\n\n code1 = get_code1.stdout.read().split()[0]\n code2 = get_code2.stdout.read().split()[0]\n\n\n subprocess.Popen(swap_command.format(code=code2,key=key1), shell=True)\n subprocess.Popen(swap_command.format(code=code1,key=key2), shell=True)\n\n return 
True, 'swapping {key1} and {key2}'.format(key1=key1, key2=key2)" ]
[ "0.60768646", "0.5757084", "0.57472295", "0.572881", "0.5679723", "0.5675239", "0.56368023", "0.5612974", "0.560551", "0.5580252", "0.55787677", "0.5541301", "0.55385447", "0.5523714", "0.54723734", "0.5423755", "0.54224914", "0.5421046", "0.54165447", "0.5408846", "0.5373981", "0.5367405", "0.5362362", "0.53611", "0.5317438", "0.5311899", "0.5284995", "0.52671164", "0.526561", "0.5253077", "0.5249608", "0.5241042", "0.52238464", "0.52170515", "0.52081466", "0.52036005", "0.5200089", "0.5153805", "0.51495814", "0.51362324", "0.51251185", "0.51182336", "0.5114804", "0.5111404", "0.51113766", "0.5111322", "0.5111244", "0.51111126", "0.5106318", "0.50908357", "0.5090618", "0.5087321", "0.50868684", "0.50805604", "0.5071598", "0.5071022", "0.506125", "0.50544226", "0.5050632", "0.504586", "0.5045834", "0.5039309", "0.50382", "0.5035429", "0.5024518", "0.5015314", "0.50110734", "0.49999028", "0.49874404", "0.49872974", "0.49871892", "0.49834943", "0.49817544", "0.4967794", "0.49613735", "0.49574316", "0.49562275", "0.49498007", "0.49480858", "0.49478143", "0.4946333", "0.49454713", "0.49452096", "0.49437436", "0.49435955", "0.49425605", "0.49423003", "0.49418974", "0.49337965", "0.49332878", "0.49284074", "0.49259633", "0.49204886", "0.49199417", "0.4916266", "0.49080035", "0.4907584", "0.4891333", "0.4886977", "0.48868743" ]
0.66630965
0
Getting mrp (most recent price). Returns None if no price exists
def _get_mrp(journal):
    try:
        return Price.objects.filter(journal__issn=journal.issn).order_by('-date_stamp')[0]
    except IndexError:
        return None
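A minimal usage sketch of the record's document code, assuming a Django project where `Price` has a `journal` foreign key exposing `issn` and a `date_stamp` field, as the filter above implies; `journal` stands for any Journal instance and is not defined in the record itself:

# Hypothetical caller: fetch the most recent price for a journal, if any.
latest = _get_mrp(journal)
if latest is not None:
    print(f"Most recent price for {journal.issn}: {latest}")
else:
    print(f"No price recorded for {journal.issn}")

As a design note, Django's `QuerySet.first()` expresses the same fallback without the try/except: `...order_by('-date_stamp').first()` returns `None` on an empty queryset.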
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def get_last_price(args):\n\tmarket = get_market(args)\n\trequest = api.get_ticker(market)\n\tif not request['message']:\n\t\tlast = str(request['result']['Last'])\n\t\treturn (last)\n\telse:\n\t\tprint(request['message'])\n\t\tsys.exit(0)", "def query_last_price(market_data):\n print(\"Consultando el último precio\")\n if market_data[\"marketData\"][\"LA\"]:\n last_price = market_data[\"marketData\"][\"LA\"][\"price\"]\n print(\n f\"Último precio operado: ${last_price:,.2f}\".replace('.', ','))\n return last_price\n print(\"Último precio operado: No hay datos disponibles\")\n return None", "def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here", "def get_last_price_tmp(market):\n\trequest = api.get_ticker(market)\n\tif not request['message']:\n\t\tlast = str(request['result']['Last'])\n\t\treturn (last)\n\telse:\n\t\tprint(request['message'])\n\t\tsys.exit(0)", "def get_cheapest_price(self, movie_title):\n self.get_all_movies()\n movie_list = self.title_map.get(movie_title.strip().lower(), [])\n\n if movie_list is None:\n return None\n\n pick_list = []\n for movie_info in movie_list:\n try:\n movie_id = movie_info['ID']\n movie_world = movie_info['world']\n except KeyError as e:\n print(\"Price is not available for {}\".format(movie_title))\n continue\n pick_list.append({'id': movie_id, 'world': movie_world})\n\n if pick_list is None:\n return None\n\n pool = Pool(2)\n movies_list = pool.map(self.get_movie_from_id, pick_list)\n pool.close()\n pool.join()\n\n # Set price as maximum float value in start to find minimum value\n price = sys.float_info.max\n print(\"\\nMovie info from different worlds:\\n\")\n for movie in movies_list:\n if movie is None:\n continue\n print(\"[{}]\".format(movie['world']))\n for key, value in movie.items():\n print(\" {} = {}\".format(key, value))\n print(\"\\n\")\n try:\n movie_price = float(movie['Price'])\n except KeyError as e:\n print(\"Price is not available for {}\".format(movie_title))\n continue\n if movie_price < price:\n price = movie_price\n\n if price == sys.float_info.max:\n return None\n\n return str(price)", "def get_price(horizon_host, pair):\n print \"fetching latest price for:\" + pair[\"name\"]\n params = make_trade_params(pair)\n res = requests.get(horizon_host + \"/trades\", params).json()\n try:\n trade_record = res[\"_embedded\"][\"records\"][0]\n except IndexError:\n return DatedPrice(date=datetime.utcfromtimestamp(0), price=0)\n price = float(trade_record[\"price\"][\"n\"]) / float(trade_record[\"price\"][\"d\"])\n timestamp = parser.parse(trade_record[\"ledger_close_time\"])\n return DatedPrice(date=timestamp, price=price)", "def best_promo(order: Order) -> Decimal:\n return max(promo(order) for promo in promos) # <3>", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n jsonResponse = self.getJson(\"https://poloniex.com/public?command=returnTicker\")\n currentPrice = jsonResponse[pair][\"last\"]\n return currentPrice", "def get_order_price(self):\r\n if self.price is not None:\r\n return self.price #typical limit price order\r\n else:\r\n #Check the orderbook\r\n logger.info(\"floating price\")\r\n self.get_orderbook()\r\n logger.info(self.orderbook_snapshot)\r\n\r\n pass", "def get_price(data):\n return data[\"summaryDetail\"][\"regularMarketPreviousClose\"][\"raw\"]", "def getCurrentPrice(self,primary,secondary):\n pair = 
self.getTradedPair(primary,secondary)\n uri = \"https://bittrex.com/api/v1.1/public/getticker?market=\"+pair\n jsonResponse = self.getJson(uri)\n currentPrice = jsonResponse[\"result\"][\"Last\"]\n return currentPrice", "def __find_max_price(self):\n prices_map = map(\n lambda iceberg: utils.get_actual_penguin_amount(\n self.__game, iceberg),\n self.__game.get_all_icebergs()\n )\n return max(prices_map)", "def max_price(self):\n return self._max_price", "def best_price(self):\n # TODO rename this to \"display_lowest_price\" or something...\n price = self.lowest_price\n if price:\n return Decimal.quantize(price.per_kg, settings.DECIMAL_CENTS) # round to cents", "def get_last_prices(self):\n return self.last_results", "def execQ5():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n cheapest = frame.sort_values(by='Price', ascending=True).head(1)\n return cheapest", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitstamp.net/api/v2/ticker/\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"last\"]\n return currentPrice", "def get_current_price(self):\n highest_bid = sorted([bid.amount for bid in self.bids])[-1] if self.bids else 0\n return max(self.starting_price, highest_bid)", "def get_price(res_obj):\n selector = '.price-current'\n price = res_obj.html.find(selector, first=True)\n return price.text", "def buy_one_get_one(products):\n if 'p1' in products and products['p1'] >= 2:\n return -20\n else:\n return 0", "def best_promo(order):\n return max(promo(order) for promo in promos)", "def get_last_price(self, stock_object, time_zone=None):\n time_zone = TraderBase.get_timezone()\n if self.client:\n return self.client.get_last_price(stock_object)\n # get last stock price by database\n price = self.db_tool.session.query(Series)\\\n .join(Stock)\\\n .filter(Stock.id == stock_object.id)\\\n .filter(Series.date <= datetime.datetime.now(time_zone))\\\n .order_by(-Series.date).first()\n\n if not price:\n return None\n return price.priceclose", "def get_latest(self):\n url = f\"{self.get_api_url()}+latest\"\n # set api parameters\n params = {}\n params.update({'base': self.base_currency})\n params.update({'symbols': ','.join(self.target_currency_codes)})\n # call the api for rates\n response = requests.get(url, params=params)\n if response.status_code == 200:\n base, rates = response.json().get('base'), response.json().get('rates')\n # remove base currency from rates if it is returned by the data source\n rates.pop(self.base_currency, None)\n return base, rates\n return None, None", "def get_last_close(self, symbol):\n if symbol in self.symbol:\n close_price = self.symbol[symbol][\"close\"]\n return close_price\n else:\n print(\n \"Close price for ticker %s is not \"\n \"available from the YahooDailyBarPriceHandler.\"\n )\n return None", "def get(self, price, way):\n for offer in self.book[way]:\n if offer.get_price() == price:\n return offer\n return None", "def target_buy_price(self):\n if self.period_tick == 0:\n return random.randint(1, 10)\n elif self.period_tick % self.perseverance == 0:\n # Player runs out of patience and decides to change target price.\n (avg_price,\n max_price,\n min_price) = self.market.get_stock_price_last_period()\n\n power = self.period_tick / self.perseverance\n target_price = min(min_price + power, self.money_balance * 0.5)\n return target_price\n else:\n return None", "def best_bid_price(orders: pandas.DataFrame):\n return 
best_bid_order(orders).price", "def get_price_1_cumulative_last(self, pair):\n pair_contract = self.conn.eth.contract(\n address=Web3.toChecksumAddress(pair), abi=SushiswapClient.PAIR_ABI)\n return pair_contract.functions.price1CumulativeLast().call()", "def get_last_rates(limit: int):\n conn = sqlite3.connect(CONF.database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)\n curs = conn.cursor()\n try:\n return curs.execute(\"SELECT price FROM rates ORDER BY date_time DESC LIMIT {}\".format(limit)).fetchall()\n finally:\n curs.close()\n conn.close()", "def get_price(ticker_symbol, page=None):\n if page is None:\n page = scrape_page(BASE_URL + ticker_symbol)\n\n sentiment = page.xpath(PRICE_XPATH)\n\n if not sentiment:\n return None\n else:\n return sentiment[0].replace(\"\\n\", \"\")", "def get_price_0_cumulative_last(self, pair):\n pair_contract = self.conn.eth.contract(\n address=Web3.toChecksumAddress(pair), abi=SushiswapClient.PAIR_ABI)\n return pair_contract.functions.price0CumulativeLast().call()", "def latest_date_price(dated_price_a, dated_price_b):\n return dated_price_a if dated_price_a.date > dated_price_b.date else dated_price_b", "def get_product_price(product):\n return latest_product_version(product).price", "def get_price(self, field_name='PRICES'):\n price_data = self.get_price_data()\n return price_data.get('price') or self.find_price(self.get_default(field_name))", "def best_ask_price(orders: pandas.DataFrame):\n return best_ask_order(orders).price", "def getBestOption(self):\n if len(self.Data) < 1:\n return None\n else:\n bestR = max(self.Data.items(), key=lambda x: x[1]['SPat'].I)\n return bestR[1]", "def get_price():\n \n #Teacher's code. Could not get it working.\n #price = db(db.product.name == productName).select(db.product.price)[0].price\n \n \n return (200)", "def get_last_bid(self, username):\n try:\n good_bid = AuctionBids.objects.filter(good=self, user__username=username).latest('created')\n return good_bid.user_price\n except ObjectDoesNotExist:\n return None", "def get_first_product_by_priority(self):\n products = self.products.filter(type=\"S\").order_by(\"billing_priority\")\n if products.exists():\n return products.first()\n else:\n return None", "def get_price(self):\n\n if self.price: return self.price\n # retrieve from args and return if exists\n price = Settings.get_price() or None\n if price: \n self.price = price\n return price\n if not Settings.prompt(\"price\"): return \"\"\n question = {\n 'type': 'input',\n 'name': 'price',\n 'message': 'Price',\n 'validate': PriceValidator,\n 'filter': lambda val: int(val)\n }\n price = prompt(question)[\"price\"]\n if not Settings.confirm(price): return self.get_price()\n self.price = price\n return self.price", "def get_option_current_price(\n symbol: str,\n source: str = \"Nasdaq\",\n):\n\n source = re.sub(r\"\\s+\", \"\", source.lower())\n output = None\n if source == \"tradier\":\n output = tradier_model.get_last_price(symbol)\n if source == \"nasdaq\":\n output = nasdaq_model.get_last_price(symbol)\n if source == \"yahoofinance\":\n output = yfinance_model.get_last_price(symbol)\n\n if not output:\n logger.info(\"Invalid Source or Symbol\")\n console.print(\"Invalid Source or Symbol\")\n return 0.0\n\n return output", "def get_stock_symbol_with_highest_cap():\n #data2 = _cap_str_to_mln_float('cap')\n symbol_max = dict()\n for items in data:\n if items['symbol'] in symbol_max.keys():\n symbol_max[items['symbol']] = max(symbol_max[items['symbol']], _cap_str_to_mln_float(items['cap']))\n 
else:\n symbol_max[items['symbol']] = _cap_str_to_mln_float(items['cap'])\n\n value = sorted(symbol_max.items(), key = lambda x:x[1], reverse=True)[0][0]\n #sorted(symbol_max.items(), key = lambda x:x[1])\n return value", "def stock_max(stock):\n max_price=0\n for i in stock['Close']:\n if i > max_price:\n max_price=i\n return max_price", "def get_stock_symbol_with_highest_cap():\n #highest_cap = 0\n highest_cap_stock = max(data, key=lambda counter: _cap_str_to_mln_float(counter['cap']))\n #for counter in data:\n # if _cap_str_to_mln_float(counter['cap']) > highest_cap:\n # highest_cap_stock = counter['symbol']\n # highest_cap = _cap_str_to_mln_float(counter['cap'])\n return highest_cap_stock['symbol']", "def get_price(self):\r\n return self.price", "def get_popular_stocks(self):\n response = requests.get('https://brokerage-static.s3.amazonaws.com/popular_stocks/data.json')\n response_json = response.json()\n assert len(response_json) == 1\n return response_json[0]", "def priceGetMost(soup):\n main = soup.find('span', class_='price-large')\n main = main.text\n main = main.strip()\n main = float(main)\n # Extract Cents\n centsList = soup.findAll('span', class_='a-size-small price-info-superscript')\n cents = centsList[1]\n cents = cents.text\n cents = cents.strip()\n cents = '.' + cents\n cents = float(cents)\n price = main + cents\n\n return price", "def highest_bid(self):\n (price_eur, volume, _) = self._order_book['bids'][0]\n price_usd = Decimal(price_eur) * self._eurusd_rate\n return {\n 'price': Decimal(price_usd),\n 'volume': Decimal(volume),\n }", "def get_latest(self):\n if len(self.points) == 0:\n return None\n return self.points[-1]", "def get_latest(self, name):\n return self._scalar_history.get_latest(name)[1]", "def get_current_price(limit: int = None, attempts: int = 0):\n try:\n price = EXCHANGE.fetch_ticker(CONF.pair)['bid']\n if not price:\n LOG.warning('Price was None')\n sleep_for(1, 2)\n get_current_price(limit, attempts)\n else:\n return int(price)\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.debug('Got an error %s %s, retrying in 5 seconds...', type(error).__name__, str(error.args))\n attempts += 1\n if not limit or attempts < limit:\n sleep_for(4, 6)\n get_current_price(limit, attempts)\n else:\n return 0", "def best_buy(self):\n return Library.functions.best_buy(self._book)", "def update_highest_buy(self, limit):\n if limit.size == 0:\n #predecessor case\n limit = self.buy_tree.predecessor(limit)\n if limit is None:\n #no predecessor\n self.highest_buy = None\n else: # have a predecessor but dont know if it has order or not\n if limit.size == 0: #limit has no order but other limits in the tree might have orders\n if self.buy_tree.size == 0: #we know no other limits have an order\n self.highest_buy = None\n else: #other limits have an order\n while limit.size == 0:\n limit = self.buy_tree.predecessor(limit)\n #now our limit has a valid order\n self.highest_buy = limit.price\n else: #found valid pred\n self.highest_buy = limit.price", "def get_most_and_least_expensive_high_review_product(df):\n try:\n df3 = merge_metadata(df)\n product_filter = df3['overall'] >= 4.0\n high_reviewed_products = df3[product_filter]\n # print high_reviewed_products[:10]\n # The data contained NaN so we use the nanmax/min funtions to get max/min\n most_exp = round(np.nanmax(high_reviewed_products['price'])[0], 2)\n least_exp = round(np.nanmin(high_reviewed_products['price'])[0], 2)\n\n most_exp_prod = df3.loc[df3['price'] == most_exp, 'asin'].iloc[0]\n least_exp_prod 
= df3.loc[df3['price'] == least_exp, 'asin'].iloc[0]\n write_text_tofile(\"Most Expensive Product: \" + str(most_exp_prod) + \", Price: \" + str(most_exp))\n write_text_tofile(\"Least Expensive Product: \" + str(least_exp_prod) + \", Price: \" + str(least_exp))\n return {most_exp_prod: most_exp, least_exp_prod: least_exp}\n except Exception as e:\n print \"Error getting most and least expensive high review product\"\n print str(e)\n pass", "def best_bid_order(orders: pandas.DataFrame) -> pandas.Series:\n bds = bids(orders)\n index = bds[bds.price == bds.price.max()]['amount'].idxmin()\n return bds.loc[index]", "def max_key(self):\n return self._price_list[-1]", "def _get_price(self, soup):\n\n try:\n # price tag\n price = soup.find('div', class_='c-price').get_text().replace(',','') # clean up the text\n return self._extract_num(price) # extract number from the text\n except:\n return None", "def get_price(self):\n return self.price", "def get_price(self):\n return self.price", "def get_price(self):\n return self.price", "def getMostRecent(self):\n if len(self.recent):\n return self.recent[0]\n else:\n return None", "def execQ3():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n tuna = frame[dFrame.Series_title_1 == \"Tuna - canned (supermarket only), 185g\"]\n cheapest = tuna.sort_values(by=\"Price\").head(1)\n return cheapest", "def get_item_id_sold_last():\n\n # your code", "def get_closed_order():\n try:\n result = EXCHANGE.fetch_closed_orders(CONF.pair, since=None, limit=3, params={'reverse': True})\n if result is not None and len(result) > 0:\n orders = sorted(result, key=lambda order: order['datetime'])\n last_order = Order(orders[-1])\n LOG.info('Last %s', str(last_order))\n return last_order\n return None\n\n except (ccxt.ExchangeError, ccxt.NetworkError) as error:\n LOG.error(RETRY_MESSAGE, type(error).__name__, str(error.args))\n sleep_for(4, 6)\n get_closed_order()", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://www.bitfinex.com/v2/ticker/t\"\n requestUrl = uri + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[0]\n return currentPrice", "def price(self, symbol: str) -> MetatraderSymbolPrice:\n return self._pricesBySymbol[symbol] if (symbol in self._pricesBySymbol) else None", "def get_current_price(self):\n URL = config.coin['price_hist_url'] + self.ticker.lower()\n try:\n r = requests.get(URL)\n data = json.loads(r.text)\n value = data['last']\n timestamp = data['timestamp']\n self.current_price = value\n self.current_datetime = timestamp\n except Exception as err:\n logger.error(err)", "def get_price_for_volume_at(conn, sticker, limit_price, volume, is_back, timestamp):\n tick = get_last_tick_before(conn, sticker, timestamp)\n rets = get_volume_at_price([tick], limit_price, volume, is_back)\n return rets[0]", "def get_futbin_price_lastupdated(self, ID):\n r = requests.get(\n 'https://www.futbin.com/22/playerPrices?player={0}'.format(ID))\n # r = requests.get('https://www.futbin.com/20/playerGraph?type=daily_graph&year=20&player={0}'.format(ID))\n data = r.json()\n\n price = data[str(ID)][\"prices\"][\"xbox\"][\"LCPrice\"]\n lastupdated = data[str(ID)][\"prices\"][\"xbox\"][\"updated\"]\n\n if (lastupdated == \"Never\"):\n return 0, 100\n elif (\"mins ago\" in lastupdated):\n lastupdated = lastupdated[:-9]\n lastupdated = int(lastupdated)\n elif(\"hour ago\" in lastupdated):\n lastupdated = lastupdated[:-9]\n lastupdated = int(lastupdated) * 60\n elif(\"hours 
ago\" in lastupdated):\n lastupdated = lastupdated[:-10]\n lastupdated = int(lastupdated) * 60\n elif(\"seconds\" in lastupdated):\n lastupdated = 1\n elif(\"second\" in lastupdated):\n lastupdated = 1\n else:\n return 0, 100\n\n price = price.replace(\",\", \"\")\n price = int(price)\n\n # MINUTES\n lastupdated = int(lastupdated)\n return price, lastupdated", "def getNextHighest(self):\r\n maxScore = -1\r\n idx = -1\r\n for i, s in enumerate(self.scores):\r\n if s.score > maxScore:\r\n maxScore = s.score\r\n idx = i\r\n if idx != -1:\r\n score = self.scores[idx]\r\n del self.scores[idx]\r\n return score\r\n else:\r\n return None", "def get_most_volatile(prices):\n # TODO: Fill in this function.\n #I have tried to select the specific column and then apply the standard deviation to \n # check the volatility to a column to see how it works.\n \n \n price_modified=prices.groupby(prices['ticker'])\n # print(price_modified.price.rolling(2).std())", "def get_latest_price(fsyms, tsyms, e='all', try_conversion=True, full=False, \n format='raw'):\n\n\t# select API function based on 'full' parameter value\n\tif not full:\n\t\tfunc = 'pricemulti'\n\telse:\n\t\tfunc = 'pricemultifull'\n\n\t# load data\n\turl = build_url(func, fsyms=fsyms, tsyms=tsyms, e=e, \n\t try_conversion=try_conversion)\n\tdata = load_data(url)\n\n\t# select right format to return for full requests\n\tif full and format == 'raw':\n\t\tdata = data['RAW']\n\telif full and format == 'display':\n\t\tdata = data['DISPLAY']\n\n\treturn data", "def priceGetAll(soup):\n try:\n price = priceGetMost(soup)\n except:\n price = priceGetSome(soup)\n\n return price", "def latest_price_info(symbol: str,\n exchange: str = CRYPTO_EXCHANGE,\n rate_limit: bool = True) -> dict:\n try:\n check_exchange_existence(exchange=exchange)\n return asyncio.get_event_loop().run_until_complete(\n latestPriceInfo(symbol=symbol,\n exchange=exchange,\n rate_limit=rate_limit))\n except Exception as exception:\n logger.error('Oops! 
An error Occurred ⚠️')\n raise exception", "def find_best_price(self, number):\n best_price = None\n\n # Go through all our routes\n for route in self.routers:\n # Find the best match from our router\n cost = route.find_best_match(number)\n\n # If our best price is not assigned or if our price is lower then we set the best price to the current\n if best_price is None or cost < best_price:\n best_price = cost\n\n return best_price", "def find_cheapest_stock(stock_list):\n\n cheap = [(i['Name'], i['PE Ratio']) for i in stock_list] # makes a list of tuples [(Stock Name, PE Ratio)]\n cheap.sort(key=lambda i: float(i[1])) # uses lambda to sort list of tuples by PE Ratio\n for i in cheap: # finds the cheapest stock that has a PE Ratio > 0\n if float(i[1]) > 0:\n low = i[0]\n for k in stock_list: # returns the stock that matches the lowest PE Ratio > 0\n if k['Name'] == low:\n return k", "def getPrice(coin,cur):\n price = 'https://api.coinmarketcap.com/v1/ticker/' + coin\n json = requests.get(price).json()\n value = json[0]['price_' + str(cur)]\n return value", "def fetchPrice(self, token):\n i = 0\n cache = self.cache\n cacheLen = len(self.cache)\n stamp = time.time()\n minStamp = stamp - self.maxCacheAge\n data = None\n while True:\n if i >= cacheLen:\n break\n cacheToken, cacheStamp, cacheData = cache[i]\n if cacheStamp < minStamp:\n print(\"CMClient: expired cache data for %s\" % cacheToken)\n cache.pop(i)\n cacheLen -= 1\n continue\n if token == cacheToken:\n data = cacheData\n i += 1\n if data:\n print(\"CMClient: returning cached data for %s\" % token)\n return data\n data = helpers.getUriAsJson(self.tickerTemplate % token)\n cache.insert(0, (token, stamp, data))\n self.saveSettings()\n print(\"CMClient: returning new data for %s\" % token)\n return data", "def best_ask_order(orders: pandas.DataFrame) -> pandas.Series:\n # DataFrames are mutable, thus not hashable. 
For this reason we cannot make use\n # of memoization but resort to such a hacky and stupid local-scoped cache.\n sks = asks(orders)\n index = sks[sks.price == sks.price.min()]['amount'].idxmax()\n return sks.loc[index]", "def showCurrentPrice():\n\n page = requests.get(\n \"https://markets.businessinsider.com/commodities/oil-price?type=wti\"\n )\n soup = BeautifulSoup(page.text, \"html.parser\")\n currentPrices = soup.find(class_=\"push-data\")\n price = str(currentPrices.next)\n\n return price", "def get_best_offer(self,way):\n if way==\"BUY\":\n return self.book[Trade.WAY_BUY][0].get_price()\n elif way==\"SELL\":\n return self.book[Trade.WAY_SELL][len(self.book[Trade.WAY_SELL])-1].get_price()", "def greatest_product_one(self, key):\n return self.greatest_product(key)[0]", "def get_stock_price(stock):\n pass", "def get_last_roulette() -> Roulette:\n return Roulette.objects.exclude(matchings_found_on=None).order_by(\"-matchings_found_on\").first()", "def prev_item(self, price):\n assert isinstance(price, Price), type(price)\n\n index = self._price_list.index(price) - 1\n if index < 0:\n raise IndexError\n prev_price = self._price_list[index]\n return prev_price, self._price_level_dictionary[prev_price]", "def getLatest(self,field):\n return self.getValue(field,-1)", "def getLatest(self,field):\n return self.getValue(field,-1)", "def updateLastPrice(self):\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(\n pytz.timezone('US/Central')).strftime(\"%H:%M\")\n\n # UPDATE POSITION LAST PRICE AND UPDATE HIGH PRICE\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n open_positions_list = []\n\n for position in open_positions:\n\n symbol = position[\"Symbol\"]\n\n if symbol not in open_positions_list:\n\n open_positions_list.append(symbol)\n\n if len(open_positions_list) > 0:\n\n resp = self.tdameritrade.getQuotes(open_positions_list)\n\n if resp:\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n if dt_central == \"15:00\":\n\n self.open_positions.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Opening_Price\": last_price}})\n\n # UPDATE QUEUE LAST PRICE\n queues = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type})\n\n queues_list = []\n\n for queue in queues:\n\n if self.asset_type == \"EQUITY\":\n\n symbol = queue[\"Symbol\"]\n\n elif self.asset_type == \"OPTION\":\n\n symbol = queue[\"Pre_Symbol\"]\n\n if symbol not in queues_list:\n\n queues_list.append(symbol)\n\n if len(queues_list) > 0:\n\n resp = self.tdameritrade.getQuotes(queues_list)\n\n for key, value in resp.items():\n\n symbol = key\n\n last_price = value[\"lastPrice\"]\n\n if self.asset_type == \"EQUITY\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": last_price}})\n\n elif self.asset_type == \"OPTION\":\n\n self.queue.update_many({\"Trader\": self.user[\"Name\"], \"Pre_Symbol\": symbol, \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id}, {\n \"$set\": {\"Last_Price\": 
last_price}})", "def execQ6():\n frame = pan.DataFrame(data, columns=['Product', 'Price', 'Period'])\n expensive = frame.sort_values(by='Price', ascending=False).head(1)\n return expensive", "def get_most_volatile(prices):\n \n prices = prices.set_index('date')\n\n stock_return_volatility = []\n\n for ticker in prices.ticker.unique():\n prices_for_ticker = prices[prices['ticker'] == ticker]['price']\n log_return = np.log(prices_for_ticker) - np.log(prices_for_ticker.shift(1))\n stock_return_volatility.append(log_return.std())\n\n volatility_series = pd.Series(stock_return_volatility, index=prices.ticker.unique())\n\n return volatility_series.idxmax()", "def strategy_best(cookies, cps, history, time_left, build_info):\n info = build_info.clone()\n best_choice = None\n best_ratio = 0.0\n choices = info.build_items()\n for item in choices:\n ratio = max_return(cookies, cps, time_left, info.get_cost(item), info.get_cps(item))\n\n if ratio >= best_ratio:\n best_choice = item\n best_ratio = ratio\n print best_ratio\n\n if (time_left * cps + cookies) < info.get_cost(best_choice):\n return None\n\n return best_choice", "def get_first_item(self):\n params = urllib.parse.urlencode({'o':'1', 'q':self.query})\n url = 'https://www.leboncoin.fr/annonces/offres/ile_de_france/?{:s}'.format(params) # Cree l'url de recherche en get\n html = urllib.request.urlopen(url)\n if url != html.geturl():\n return None\n soup = BeautifulSoup.BeautifulSoup(html, 'html5lib')\n try:\n products = soup.section.find_all('a', 'list_item clearfix trackable')\n except Exception as e:\n print('Nothing found on leboncoin')\n return None\n for product in products: # recupere les differentes informations de chaque produit\n if str(product.section.h2).strip() == 'None':\n continue\n name = product.section.h2.contents[0].strip()\n price = self.__get_price(product)\n link = 'http:' + product['href']\n return (name, price, link)\n return None", "def get_most_recent_tarball(self, pkg):\n pass", "def state(self):\n if not self._device.prices:\n return None\n\n return self._device.prices[0]", "def get_product_price(container) -> str:\r\n try:\r\n price_container = container.findAll(\"li\", {\"class\": \"price-current\"})\r\n price: str = price_container[0].strong.text + price_container[0].sup.text\r\n return price\r\n except AttributeError:\r\n return None", "def get_min_bid(order):\n bids = Bid.objects.filter(order=order).filter(bid_trickle=False)\n if bids:\n return min(bids)\n else:\n return None", "def max(self):\n return self.get_first()", "def getCurrentPrice(self,primary,secondary):\n pair = self.getTradedPair(primary,secondary)\n uri = \"https://api.kraken.com/0/public/Ticker\"\n requestUrl = uri + \"?pair=\" + pair\n jsonResponse = self.getJson(requestUrl)\n currentPrice = jsonResponse[\"result\"][pair][\"c\"]\n return currentPrice", "def findHighestPkPair(x, pkp): #{\n vrbMsg(5, 'findHighestPkPair() x = [...], pkp = ' + str(pkp))\n mi = [0,0]\n mv = x[pkp[0]]\n for i in range(1, len(pkp)): #{\n nv = x[pkp[i]]\n if(nv > mv): #{\n mi[0] = i\n mv = nv\n #}\n #}\n mv = None\n for i in range(0, len(pkp)): #{\n if(i != mi[0]): #{\n nv = x[pkp[i]]\n if((mv is None) or (nv > mv)): #{\n mi[1] = i\n mv = nv\n #}\n #}\n #}\n if(mi[0] == mi[1]): #{\n mi = [mi[0]]\n elif(mi[0] > mi[1]): #{\n mi = [mi[1], mi[0]]\n #}\n vrbMsg(5, 'findHighestPkPair() mi = ' + str(mi))\n return mi" ]
[ "0.71819454", "0.68884295", "0.68377554", "0.6776221", "0.6669529", "0.64981353", "0.635797", "0.6276958", "0.6204579", "0.61832917", "0.61458886", "0.6095841", "0.6082221", "0.5979007", "0.5943353", "0.5930433", "0.59218645", "0.5913669", "0.5907561", "0.5898487", "0.58973795", "0.58862686", "0.58853734", "0.58807683", "0.5880138", "0.5824226", "0.58102024", "0.5807867", "0.5807209", "0.5801874", "0.58016694", "0.5798019", "0.57943803", "0.57751375", "0.57649505", "0.57617587", "0.5732568", "0.573029", "0.57270896", "0.57203114", "0.57090986", "0.5704553", "0.5692343", "0.56798786", "0.5663041", "0.56510276", "0.56487286", "0.5629487", "0.5618561", "0.56071573", "0.55984354", "0.5591574", "0.5550726", "0.5542312", "0.55421335", "0.55420476", "0.5529733", "0.55234486", "0.55149925", "0.55149925", "0.55149925", "0.55003303", "0.54949665", "0.549486", "0.5488565", "0.5485393", "0.5479951", "0.547994", "0.54797727", "0.5479563", "0.54763883", "0.5476135", "0.54732454", "0.5470017", "0.5461777", "0.5460166", "0.5458956", "0.5456392", "0.5455051", "0.5440382", "0.5439841", "0.54004294", "0.53884816", "0.53878385", "0.53807634", "0.5377951", "0.53724974", "0.53724974", "0.53696847", "0.5368377", "0.53645235", "0.5361818", "0.5361243", "0.5355667", "0.53500944", "0.5342508", "0.53421396", "0.53384495", "0.53311265", "0.5329667" ]
0.68424475
2
Getting mri (most recent influence). Returns 0 if no influence exists
def _get_mri(journal):
    try:
        return Influence.objects.filter(journal__issn=journal.issn).order_by('-date_stamp')[0]
    except IndexError:
        return 0
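For contrast, a sketch of the same lookup written with `QuerySet.first()` — a hypothetical rewrite, not the record's canonical code; `Influence` and `date_stamp` come from the record above, and the import path is assumed:

from journals.models import Influence  # hypothetical app path, for illustration only

def _get_mri_first(journal):
    # Same behavior as _get_mri above: newest Influence row by date_stamp,
    # falling back to 0 when no row exists (matching the original's default).
    latest = Influence.objects.filter(journal__issn=journal.issn).order_by('-date_stamp').first()
    return latest if latest is not None else 0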
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_reward(self):\n if self.is_game_done:\n return self.price - 1\n else:\n return 0.0", "def get_reward(state, resolution, grid_x, grid_y):\n a,b = single_index_to_index(state, resolution)\n position = index_to_obs(a, b, grid_x, grid_y )[0]\n if position >= 0.5:\n return 0\n return -1", "def _compute_reward(self): \n reward = -1\n return reward", "def _get_reward(self, action):\n HIRE_COST = 1 # TODO 7/29/20 - Determine significance of this value\n\n # Lookup the state representation using the cur_state index. Then we\n # can get the candidate productivity score.\n obs = self.observation_function[self.cur_state]\n prod_score = obs[1]\n r = action*(prod_score - HIRE_COST)\n return r", "def intensity(self) -> int:", "def getReward(self):\n return self._mdp.R[self._prev_state,self._cur_state]", "def m(self, element):\n if element in self.focals:\n return self.focals[element]\n return 0", "def getIR3() -> int:\n pass", "def getIR1() -> int:\n pass", "def getIR2() -> int:\n pass", "def getReward(self, active_corr, simulator, p, active_goal):\n i_r = self.correlations[active_corr].i_reward\n # if i_r is None:\n # reward = self.simulator.getReward()\n # elif self.correlations[i_r].getCertainty() > self.threshold:\n if i_r is None:\n reward = simulator\n elif self.correlations[i_r].getCertainty(p, active_goal) > self.threshold:\n reward = 1\n else:\n reward = 0\n return reward", "def min_mireds(self):\n return 175", "def _get_reward(self):\n if self.status():\n return self.current_step/self.ep_length # the reward is proportional to the duration \n else:\n return 0", "def get_reward(self, done):\n reward = 0\n self.calc_pos_diff_ratio()\n reward = self.calc_base_reward_2(reward)\n\n return reward", "def getInteractionRate(self):\n m = mctal.MCTAL(self.name+'.m')\n t = m.tallies[4]\n # Returing the total\n return t.data[-1],t.errors[-1]", "def get_efermi(fn):\n try:\n f = open(fn)\n except:\n return 0\n line = f.readline()\n f.close()\n ef = float(line.split()[6])\n print('Calculated Fermi level: {0}'.format(ef))\n return ef", "def MFE_rel(self):\n try:\n return(self.MFE / self.price_open)\n except:\n return", "def _calculate_r0(net):\n\n r0 = 0\n for reaction in net.reactions:\n t = reaction.rate(net.species)\n r0 += t\n\n return r0", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def last_fmeasure(self):\n return self.get_fvalue(self.last_position())", "def _get_reward(self, terminal):\n if not terminal:\n return 0\n\n folded_design, _ = fold(self.design.primary)\n hamming_distance = hamming(folded_design, self.target.dot_bracket)\n if 0 < hamming_distance < self._env_config.mutation_threshold:\n hamming_distance = self._local_improvement(folded_design)\n\n normalized_hamming_distance = hamming_distance / len(self.target)\n\n # For hparam optimization\n episode_info = EpisodeInfo(\n target_id=self.target.id,\n time=time.time(),\n normalized_hamming_distance=normalized_hamming_distance,\n )\n self.episodes_info.append(episode_info)\n\n return (1 - normalized_hamming_distance) ** self._env_config.reward_exponent", "def getTotalReward(self):\n return self.cumreward", "def getTotalReward(self):\n return self.cumreward", "def get_rmax(self):\n return self.rmax", "def get_reward(self) -> float:\r\n field = self.fields[self.agent_x][self.agent_y]\r\n if field == Field.EMPTY:\r\n return self.rew_empty\r\n elif field == 
Field.POS_TERMINAL:\r\n return self.rew_pos\r\n elif field == Field.NEG_TERMINAL:\r\n return self.rew_neg\r\n\r\n raise ValueError # Agent is standing on an illegal tile!\r", "def getTotalReward(self):\n return self.lastFitness", "def get_reward(self):\n return self.calc_reward(self.sim.pose[:3], self.sim.v)", "def _compute_reward(self):\n last_score = self.episode_qualities[-2]\n new_score = self.episode_qualities[-1]\n reward = new_score - last_score\n return reward", "def ml_result(self, var, e):\n\t\tdist = self.enumerate_ask(var, e)\n\t\treturn max(dist.items(), key=lambda x:x[1])[0]", "def calculate_reward(self):\n if AT.REWARD not in self.attributes:\n return (0, 1)\n return self.attributes[AT.REWARD].calculate(self)", "def top_of_climb_index(self):\n return self.altitudes.argmax()", "def current_moisture(self) -> int:\n return int(self.get_state(self.entity_ids['current_moisture']))", "def _get_mrp(journal):\n try:\n return Price.objects.filter(journal__issn=journal.issn).order_by('-date_stamp')[0]\n except IndexError:\n return None", "def get_reward(self):\n\t\t# returns the reward for current state\n\n\t\t#temporary line for testing:\n\t\t#return self.reward_idea() # also not working yet\n\n\t\tcost = self.cost_function()\n\t\tconstraints_violation = self.get_constraints_violation()\n\t\t#old_aug_cost = self.augmented_cost\n\t\t#new_aug_cost = self.get_augmented_cost(cost,constraints_violation)\n\t\t#reward = old_aug_cost - new_aug_cost # reward formula\n\t\t#self.augmented_cost = new_aug_cost # update augmented_cost\n\t\treward = -self.get_augmented_cost(cost,constraints_violation)\n\t\t#print(\"***\\nDEBUG cost: \" +str(cost)+\" constraints_violation: \"+str(constraints_violation))\n\t\t#print(\"DEBUG reward: \"+str(reward))\n\t\t'''\n\t\t#old idea that is probably bad and not necessary:\n\t\tif(self.step_count == 0):\n\t\t\t# old_aug_cost doesn't exist in first step... 
ACTUALLY IT DOES!\n\t\t\tprint(\"DEBUG step_count == 0, reward would be \"+str(reward))\n\t\t\tprint(\"DEBUG old_aug_cost: \"+str(old_aug_cost) + \" new_aug_cost: \"+str(new_aug_cost) )\n\t\t\treturn 0\n\t\t'''\n\n\t\treturn reward", "def get_m(self, data, i):\n\n mask = (data['source_bin'] == i)\n \n # We use S=0 here because we have already included it in R_total\n g1, g2 = apply_metacal_response(data['R_total'][i], 0.0, data['mcal_g1'][mask],data['mcal_g2'][mask])\n\n return g1, g2, mask", "def get_score(self):\n return np.max(self._scores) if self._scores is not None else self._score_history[-1]", "def min_mireds(self) -> int:\n return MIREDS_MIN", "def rate_last(self):\n diff = (self.time - self.lasts[0][0]).total_seconds()\n try:\n return (self.pos - self.lasts[0][1]) / FAC / diff\n except ZeroDivisionError:\n return 0.0", "def get_max_interest(self):\n max_int = max(self.table[\"interest\"])\n print(max_int)\n for index in self.table[\"index\"]:\n if self.table[\"interest\"][index] == max_int:\n return index", "def irradiance(self) -> float:\n\n if self.declination > 0:\n return self._irradiance\n return 0", "def get_fret_num(self):\n low_note = Note(self.guitar.tuning[self.string], self.string,\n self.guitar, False)\n self.fret = (ALL_NOTES.index(self.name) -\n ALL_NOTES.index(low_note.name))\n return self.fret", "def last_percept(self):\n return self.percept", "def get_r_score(self):\n return self.r_score", "def get_M(self):\n return 1.0", "def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)", "def reward(self):\n if self._state is None:\n return 0\n return self.reward_fn(self._state)", "def last_low(self):\n return self.data.last('1D').low.iat[0]", "def getReaction(self):\n return _libsbml.FluxObjective_getReaction(self)", "def _get_lip_best(self) -> float:\n pass", "def moid(self):\n return self._moid", "def moid(self):\n return self._moid", "def MRE(actual, noisy):\n if len(actual) != len(noisy): return -1\n absErr = np.abs(np.array(actual) - np.array(noisy))\n idx_nonzero = np.where(np.array(actual) != 0)\n absErr_nonzero = absErr[idx_nonzero]\n true_nonzero = np.array(actual)[idx_nonzero]\n relErr = absErr_nonzero / true_nonzero\n return relErr.mean()", "def _sparse_reward(self) -> float:\n # `score_on_end_of_traj` is supposed to be called at the end of a\n # trajectory but we use it here since it gives us exactly the reward\n # we're looking for.\n return self.score_on_end_of_traj()", "def reward(self, state):\n if state == (self.size-1, self.size-1):\n return 10\n elif state == (self.size-1, self.mid):\n return -1\n elif state == (self.size-1, 0):\n return 0\n elif state == (0, self.size-1):\n return -12\n else:\n return -1", "def last_value(self):\n return 0", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def getReward(self):\n# def evaluateFitness(self):\n fitness = 0.0\n distance = self.env.getDistance()\n speed = self.env.getSpeed()\n theta = self.env.getOrientation()\n\n ## implementation 101\n timeBonus = (self.maxTime - self.t)/self.maxTime\n alpha = 
1.0/((1+distance)*(1+fabs(theta))*(speed+1));\n if distance < 0.5*self.env.init_distance :\n if(distance < self.env.vicinity_distance and\n abs(theta) < self.env.vicinity_orientation and\n speed < self.env.vicinity_speed ):\n fitness = 1 + timeBonus; \n else:\n fitness = alpha;\n else: fitness = 0\n self.lastFitness = fitness\n if fitness > self.bestFitness : \n self.bestFitness = fitness \n\n return fitness", "def first_value(self):\n return 0", "def _compute_reward(self):\n reward = 0.0\n return reward", "def get_closest_interest_amortization(self):\n differences = [abs(self.table[\"interest\"][i] - self.table[\"amortization\"][i])\\\n for i in self.table[\"index\"][1:]]\n min_diff = min(differences)\n return differences.index(min_diff) + 1", "def estimate(self, reps):\n return self.onerm / MaxCalc.coefficients[reps - 1]", "def lapserate_moist_adiabate():\n return 6.5", "def extract_final_reward(log):\n last_reward_line = log.split('\\n')[-3]\n return float(re.search('Avg-Episode-Reward:\\s+(\\S+)', last_reward_line).group(1))", "def latest_score(self):\r\n if not self.child_history:\r\n return None\r\n return self.score_for_attempt(-1)", "def mag(self):\n return self.photosamplers.get_estimate(mag=True)[0]", "def intensity(self, now):\n return self.model.place.state", "def best_value(self):\r\n return self._best_value", "def find_reward(\r\n self, opponent: Player\r\n ) -> Dict[Action, Dict[Action, Score]]:\r\n return self.payoff_matrix[self.decision][opponent.history[-1]]", "def get_current_reward(self, state):\n if state == 1:\n return 1.0 + self.rng.normal(scale=self.terminal_reward_stdev)\n else:\n return 0.0", "def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2", "def mise(self):\n return self._mise", "def getReward(self, state):\n return (state in self.reward_set) * 1", "def get_reward(self):\n # Ver list\n self.Verlist = {\n '1': False,\n '2': False,\n '3': False,\n '4': False,\n '5': True,\n }\n # --------------------------------- NEW ----\n r = 0\n if self.ENVGetSIReset:\n V = {\n 'CoolRateTemp': self.DRateFun(self.mem['KCNTOMS']['Val']),\n 'CurrentTemp': self.mem['UAVLEG2']['Val'],\n 'CurrentPres': self.mem['ZINST65']['Val'],\n 'Dis': abs(self.DRateFun(self.mem['KCNTOMS']['Val']) - self.mem['UAVLEG2']['Val']),\n 'PZRLevel': self.mem['ZINST63']['Val'],\n 'SG1Nar': self.mem['ZINST78']['Val'], 'SG2Nar': self.mem['ZINST77']['Val'],\n 'SG3Nar': self.mem['ZINST76']['Val'],\n 'SG1Wid': self.mem['ZINST72']['Val'], 'SG2Wid': self.mem['ZINST71']['Val'],\n 'SG3Wid': self.mem['ZINST70']['Val'],\n 'SG1Pres': self.mem['ZINST75']['Val'], 'SG2Pres': self.mem['ZINST74']['Val'],\n 'SG3Pres': self.mem['ZINST73']['Val'],\n }\n if self.Verlist['1']:\n # Cooling rate에 따라서 온도 감소\n r -= V['Dis'] / 100\n # 가압기 수위 10 아래 종료\n # if V['PZRLevel'] <= 10: r -= 100\n if self.Verlist['2']:\n # 목표치까지 도달\n r += (29.5 - V['CurrentPres']) / 100\n r += (170 - V['CurrentTemp']) / 100\n if self.Verlist['3']:\n # Cooling rate에 따라서 온도 감소\n dis_reward = - V['Dis'] / 100 # [0.0 ~ -0.2] 동향을 보임\n # Pressure and Temp Dis\n curp = 29.5 if V['CurrentPres'] <= 29.5 else V['CurrentPres']\n curt = 170 if V['CurrentTemp'] <= 170 else V['CurrentTemp']\n dis_pres = (29.5 - V['CurrentPres']) / 100\n dis_temp = (170 - V['CurrentTemp']) / 100\n\n # r += (dis_pres * 0.1) + (dis_temp * 0.1) + (dis_reward * 10) # 감압 X\n r += (dis_pres * 0.1) + (dis_reward * 5)\n if self.Verlist['4']:\n # Cooling rate에 따라서 온도 감소\n dis_reward = - V['Dis'] / 100 # [0.0 ~ -0.2] 동향을 보임\n # Pressure and Temp Dis\n curp = 29.5 if V['CurrentPres'] <= 
29.5 else V['CurrentPres']\n dis_pres = (29.5 - V['CurrentPres']) / 100\n PT_reward = - PTCureve().Check(Temp=V['CurrentTemp'], Pres=V['CurrentPres'])\n r += (dis_pres * 0.1) + (dis_reward * 5) + (PT_reward * 0.1)\n if self.Verlist['5']:\n r = 0\n # 1] Cooling rate에 따라서 온도 감소\n coolrate_r = - V['Dis']\n # 2] 가압기 수위 20~76% 구간 초과시 패널티\n pzrlevel_r = 0\n if 20 <= V['PZRLevel'] <= 76:\n pass\n else:\n if 20 > V['PZRLevel']:\n pzrlevel_r -= (20 - V['PZRLevel'])\n else:\n pzrlevel_r -= (V['PZRLevel'] - 76)\n # 3] 증기 발생기 6% ~ 50% 이상 초과 시 패널티\n sg_r = 0\n for _ in range(1, 4):\n if 6 <= V[f'SG{_}Nar'] <= 50:\n pass\n else:\n if 6 > V[f'SG{_}Nar']:\n sg_r -= (6 - V[f'SG{_}Nar'])\n else:\n sg_r -= (V[f'SG{_}Nar'] - 50)\n # 4] PT 커브에서 벗어나면 거리만큼 패널티\n PT_reward = - PTCureve().Check_Dis(Temp=V['CurrentTemp'], Pres=V['CurrentPres'])\n # 5] 목표치와 가까워 질 수록 +\n pres_r, temp_r = 0, 0\n pres_r = (29.5 - V['CurrentPres'])\n temp_r = (170 - V['CurrentTemp'])\n # 6] S/G 압력\n Avg_pres = (V['SG1Pres'] + V['SG2Pres'] + V['SG3Pres'])/3\n SGpres_r = 9 - Avg_pres if Avg_pres > 9 else 0\n # --------------------------------------------------------------\n w = {\n 'coolrate_r': [coolrate_r, 2],\n 'pzrlevel_r': [pzrlevel_r, 1],\n 'sg_r': [sg_r, 1.5],\n 'PT_reward': [PT_reward, 3],\n 'pres_r': [pres_r, 1],\n 'temp_r': [temp_r, 0.5],\n 'SGpres_r': [SGpres_r, 0.5]\n }\n\n log_txt_temp = ''\n for key in w.keys():\n r += w[key][0] * w[key][1]\n log_txt_temp += f'[{round(w[key][0]*w[key][1], 1)}:{w[key][0]}*{w[key][1]}]_'\n log_txt_temp = f'R:{r} = ' + log_txt_temp\n\n self.Loger_txt += log_txt_temp\n\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+{dis_temp * 0.1}+({dis_reward * 10})\\t\"\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+({dis_reward * 5})\\t\" #Verlist['3']\n # self.Loger_txt += f\"R:{r} = {dis_pres * 0.1}+({dis_reward * 5})+({PT_reward * 0.1})\\t\"\n\n # --------------------------------- Send R ----\n self.AcumulatedReward += r\n # self.Loger_txt += f'{r}\\t'\n self.DIS_CSF_Info += f'[R: {r}]\\t'\n return r", "def getMomentum(self):\n return self.p", "def latest_score(self):\r\n if not self.child_history:\r\n return None\r\n return self.child_history[-1].get('score')", "def get_mc(self) -> int:\r\n return self.mc\r\n raise NotImplementedError", "def get_Lo(self):\n return self.Lo", "def get_Lo(self):\n return self.Lo", "def index_of_refraction(self):\n return self.microsphere.index_of_refraction(self.wavelength)", "def act(self, observation):\n for i in range(len(self.counts)):\n if self.counts[i] == 0:\n return i\n \n total_counts = np.sum(self.counts)\n for action1 in range(len(self.counts)):\n add = math.sqrt((self.minmax/2 * math.log(total_counts)) / float(self.counts[action1]))\n self.ucb_values[action1] = self.values[action1] + add\n return np.argmax(self.ucb_values)", "def get_absolute_regret(self):\n values = self.stats['return_stats']['episode_totals']\n first_episode = self.get_convergence_episode()\n final_return = self.get_final_return()\n regret = np.sum(final_return - values[:first_episode])\n return regret", "def getM(self):\r\n return self.M", "def get_mc(self) -> int:\n return self.MC", "def get_reward(self):\n reward, self.reward = self.reward, 0\n return reward", "def get_state_reward(self, max_num_steps=None):\n if max_num_steps is not None:\n num_steps = max_num_steps\n else:\n num_steps = self.im_size[0]+self.im_size[1]\n recent_pos = self.pos_history[-1]\n if recent_pos == self.goal_pos and \\\n self.get_time() <= num_steps+1:\n # success\n return 10., 1\n elif self.get_time() > 
num_steps+1:\n # failed\n return -10., -1\n else:\n return -0.01, 0", "def getReward(self, state, action, nextState):\n if self.isTerminal(nextState):\n return 0\n else:\n return 1", "def getLimbIndex(self):\n\n data = self.name.split('-')\n return int(data[1]) - 1", "def get_max_cl(Re, r):\n xf = XFoil()\n if r <= 0.175: \n xf.airfoil = naca6409\n else:\n xf.airfoil = naca2412\n xf.Re = Re\n xf.Re = Re\n xf.max_iter = 200\n xf.n_crit = 9.00\n xf.xtr = [1.00, 1.00]\n xf.M = 0\n a_seq, cl_seq, cd_seq, cm_seq, cp_seq = xf.aseq(10,15,0.1)\n # ignore nan by making it 0\n cl_seq = np.nan_to_num(cl_seq)\n # find the maximum cl \n cl_maxi = np.max(cl_seq)\n # index of the maximum cl\n idx = np.argmax(cl_seq)\n return round(cl_maxi,2),round(a_seq[idx],2), round(cd_seq[idx],2)", "def getExternalReward(self):\r\n\r\n return self.externalReward", "def get_observed_objective(self) -> float:\n # pylint: disable=invalid-name\n obj = 0.\n for gr in self.grounded.values():\n dist = gr.get_observed_dist_to_satisfaction()\n obj += 1 - self.weight * max(0, dist) ** 2\n return obj", "def moi(self) -> Optional[MomentOfInertia]:\n return None if self.atoms is None else self.atoms.moi", "def __find_previous_cf_element(self) -> LineElement:\n index = (self.current_time_in_eighths - 1) // N_EIGHTHS_PER_MEASURE\n result = self.cantus_firmus[index]\n return result", "def reward(self):\n return self._reward", "def resid(self):\n # GH#5255\n return self.model.endog - self.fittedvalues", "def last_hit(self):\n return self._last_hit", "def get_max_praises(self):\n char = self.caller.char_ob\n clout = char.social_clout\n s_rank = char.item_data.social_rank\n return clout + ((8 - s_rank) // 2)", "def get_marble_count(self):", "def Insurance(Md,X):\n u = X[iu]\n b = Md.b()\n return u/b - u/(1-u+u*b)", "def get_score(self):\r\n return None" ]
[ "0.575768", "0.5676095", "0.56729364", "0.56670934", "0.559796", "0.55813545", "0.5575672", "0.55736303", "0.55562896", "0.55536056", "0.5553515", "0.5538506", "0.5526368", "0.55253816", "0.5502831", "0.5489734", "0.5470172", "0.5469639", "0.5466303", "0.5466303", "0.54242605", "0.5423435", "0.5418171", "0.5418171", "0.5406587", "0.5402487", "0.5394824", "0.5374265", "0.5369289", "0.5366906", "0.5353146", "0.5340081", "0.5327389", "0.5295152", "0.52943146", "0.528064", "0.52632016", "0.5259376", "0.5258128", "0.52569675", "0.52424043", "0.52408564", "0.5233934", "0.5224819", "0.52239096", "0.5219588", "0.5218618", "0.52133596", "0.52088827", "0.52054334", "0.5202847", "0.5202847", "0.52018124", "0.51987886", "0.51973456", "0.51869655", "0.51799345", "0.517087", "0.51664793", "0.5158805", "0.51572776", "0.5155538", "0.51436275", "0.51355606", "0.5133872", "0.512721", "0.5120174", "0.5114031", "0.51132536", "0.5112872", "0.5108813", "0.5105185", "0.51041234", "0.5098172", "0.50954574", "0.50930935", "0.50910604", "0.50864774", "0.50864774", "0.5083996", "0.50820833", "0.50805575", "0.5074407", "0.50713825", "0.5071323", "0.50679314", "0.50670224", "0.50667536", "0.5062059", "0.50611675", "0.5058654", "0.50554043", "0.5053757", "0.50521296", "0.50499463", "0.5048828", "0.50474703", "0.50461733", "0.50445545", "0.50430036" ]
0.70698816
0
Create a multipart HTTP request
import mimetypes
import random
import string

# Characters used to build a random MIME boundary (assumed definition; the
# original snippet referenced _BOUNDARY_CHARS without defining it).
_BOUNDARY_CHARS = string.digits + string.ascii_letters


def encode_multipart(fields, files, boundary=None):
    """Encode form fields and files as a multipart/form-data body.

    fields maps name -> str value; files maps name -> {'filename', 'content',
    optional 'mimetype'}. Returns (body, headers), where body is a str and
    headers carries the matching Content-Type and Content-Length.
    """
    def escape_quote(s):
        return s.replace('"', '\\"')

    if boundary is None:
        boundary = ''.join(random.choice(_BOUNDARY_CHARS) for i in range(30))
    lines = []

    # One part per plain form field.
    for name, value in fields.items():
        lines.extend((
            f'--{boundary}',
            f'Content-Disposition: form-data; name="{escape_quote(name)}"',
            '',
            value,
        ))

    # One part per file, guessing the Content-Type when none is supplied.
    for name, value in files.items():
        filename = value['filename']
        mimetype = (value.get('mimetype') or
                    mimetypes.guess_type(filename)[0] or
                    'application/octet-stream')
        name, filename = escape_quote(name), escape_quote(filename)
        lines.extend((
            f'--{boundary}',
            f'Content-Disposition: form-data; name="{name}"; filename="{filename}"',
            f'Content-Type: {mimetype}',
            '',
            value['content'],
        ))

    # Closing boundary marker terminates the multipart body.
    lines.extend((
        f'--{boundary}--',
        '',
    ))
    body = '\r\n'.join(lines)

    headers = {
        'Content-Type': f'multipart/form-data; boundary={boundary}',
        'Content-Length': str(len(body)),
    }

    return (body, headers)
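A minimal usage sketch (not part of the original dataset entry): it posts one text field and one file with the standard library's `urllib.request`. The endpoint URL and field names are hypothetical, and the `str` body is encoded to bytes before sending; note that the `Content-Length` computed above from `len(body)` matches the byte length only for ASCII content.

# Usage sketch: assumes encode_multipart() from above; the URL is hypothetical.
import urllib.request

fields = {'title': 'Quarterly report'}
files = {'upload': {'filename': 'report.txt',
                    'content': 'hello world',   # must be str: parts are joined with '\r\n'
                    'mimetype': 'text/plain'}}

body, headers = encode_multipart(fields, files)
req = urllib.request.Request('http://example.com/upload',
                             data=body.encode('utf-8'),  # bytes on the wire
                             headers=headers)
with urllib.request.urlopen(req) as resp:
    print(resp.status)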
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_request(self, theurl, fields, files, txheaders=None):\n\n content_type, body = self.encode_multipart_formdata(fields, files)\n if not txheaders: txheaders = {}\n txheaders['Content-type'] = content_type\n txheaders['Content-length'] = str(len(body))\n\n return urllib2.Request(theurl, body, txheaders)", "async def post_multipart(self, part1, part_2, test):", "def get_multipart():\n part = MIMEMultipart(\n 'related', charset='utf-8', type='application/xop+xml',\n boundary=BOUND, start='<soap-env:Envelope>')\n part.set_param('start-info', 'text/xml')\n\n part.add_header('Accept-Encoding', 'gzip,deflate')\n part.add_header('SOAPAction', '\"\"')\n part.add_header('MIME-Version', '1.0')\n part.add_header('Host', 'test-idahe.ordre.medecin.fr')\n part.add_header('Connection', 'Keep-Alive')\n part.add_header('User-Agent', 'Apache-HttpClient/4.1.1 (java 1.5)')\n return part", "def post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n headers = {'Content-Type': content_type,\n 'Content-Length': str(len(body))}\n\n #r = urllib2.Request(\"%s%s\" % (host, selector), b2a_base64(body), headers)\n r = urllib2.Request(\"%s%s\" % (host, selector), body, headers)\n #return urllib2.urlopen(r).read()\n return urllib2.urlopen(r)", "def post(url, fields, files=[]):\n pm = PostMultipart()\n return pm.post(url, fields, files)", "def get_multipart_request_body(query, variables, file, file_name):\n return {\n 'operations': json.dumps({'query': query, 'variables': variables}),\n 'map': json.dumps({file_name: ['variables.file']}), file_name: file}", "def multipart_post(host, selector, fields, files):\n content_type, body = encode_multipart_data(fields, files)\n h = httplib.HTTP(host)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n h.putheader('content-length', str(len(body)))\n h.endheaders()\n h.send(body)\n errcode, errmsg, headers = h.getreply()\n return h.file.read()", "def request(self, verb, url, payload: Optional[Any] = ..., multipart: Optional[Any] = ...):\n ...", "def post_multipart(url, fields, files=()):\r\n content_type, data = encode_multipart_formdata(fields, files)\r\n url_parts = urlparse.urlparse(url)\r\n if url_parts.scheme == 'http':\r\n h = httplib.HTTPConnection(url_parts.netloc)\r\n elif url_parts.scheme == 'https':\r\n h = httplib.HTTPSConnection(url_parts.netloc)\r\n else:\r\n raise Exception('Unsupported URL scheme')\r\n path = urlparse.urlunparse(('', '') + url_parts[2:])\r\n h.request('POST', path, data, {'content-type':content_type})\r\n return h.getresponse().read()", "def post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n h = httplib.HTTPConnection(host)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n h.putheader('content-length', str(len(body)))\n h.endheaders()\n h.send(body)\n response = h.getresponse()\n output = response.read()\n return output\n # return h.file.read()", "def post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n h = httplib.HTTP(host)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n h.putheader('content-length', str(len(body)))\n h.endheaders()\n print content_type\n h.send(body)\n errcode, errmsg, headers = h.getreply()\n return h.file.read()", "def _encode_multipart_formdata(self, fields, files=[]):\n boundary=_generate_boundary()\n crlf = '\\r\\n'\n\n l = []\n for k, v in fields:\n 
l.append('--' + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"' % k)\n l.append('')\n l.append(v)\n for (k, f, v) in files:\n l.append('--' + boundary)\n l.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (k, f))\n l.append('Content-Type: %s' % self._get_content_type(f))\n l.append('')\n l.append(v)\n l.append('--' + boundary + '--')\n l.append('')\n body = crlf.join(l)\n return boundary, body", "def encode_multipart_formdata(cls, fields, files):\n boundary = '----------ThIs_Is_tHe_bouNdaRY_$'\n lines = []\n for (key, value) in fields:\n lines.append('--' + boundary)\n lines.append('Content-Disposition: form-data; name=\"%s\"' % key)\n lines.append('')\n lines.append(str(value))\n for (key, filename, value, content_type) in files:\n filename = filename + mimetypes.guess_extension(content_type)\n lines.append('--' + boundary)\n lines.append(\n 'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (\n key, filename\n )\n )\n lines.append('Content-Type: %s' % content_type)\n lines.append('')\n lines.append(value)\n lines.append('--' + boundary + '--')\n lines.append('')\n body = b'\\r\\n'.join(map(lambda x: x.encode('utf8') if isinstance(x, str) else x, lines))\n content_type = 'multipart/form-data; boundary=%s' % boundary\n return content_type, body", "def multipart_push(self, upload_id, url, part_number, chunk_size, data, md5=None):\n path = self.base_path / url\n assert path.is_file(), f\"{self}: multipart upload file {path} does not exist.\"\n with path.open(\"r+b\") as stream:\n stream.seek((part_number - 1) * chunk_size)\n shutil.copyfileobj(data, stream, 1024 * 1024)\n return dict()", "def createRequest(self, **kwargs):\n for k,v in kwargs.items():\n self.request[\"content\"][k] = v\n \n return self.request", "def multipart(self):\n self.add_file_string('Multipart file')\n self.should_copy = False", "def encode_multipart_formdata(self,fields, files, BOUNDARY = '-----'+mimetools.choose_boundary()+'-----'):\n\n CRLF = '\\r\\n'\n L = []\n if isinstance(fields, dict):\n fields = fields.items()\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n filetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % filetype)\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY # what if no files are encoded\n return content_type, body", "def create_multipart_upload(ACL=None, Bucket=None, CacheControl=None, ContentDisposition=None, ContentEncoding=None, ContentLanguage=None, ContentType=None, Expires=None, GrantFullControl=None, GrantRead=None, GrantReadACP=None, GrantWriteACP=None, Key=None, Metadata=None, ServerSideEncryption=None, StorageClass=None, WebsiteRedirectLocation=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, SSEKMSKeyId=None, SSEKMSEncryptionContext=None, RequestPayer=None, Tagging=None, ObjectLockMode=None, ObjectLockRetainUntilDate=None, ObjectLockLegalHoldStatus=None):\n pass", "def _encode_multipart_formdata(self, fields, files):\r\n BOUNDARY = mimetools.choose_boundary()\r\n content = []\r\n\r\n fields = fields or {}\r\n files = files or {}\r\n\r\n for key in fields:\r\n 
content.append('--' + BOUNDARY + '\\r\\n')\r\n content.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % key)\r\n content.append('\\r\\n')\r\n content.append(fields[key])\r\n content.append('\\r\\n')\r\n\r\n for key in files:\r\n filename = files[key]['filename']\r\n value = files[key]['content']\r\n content.append('--' + BOUNDARY + '\\r\\n')\r\n content.append('Content-Disposition: form-data; name=\"%s\"; ' % key)\r\n content.append('filename=\"%s\"\\r\\n' % filename)\r\n content.append('\\r\\n')\r\n content.append(value)\r\n content.append('\\r\\n')\r\n\r\n content.append('--')\r\n content.append(BOUNDARY)\r\n content.append('--\\r\\n')\r\n content.append('\\r\\n')\r\n\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n\r\n return content_type, ''.join(map(str, content))", "def call_with_multipart(self, path, fields=None, files=None):\r\n content_type, mr = self.encode_multipart_formdata(fields, files)\r\n return self.call_with(path, mr, content_type, mr.length())", "def encode_multipart(params_dict, boundary):\n data = []\n\n for k, v in params_dict.items():\n data.append('--%s' % boundary)\n data.append('Content-Disposition: form-data; name=\"%s\"\\r\\n' % k)\n data.append(v if isinstance(v, str) else v.decode('utf-8'))\n\n data.append('--%s--\\r\\n' % boundary)\n return '\\r\\n'.join(data)", "def encode_multipart_formdata(fields):\n\tBOUNDARY = '---------------------------473995594142710163552326102'\n\tCRLF = '\\r\\n'\n\tL = []\n\tfor (key, filename, value) in fields:\n\t\tL.append('--' + BOUNDARY)\n\t\tif filename is None:\n\t\t\tL.append('Content-Disposition: form-data; name=\"%s\"' % key)\n\t\telse:\n\t\t\tL.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n\t\t\tL.append('Content-Type: %s' % get_content_type(filename))\n\t\tL.append('')\n\t\tL.append(value)\n\tL.append('--' + BOUNDARY + '--')\n\tL.append('')\n\tbody = CRLF.join(L)\n\tcontent_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n\treturn content_type, body", "def encode_multipart(values):\n boundary = '-----------=_Part_%s%s' (time(), random())\n lines = []\n for key, value in values.iteritems():\n if isinstance(value, File):\n lines.extend((\n '--' + boundary,\n 'Content-Dispotion: form-data; name=\"%s\"; filename=\"%s\"' %\n (key, value.filename),\n 'Content-Type: ' + value.mimetype,\n '',\n value.read()\n ))\n else:\n lines.extend((\n '--' + boundary,\n 'Content-Dispotion: form-data; name=\"%s\"' % key,\n '',\n value\n ))\n lines.extend(('--' + boundary + '--', ''))\n return boundary, '\\r\\n'.join(lines)", "def encode_multipart_formdata(fields, files):\r\n # changed the boundary to be more similar to the perl script written by\r\n # Andreas\r\n BOUNDARY = 'xYzZY'\r\n CRLF = '\\r\\n'\r\n L = []\r\n for (key, value) in fields:\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n L.append('')\r\n L.append(value)\r\n for (key, filename, value) in files:\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\r\n (key, filename))\r\n L.append('Content-Type: %s' % get_content_type(filename))\r\n L.append('')\r\n L.append(value)\r\n L.append('--' + BOUNDARY + '--')\r\n L.append('')\r\n body = CRLF.join(L)\r\n content_type = 'multipart/form-data'\r\n return content_type, body", "def pack_image(filename, contentname, max_size=1024, **params):\n # image must be less than 700kb in size\n try:\n if os.path.getsize(filename) > (max_size * 1024):\n raise 
Exception('File is too big, must be less than 700kb.')\n except os.error:\n raise Exception('Unable to access file')\n\n # image must be gif, jpeg, or png\n file_type = mimetypes.guess_type(filename)\n if file_type is None:\n raise Exception('Could not determine file type')\n file_type = file_type[0]\n if file_type.split('/')[0] != 'image':\n raise Exception('Invalid file type for image: %s' % file_type)\n\n # build the mulitpart-formdata body\n BOUNDARY = 'WeiboxQQAPI'\n body = []\n for key, val in params.items():\n if val is not None:\n body.append('--' + BOUNDARY)\n body.append('Content-Disposition: form-data; name=\"%s\"' % key)\n body.append('Content-Type: text/plain; charset=UTF-8')\n body.append('Content-Transfer-Encoding: 8bit')\n body.append('')\n body.append(QQWeiboAPIBase.convert_to_utf8_str(val))\n fp = open(filename, 'rb')\n body.append('--' + BOUNDARY)\n body.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (contentname, filename.encode('utf-8')))\n body.append('Content-Type: %s' % file_type)\n body.append('Content-Transfer-Encoding: binary')\n body.append('')\n body.append(fp.read())\n body.append('--%s--' % BOUNDARY)\n body.append('')\n fp.close()\n body.append('--%s--' % BOUNDARY)\n body.append('')\n \n body = '\\r\\n'.join(body)\n # build headers\n headers = {\n 'Content-Type': 'multipart/form-data; boundary=%s' % BOUNDARY,\n 'Content-Length': len(body)\n }\n\n return headers, body", "def encode_multipart_formdata(self, fields, files):\r\n if files is None:\r\n files = []\r\n if fields is None:\r\n fields = {}\r\n\r\n readers = []\r\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\r\n CRLF = '\\r\\n'\r\n L1 = []\r\n for key in fields:\r\n L1.append('--' + BOUNDARY)\r\n L1.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n L1.append('')\r\n L1.append(fields[key])\r\n b1 = CRLF.join(L1)\r\n readers.append(b1)\r\n\r\n for file_info in files:\r\n L = []\r\n L.append('')\r\n L.append('--' + BOUNDARY)\r\n disposition = \"Content-Disposition: form-data;\"\r\n filename = _qiniu_escape(file_info.get('filename'))\r\n L.append('%s name=\"file\"; filename=\"%s\"' % (disposition, filename))\r\n L.append('Content-Type: %s' %\r\n file_info.get('mime_type', 'application/octet-stream'))\r\n L.append('')\r\n L.append('')\r\n b2 = CRLF.join(L)\r\n readers.append(b2)\r\n\r\n data = file_info.get('data')\r\n readers.append(data)\r\n\r\n L3 = ['', '--' + BOUNDARY + '--', '']\r\n b3 = CRLF.join(L3)\r\n readers.append(b3)\r\n\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n return content_type, MultiReader(readers)", "def build_body(self):\n # Build a list of lists, each containing \"lines\" of the\n # request. 
Each part is separated by a boundary string.\n # Once the list is built, return a string where each\n # line is separated by '\\r\\n'.\n parts = []\n part_boundary = '--' + self.boundary\n\n # Add the form fields\n parts.extend(\n [bytes(part_boundary.encode(self.charset)),\n bytes(('Content-Disposition: form-data; name=\"%s\"' % name).encode(self.charset))\n if PYTHON_VERSION_3 else ('Content-Disposition: form-data; name=\"%s\"' % name),\n bytes(('Content-Type: text/plain; charset=%s' % self.charset).encode(self.charset)),\n bytes(''.encode(self.charset)),\n bytes(value.encode(self.charset)) if PYTHON_VERSION_3 else value\n ]\n for name, value in self.form_fields\n )\n\n # Add the files to upload\n parts.extend(\n [bytes(part_boundary.encode(self.charset)),\n bytes(('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\n (field_name, filename)).encode(self.charset)) if PYTHON_VERSION_3 else\n ('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (field_name, filename)),\n bytes(('Content-Type: %s' % content_type).encode(self.charset)),\n bytes('Content-Transfer-Encoding: binary'.encode(self.charset)),\n bytes(''.encode(self.charset)),\n body,\n ]\n for field_name, filename, content_type, body in self.files\n )\n\n # Flatten the list and add closing boundary marker,\n # then return CR+LF separated data\n flattened = list(itertools.chain(*parts))\n flattened.append(bytes(('--' + self.boundary + '--').encode(self.charset)))\n flattened.append(bytes(''.encode(self.charset)))\n return bytes('\\r\\n'.encode(self.charset)).join(flattened)", "def test_post_file(self):\n inline_object = {}\n headers = { \n 'Accept': 'application/json',\n 'Content-Type': 'multipart/form-data',\n 'admin_passphrase': 'special-key',\n 'passphrase': 'special-key',\n }\n response = self.client.open(\n '/openapi/file',\n method='POST',\n headers=headers,\n data=json.dumps(inline_object),\n content_type='multipart/form-data')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = b'\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = b''\n for l in L:\n if len(body) > 0:\n body = body + CRLF\n if isinstance(l, str):\n body = body + bytes(l, 'utf-8')\n else:\n body = body + l\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def __PostFile(self, url, fileName, prefix):\n CRLF = '\\r\\n'\n\n f = open(fileName, \"rb\")\n content = f.read()\n boundary = \"-------------------------------\"+ \\\n \"\".join([ random.choice('0123456789') for x in range(28) ])\n\n output = []\n output.append(\"--\"+boundary)\n output.append('Content-Disposition: form-data; name=\"'+prefix+ \\\n '\"; filename=\"avatar.png\"')\n output.append('Content-Type: '+mimetypes.guess_type(fileName)[0] \\\n or 'application/octet-stream')\n output.append(\"\")\n output.append(content)\n output.append(\"--\"+boundary+\"--\")\n output.append(\"\")\n\n encoded = CRLF.join(output)\n\n conn = self.__GetConnection()\n headers = 
self.__MakeHeaders(True)\n\n conn.putrequest(\"POST\", url)\n for (k,v) in headers.iteritems():\n conn.putheader(k, v)\n\n conn.putheader(\"Content-Type\", \"multipart/form-data; boundary=\" + \\\n boundary)\n conn.putheader(\"Content-Length\", str(len(encoded)))\n\n conn.endheaders()\n conn.send(encoded)\n response = conn.getresponse()\n self.__CheckResponse(response)", "def initiate_multipart_upload(self):\n request = self.s3.create_request(\"OBJECT_POST\", uri = self.uri, headers = self.headers_baseline, extra = \"?uploads\")\n response = self.s3.send_request(request)\n data = response[\"data\"]\n self.upload_id = getTextFromXml(data, \"UploadId\")\n return self.upload_id", "def __encode_multipart_formdata(self, fields, files):\n BOUNDARY = fogbugz._make_boundary()\n\n if len(files) > 0:\n fields['nFileCount'] = str(len(files))\n\n crlf = '\\r\\n'\n buf = fogbugz.BytesIO()\n\n for k, v in fields.items():\n vcall = str\n if isinstance(v, unicode):\n vcall = unicode\n if fogbugz.DEBUG:\n print(\"field: %s: %s\"% (repr(k), repr(v)))\n lines = [\n '--' + BOUNDARY,\n 'Content-disposition: form-data; name=\"%s\"' % k,\n '',\n vcall(v),\n '',\n ]\n buf.write(crlf.join(lines).encode('utf-8'))\n\n n = 0\n for f, h in files.items():\n n += 1\n lines = [\n '--' + BOUNDARY,\n 'Content-disposition: form-data; name=\"File%d\"; '\n 'filename=\"%s\"' % (n, f),\n '',\n ]\n buf.write(crlf.join(lines).encode('utf-8'))\n lines = [\n 'Content-type: application/octet-stream',\n '',\n '',\n ]\n buf.write(crlf.join(lines).encode('utf-8'))\n buf.write(h.read())\n buf.write(crlf.encode('utf-8'))\n\n buf.write(('--' + BOUNDARY + '--' + crlf).encode('utf-8'))\n content_type = \"multipart/form-data; boundary=%s\" % BOUNDARY\n return content_type, buf.getvalue()", "def _make_request(self, payload, headers=None):\n pathparts = REQ_PATH.split(b\"/\")\n if pathparts[0] == b\"\":\n pathparts = pathparts[1:]\n dreq = DummyRequest(pathparts)\n dreq.requestHeaders = Headers(headers or {})\n dreq.responseCode = 200 # default to 200\n\n if isinstance(payload, dict):\n payload = json.dumps(payload)\n\n dreq.content = BytesIO(payload.encode())\n dreq.method = \"POST\"\n\n return dreq", "def encode_multipart_formdata(fields, files):\n BOUNDARY = mimetools.choose_boundary()\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n #print key,value\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n #print key, filename\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n content_type = get_content_type(filename)\n #content_type='text/plain; charset=ascii'\n #content_type='application/octet-stream'\n L.append('Content-Type: %s' % content_type)\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n 
L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields, files):\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\n CRLF = '\\r\\n'\n L = []\n for (key, value) in fields:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\n L.append('')\n L.append(value)\n for (key, filename, value) in files:\n L.append('--' + BOUNDARY)\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (key, filename))\n L.append('Content-Type: %s' % get_content_type(filename))\n L.append('')\n L.append(value)\n L.append('--' + BOUNDARY + '--')\n L.append('')\n body = CRLF.join(L)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def encode_multipart_formdata(fields, boundary=None):\n body = io.BytesIO()\n if boundary is None:\n boundary = choose_boundary()\n\n for fieldname, value in iter_fields(fields):\n body.write(b'--%s\\r\\n' % (boundary))\n\n if isinstance(value, tuple):\n filename, data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"; '\n 'filename=\"%s\"\\r\\n' % (fieldname, filename))\n body.write(b'Content-Type: %s\\r\\n\\r\\n' %\n (get_content_type(filename)))\n else:\n data = value\n writer(body).write('Content-Disposition: form-data; name=\"%s\"\\r\\n'\n % (fieldname))\n body.write(b'Content-Type: text/plain\\r\\n\\r\\n')\n\n if isinstance(data, int):\n data = str(data) # Backwards compatibility\n\n if isinstance(data, unicode):\n writer(body).write(data)\n else:\n body.write(data)\n\n body.write(b'\\r\\n')\n\n body.write(b'--%s--\\r\\n' % (boundary))\n\n content_type = b'multipart/form-data; boundary=%s' % boundary\n\n return body.getvalue(), content_type", "def post_multipart(host, fields, files, submit_to_server):\r\n content_type, body = encode_multipart_formdata(fields, files)\r\n h = httplib.HTTP(host)\r\n # needed to change the following url to be handled properly by MG-RAST\r\n h.putrequest('POST', 'http://metagenomics.anl.gov/qiime.cgi')\r\n 
h.putheader('Content-Type', content_type)\r\n h.putheader('Content-Length', str(len(body)))\r\n h.endheaders()\r\n\r\n # put a check in place for testing purposes on whether the data should be\r\n # posted on the MG-RAST website\r\n if submit_to_server:\r\n h.send(body)\r\n errcode, errmsg, headers = h.getreply()\r\n\r\n # verify the data was received by MG-RAST\r\n if errcode == 200:\r\n response = h.file.read()\r\n else:\r\n raise OSError(\r\n 'MG-RAST could not fulfill the request, which means that the server is unavailable!')\r\n else:\r\n response = body\r\n\r\n return response", "def encode_multipart_formdata(fields, files=()):\r\n BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'\r\n CRLF = '\\r\\n'\r\n L = []\r\n for key, value in fields.items():\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n L.append('')\r\n L.append(value)\r\n for (key, filename, value) in files:\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\r\n (key, filename))\r\n content_type = mimetypes.guess_type(filename)[0] or DEFAULT_TYPE\r\n L.append('Content-Type: %s' % content_type)\r\n L.append('')\r\n L.append(value)\r\n L.append('--' + BOUNDARY + '--')\r\n L.append('')\r\n body = CRLF.join(L)\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n return content_type, body", "def EncodeMultipartFormData(fields, files):\r\n BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'\r\n CRLF = '\\r\\n'\r\n lines = []\r\n for (key, value) in fields:\r\n lines.append('--' + BOUNDARY)\r\n lines.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n lines.append('')\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n lines.append(value)\r\n for (key, filename, value) in files:\r\n lines.append('--' + BOUNDARY)\r\n lines.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\r\n (key, filename))\r\n lines.append('Content-Type: %s' % GetContentType(filename))\r\n lines.append('')\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n lines.append(value)\r\n lines.append('--' + BOUNDARY + '--')\r\n lines.append('')\r\n body = CRLF.join(lines)\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n return content_type, body", "def EncodeMultipartFormData(fields, files):\n BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'\n CRLF = '\\r\\n'\n lines = []\n for (key, value) in fields:\n lines.append('--' + BOUNDARY)\n lines.append('Content-Disposition: form-data; name=\"%s\"' % key)\n lines.append('')\n if type(value) == unicode:\n value = value.encode(\"utf-8\")\n lines.append(value)\n for (key, filename, value) in files:\n if type(filename) == unicode:\n filename = filename.encode(\"utf-8\")\n if type(value) == unicode:\n value = value.encode(\"utf-8\")\n lines.append('--' + BOUNDARY)\n lines.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' %\n (key, filename))\n lines.append('Content-Type: %s' % GetContentType(filename))\n lines.append('')\n lines.append(value)\n lines.append('--' + BOUNDARY + '--')\n lines.append('')\n body = CRLF.join(lines)\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\n return content_type, body", "def create(self, request, *args, **kwargs):\n file_obj = request.data['file']\n max_trip_distance = request.data.get('max_trip_distance')\n\n client = boto3.client('s3', config=BotocoreClientConfig(signature_version='s3v4'))\n\n organization = request.user.organization\n file_name = 
'{}.zip'.format(str(uuid4()))\n key = get_batch_shapefile_upload_path(organization.name, file_name).lstrip('/')\n\n response = client.upload_fileobj(file_obj, settings.AWS_STORAGE_BUCKET_NAME, key)\n print(response)\n url = client.generate_presigned_url(\n ClientMethod='get_object',\n Params={'Bucket': settings.AWS_STORAGE_BUCKET_NAME, 'Key': key}\n )\n async_task('pfb_analysis.tasks.create_batch_from_remote_shapefile',\n url,\n max_trip_distance=max_trip_distance,\n group='create_analysis_batch',\n ack_failure=True)\n\n return Response({\n 'shapefile_url': url,\n 'status': 'STARTED'\n }, status=status.HTTP_200_OK)", "def createRequest(test, url, headers=None):\n request = HTTPRequest(url=url)\n if headers: request.headers=headers\n test.record(request, HTTPRequest.getHttpMethodFilter())\n return request", "def post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n\n h = httplib.HTTP(host)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n h.putheader('content-length', str(len(body)))\n h.endheaders()\n h.send(body)\n \n errcode, errmsg, headers = h.getreply()\n \n print \"CODE=%d\"%errcode\n print \"ERRMSG=%s\"%errmsg\n print \"*** Headers ***\"\n print headers\n print \"*** End of Headers ***\"\n \n if 'Failed' in headers.get('Location'):\n print 'ERROR: Upload failed'\n sys.exit(-1)\n \n print 'Package successfully deployed'\n \n return h.file.read()", "def post(self, url, timeout=None, headers=None, files=None, **kwargs):\n helper = self._helper\n if helper is None:\n helper = ClientHelper(self)\n\n body = ''\n\n if headers is None:\n headers = {}\n\n if headers.get('Content-Type', '') == 'application/x-www-form-urlencoded' and files:\n raise ValueError(\"Cannot send files with Content-Type \"\n \"'application/x-www-form-urlencoded'.\")\n\n if files:\n headers['Content-Type'] = 'multipart/form-data'\n elif not 'Content-Type' in headers:\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n\n if headers['Content-Type'] == 'multipart/form-data':\n boundary, body = encode_multipart(kwargs, files)\n\n headers['Content-Type'] = 'multipart/form-data; boundary=%s' % \\\n boundary\n\n elif kwargs:\n body = urllib.urlencode(kwargs, True)\n\n helper.requests.append(self._add_request('POST', url, headers, body,\n timeout))\n\n return helper", "def encode_multipart_formdata(fields, files):\r\n BOUNDARY = \"------8f8289fwur280hfoit9073u89428h\"\r\n CRLF = '\\r\\n'\r\n L = []\r\n for (key, value) in fields.items():\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"' % key)\r\n L.append('')\r\n L.append(value)\r\n for (filename, content) in files.items():\r\n L.append('--' + BOUNDARY)\r\n L.append('Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (filename, filename))\r\n L.append('Content-Type: %s' % get_content_type(filename))\r\n L.append('')\r\n L.append(content)\r\n L.append('--' + BOUNDARY + '--')\r\n L.append('')\r\n body = CRLF.join(L)\r\n content_type = 'multipart/form-data; boundary=%s' % BOUNDARY\r\n return content_type, body", "def build_request(url, headers, body, initial_request: Request) -> Request:\n updated_request = Request(\n method=initial_request.method,\n url=url,\n headers=headers,\n content=body\n )\n\n if hasattr(initial_request, 'extensions'):\n updated_request.extensions = initial_request.extensions\n\n return updated_request", "def post_multipart(host, selector, fields):\n\treturn post_multipart_formdata(host, selector, 
fields)[3]", "def create_multipart(self, bucket_name, object_name):\n\n return h3lib.create_multipart(self._handle, bucket_name, object_name, self._user_id)", "def post(self, path, data={}, multipart=False, **kwargs):\n body = None\n headers = {}\n if multipart:\n body, content_type = encode_multipart_formdata(data)\n headers[\"Content-Type\"] = content_type\n else:\n body = urllib.urlencode(data, doseq=True)\n\n if 'headers' in kwargs:\n kwargs['headers'].update(headers)\n\n return self.fetch(\n path,\n method=\"POST\",\n body=body,\n headers=headers,\n **kwargs\n )", "def _create_batch_file_handle_copy_request(\n file_handle_ids, obj_types, obj_ids, new_con_types, new_file_names\n):\n copy_file_handle_request = {\"copyRequests\": []}\n for (\n file_handle_id,\n obj_type,\n obj_id,\n new_con_type,\n new_file_name,\n ) in itertools.zip_longest(\n file_handle_ids, obj_types, obj_ids, new_con_types, new_file_names\n ):\n # construct JSON object for REST call\n curr_dict = {\n \"originalFile\": {\n \"fileHandleId\": file_handle_id,\n \"associateObjectId\": obj_id,\n \"associateObjectType\": obj_type,\n },\n \"newContentType\": new_con_type,\n \"newFileName\": new_file_name,\n }\n\n # add copy request to list of requests\n copy_file_handle_request[\"copyRequests\"].append(curr_dict)\n return copy_file_handle_request", "def request(\n name: str,\n *,\n mime_type: str,\n parent_folder_id: drive_api.ResourceID,\n drive_service: Optional[discovery.Resource] = None,\n) -> http.HttpRequest:\n if drive_service is None:\n drive_service = drive_api.build_service()\n file_metadata = {\n \"name\": name,\n \"mimeType\": f\"application/vnd.google-apps.{mime_type}\",\n \"parents\": [parent_folder_id],\n }\n return drive_service.files().create(body=file_metadata, fields=\"id\")", "async def _multipart_upload_from_buffer(self):\n # check to see if bucket needs to be created\n if self._create_bucket:\n # check whether the bucket exists\n bucket_list = await self._get_bucket_list()\n if not self._bucket in bucket_list:\n await self._conn_obj.conn.create_bucket(Bucket=self._bucket)\n\n # if the current part is 1 we have to create the multipart upload\n if self._current_part == 1:\n response = await self._conn_obj.conn.create_multipart_upload(\n Bucket = self._bucket,\n Key = self._path\n )\n self._upload_id = response['UploadId']\n # we need to keep a track of the multipart info\n self._multipart_info = {'Parts' : []}\n\n # upload from a buffer - do we need to split into more than one\n # multiparts?\n new_buffer = []\n for buffer_part in range(0, len(self._buffer)):\n # is the current part of the buffer larger than the maximum\n # upload size? 
split if it is\n data_buf = self._buffer[buffer_part]\n data_len = data_buf.tell()\n if data_len >= self._part_size:\n data_buf.seek(0)\n data_pos = 0\n # split the file up\n while data_pos < data_len:\n new_buffer.append(io.BytesIO())\n # copy the data - don't overstep the buffer\n if data_pos + self._part_size >= data_len:\n sub_data = data_buf.read(data_len-data_pos)\n else:\n sub_data = data_buf.read(\n self._part_size\n )\n new_buffer[-1].write(sub_data)\n # increment to next\n data_pos += self._part_size\n\n # free the old memory\n self._buffer[buffer_part].close()\n else:\n # copy the old buffer into a new one\n self._buffer[buffer_part].seek(0)\n new_buffer.append(io.BytesIO(self._buffer[buffer_part].read()))\n\n # close other buffers first\n for b in self._buffer:\n b.close()\n self._buffer = new_buffer\n\n tasks = []\n\n for buffer_part in range(0, len(self._buffer)):\n # seek in the BytesIO buffer to get to the beginning after the\n # writing\n self._buffer[buffer_part].seek(0)\n # upload here\n # schedule the uploads\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(self._conn_obj.conn.upload_part(\n Bucket=self._bucket,\n Key=self._path,\n UploadId=self._upload_id,\n PartNumber=self._current_part + buffer_part,\n Body=self._buffer[buffer_part]\n ))\n tasks.append(task)\n\n # await the completion of the uploads\n res = await asyncio.gather(*tasks)\n for buffer_part in range(0, len(self._buffer)):\n # insert into the multipart info list of dictionaries\n part = res[buffer_part]\n self._multipart_info['Parts'].append(\n {\n 'PartNumber' : self._current_part + buffer_part,\n 'ETag' : part['ETag']\n }\n )\n\n # add the total number of uploads to the current part\n self._current_part += len(self._buffer)\n\n # reset all the byte buffers and their positions\n for buffer_part in range(0, len(self._buffer)):\n self._buffer[buffer_part].close()\n self._buffer = [io.BytesIO()]\n self._seek_pos = 0", "def _createPostRequest(self, postBody: dict) -> object:\n request = HttpRequest()\n request.method = \"POST\"\n for name,value in postBody.items():\n request.POST[name]= value\n return request", "def _CreateRequest(self, url, data=None):\r\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\r\n req = urllib2.Request(url, data=data, headers={\"Accept\": \"text/plain\"})\r\n if self.host_override:\r\n req.add_header(\"Host\", self.host_override)\r\n for key, value in self.extra_headers.iteritems():\r\n req.add_header(key, value)\r\n return req", "def create_http_request(self) -> str:\n\n if self.http_command == \"PUT\" or self.http_command == \"POST\":\n ctype = \"text/html\"\n body = input(\"Enter data to insert: \")\n clength = len(body.encode(HttpClient.FORMAT))\n msg = self.http_command + \" \" + self.file_name + \" HTTP/1.1\\r\\nHost: \" + str(self.uri) \\\n + \"\\r\\nConnection: close\" \\\n + \"\\r\\nContent-Type: \" + ctype \\\n + \"\\r\\nContent-Length: \" + str(clength) + \"\\r\\n\\r\\n\" + body + \"\\r\\n\"\n else:\n msg = self.http_command + \" \" + self.file_name + \" HTTP/1.1\\r\\nHost: \" + str(self.uri) \\\n + \"\\r\\n\\r\\n\"\n\n # + \"\\r\\nIf-Modified-Since: 30 Mar 2021 23:24:50 GMT\"\n\n return msg", "def request_REQMOD(self):\n if not self.server.auth_manager.is_setup(): # Authentication manager is not set up\n self.send_error(403, message='error')\n return\n awss = AWSSignature()\n self.set_icap_response(200)\n\n self.set_enc_request(b' '.join(self.enc_req))\n for h in self.enc_req_headers:\n for v in 
self.enc_req_headers[h]:\n self.set_enc_header(h, v)\n\n content = b''\n if not self.has_body:\n if self.enc_req[0] == b'DELETE': # DeleteObject operation\n self.pending_urls[('DELETE', self.enc_req[1].decode())] = ''\n\n if self.enc_req[0] == b'PUT' and b'x-amz-copy-source' in self.enc_req_headers:\n # Object duplication, copy object key from existing record\n bucket_name = re.search(\"(?<=^\\/)[^\\/]+\", self.enc_req_headers[b'x-amz-copy-source'][0].decode()).group()\n key = re.search(\"^.*\\.s3.*\\.amazonaws.com\", self.enc_req[1].decode()).group() + \\\n re.search(\"(?<=^\\/{})\\/.*\".format(bucket_name), self.enc_req_headers[b'x-amz-copy-source'][0].decode()).group()\n key = re.sub(\"(?<=^https:\\/\\/)[a-z0-9]+(?=.s3)\", bucket_name, key)\n pending_entry = {\n 'source': key,\n 'new_path': self.enc_req[1].decode()\n }\n self.pending_urls[('PUT', self.enc_req[1].decode())] = pending_entry\n\n if self.enc_req[0] == b'POST' and b'?uploads' in self.enc_req[1]:\n # New multipart upload (CreateMultipartUpload)\n object_key = re.search(\"^.*(?=\\?)\", self.enc_req[1].decode()).group()\n self.pending_urls[('POST', self.enc_req[1].decode())] = {'multipart': 1,\n 'object_key': object_key}\n\n self.send_headers(False)\n return\n if self.preview:\n prevbuf = b''\n while True:\n chunk = self.read_chunk()\n if chunk == b'':\n break\n prevbuf += chunk\n if self.ieof:\n self.send_headers(True)\n if len(prevbuf) > 0:\n self.write_chunk(prevbuf)\n self.write_chunk(b'')\n return\n self.cont()\n self.send_headers(True)\n if len(prevbuf) > 0:\n self.write_chunk(prevbuf)\n while True:\n chunk = self.read_chunk()\n self.write_chunk(chunk)\n if chunk == b'':\n break\n else:\n\n while True: # Read request body\n chunk = self.read_chunk()\n content += chunk\n if chunk == b'':\n break\n\n not_object_upload = re.search(\"\\.s3\\.(.+)\\.amazonaws\\.com/\\?\", self.enc_req[1].decode())\n is_multipart_upload = re.search(\"^.*\\.s3\\..*\\.amazonaws.com\\/.*\\?partNumber=\", self.enc_req[1].decode()) is not None\n complete_multipart_request = re.search(\"s3\\..*\\.amazonaws\\.com\\/.*\\?uploadId=(?!.*partNumber=)\",\n self.enc_req[1].decode()) is not None\n if self.enc_req[0] == b'PUT' and not_object_upload is None and not is_multipart_upload:\n # Single object upload\n cp = AESCipher()\n enc_tup = cp.encrypt(content)\n encrypted_content_length = str(len(enc_tup[1])).encode()\n\n # Update these headers if they exist\n if b'content-md5' in self.enc_headers:\n self.enc_headers.pop(b'content-md5')\n encrypted_content_md5 = hashlib.md5(enc_tup[1]).hexdigest()\n self.set_enc_header(b'content-md5', encrypted_content_md5.encode())\n elif b'x-amz-content-sha256' in self.enc_headers:\n self.enc_headers.pop(b'x-amz-content-sha256')\n encrypted_content_sha256 = sha256(enc_tup[1]).hexdigest()\n self.set_enc_header(b'x-amz-content-sha256', encrypted_content_sha256.encode())\n\n self.enc_headers.pop(b'content-length')\n self.set_enc_header(b'content-length', encrypted_content_length)\n\n if b'content-type' in self.enc_headers: # Binary files have no content-type header\n self.enc_headers.pop(b'content-type')\n self.set_enc_header(b'content-type', b'')\n\n content = enc_tup[1]\n key = enc_tup[0]\n #payload_hash = sha256(enc_tup[1]).hexdigest()\n\n sig = awss.gen_signature(request=self.enc_req, headers=self.enc_headers) # Generate AWS signature\n self.set_enc_request(b'PUT ' + sig['url'].encode() + b' HTTP/1.1') # Update URL of request\n\n if b'authorization' in self.enc_headers:\n # Header should always be present if authenticating 
with header option\n self.enc_headers.pop(b'authorization')\n self.set_enc_header(b'authorization', sig['authorization-header'].encode())\n\n self.pending_urls[('PUT', sig['url'])] = {'key': key['key'], 'nonce': key['nonce'], 'tag': key['tag']}\n\n elif self.enc_req[0] == b'PUT' and is_multipart_upload:\n # Multipart upload\n cp = AESCipher()\n object_key = re.search(\"^.*\\.s3\\..*\\.amazonaws.com\\/.*(?=\\?)\", self.enc_req[1].decode()).group()\n part_num = re.search(\"(?<=partNumber=)[0-9]+\", self.enc_req[1].decode()).group()\n upload_id = re.search(\"(?<=uploadId=)[^&]+\", self.enc_req[1].decode()).group()\n file_params = self.server.auth_manager.get_object(object_key) # Get key from existing record\n params_key = base64.b64decode(file_params.key)\n params_nonce = base64.b64decode(file_params.nonce)\n enc_tup = cp.encrypt(content, key=params_key, nonce=params_nonce)\n\n content = enc_tup[1]\n part_length = len(enc_tup[1])\n\n if b'content-md5' in self.enc_headers:\n self.enc_headers.pop(b'content-md5')\n encrypted_content_md5 = base64.b64encode(hashlib.md5(enc_tup[1]).digest())\n self.set_enc_header(b'content-md5', encrypted_content_md5)\n if b'x-amz-content-sha256' in self.enc_headers:\n self.enc_headers.pop(b'x-amz-content-sha256')\n encrypted_content_sha256 = sha256(enc_tup[1]).hexdigest()\n self.set_enc_header(b'x-amz-content-sha256', encrypted_content_sha256.encode())\n\n self.enc_headers.pop(b'content-length')\n self.set_enc_header(b'content-length', str(part_length).encode())\n\n if b'content-type' in self.enc_headers: # binary files have no content-type header\n self.enc_headers.pop(b'content-type')\n self.set_enc_header(b'content-type', b'')\n\n sig = awss.gen_signature(request=self.enc_req, headers=self.enc_headers)\n self.set_enc_request(b'PUT ' + sig['url'].encode() + b' HTTP/1.1')\n if b'authorization' in self.enc_headers:\n self.enc_headers.pop(b'authorization')\n self.set_enc_header(b'authorization', sig['authorization-header'].encode())\n self.pending_urls[('PUT', sig['url'])] = { 'part_num': part_num,\n 'part_length': part_length,\n 'part_tag': enc_tup[0]['tag'],\n 'upload_id': upload_id\n }\n\n elif self.enc_req[0] == b'POST' and complete_multipart_request is True:\n # Complete multipart upload request\n xmldata = et.fromstring(content.decode())\n valid_parts = xmldata.findall(\"./{*}Part/{*}PartNumber\")\n valid_parts = [x.text for x in valid_parts]\n upload_id = re.search(\"(?<=\\?uploadId=)([^\\&]*)\", self.enc_req[1].decode()).group()\n all_parts = self.server.auth_manager.get_object_parts(upload_id=upload_id)\n\n for part in all_parts:\n # Remove invalid parts not referenced by CompleteMultiPartUpload\n if str(part['part_num']) not in valid_parts:\n self.server.auth_manager.delete_part(upload_id=upload_id, part_num=part['part_num'])\n\n self.send_headers(True)\n self.write_chunk(content)", "def post_file(self, method, params, headers=None):\n\n post_params = {'merge': params['merge']}\n file_data = open(params['full_file_path'], 'rb').read() \n files = {'file': (params['file_name'], file_data)} \n r = requests.post(self.endpoint + method, files=files, params=post_params,\n auth=(self.session.auth[0], self.session.auth[1]), headers=headers)\n if 400 <= r.status_code < 600:\n r.reason = r.text\n r.raise_for_status()\n return r", "def create_request(params={}, path='/', method='POST'):\n request = DummyRequest(path)\n request.method = method\n request.args = params\n return request", "def __init__(self, mpu, original_size, download, chunk_size, min_chunk, 
max_chunk):\n super(ChunkedMultipartUpload, self).__init__(mpu)\n self._mpu = mpu\n self._original_size = original_size\n self._download = download\n self._chunk_size = chunk_size\n self._partial_chunks = {}\n self._min_chunk = min_chunk\n self._max_chunk = max_chunk", "def test_upload_file(self):\n data = dict(additional_metadata='additional_metadata_example',\n file='file_example')\n response = self.client.open(\n '/pet/{petId}/uploadImage'.format(pet_id=789),\n method='POST',\n data=data,\n content_type='multipart/form-data')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def multipart_stream(self, metadata, source_path):\n\n boundary = self.MULTIPART_BOUNDARY\n\n yield str.encode('--%s\\r\\nContent-Disposition: form-data; '\n 'name=\"metadata\"\\r\\n\\r\\n' % boundary +\n '%s\\r\\n' % json.dumps(metadata) +\n '--%s\\r\\n' % boundary)\n yield b'Content-Disposition: form-data; name=\"content\"; filename=\"i_love_backups\"\\r\\n'\n yield b'Content-Type: application/octet-stream\\r\\n\\r\\n'\n\n with source_path.open() as stream:\n while True:\n f = stream.read(DEFAULT_BUFFER_SIZE)\n if f:\n yield f\n else:\n break\n\n yield str.encode('\\r\\n--%s--\\r\\n' % boundary +\n 'multipart/form-data; boundary=%s' % boundary)", "def multipart_nested():\n msg = MIMEMultipart(\"mixed\")\n msg[\"From\"] = sender\n msg[\"To\"] = recipient\n msg[\"Subject\"] = \"Nested multipart email\"\n\n part_1 = MIMEMultipart(\"alternative\")\n part_1_text = MIMEText(\"This is the **first** part\\n\", \"plain\")\n part_1_html = MIMEText(\"This is the <strong>first</strong> part\\n\", \"html\")\n part_1.attach(part_1_text)\n part_1.attach(part_1_html)\n\n part_2 = MIMEText(\"This is the second part\\n\", \"plain\")\n\n msg.attach(part_1)\n msg.attach(part_2)\n\n return msg", "def _CreateRequest(self, url, data=None):\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\n req = urllib2.Request(url, data=data)\n if self.host_override:\n req.add_header(\"Host\", self.host_override)\n for key, value in self.extra_headers.iteritems():\n req.add_header(key, value)\n return req", "def _request(http, project, method, data, base_url, client_info):\n user_agent = client_info.to_user_agent()\n headers = {\n \"Content-Type\": \"application/x-protobuf\",\n \"User-Agent\": user_agent,\n connection_module.CLIENT_INFO_HEADER: user_agent,\n }\n api_url = build_api_url(project, method, base_url)\n\n response = http.request(url=api_url, method=\"POST\", headers=headers, data=data)\n\n if response.status_code != 200:\n error_status = status_pb2.Status.FromString(response.content)\n raise exceptions.from_http_status(\n response.status_code, error_status.message, errors=[error_status]\n )\n\n return response.content", "def _do_post(self, url, **kwargs):\n #TODO:\n # Add error handling. 
Check for HTTP status here would be much more conveinent than in each calling method\n scaleioapi_post_headers = {'Content-type':'application/json','Version':'1.0'}\n self.logger.debug(\"_do_post()\")\n\n if kwargs:\n for key, value in kwargs.iteritems():\n if key == 'headers':\n scaleio_post_headers = value\n print \"Adding custom POST headers\"\n if key == 'files':\n upl_files = value\n print \"Adding files to upload\"\n try:\n response = self._session.post(url, headers=scaleioapi_post_headers, verify_ssl=self._im_verify_ssl, files=upl_files)\n self.logger.debug(\"_do_post() - Response: \" + \"{}\".format(response.text))\n if response.status_code == requests.codes.ok:\n return response\n else:\n self.logger.error(\"_do_post() - Response Code: \" + \"{}\".format(response.status_code))\n raise RuntimeError(\"_do_post() - HTTP response error\" + response.status_code)\n except:\n raise RuntimeError(\"_do_post() - Communication error with ScaleIO gateway\")\n return response", "def handle_request_upload(self, msg):\n\n\t\tdirect_response = not msg.arguments or msg.arguments[0] in ('', '/')\n\t\tresult = []\n\t\tfor file_obj in msg.options:\n\t\t\ttmpfilename, filename, name = file_obj['tmpfile'], file_obj['filename'], file_obj['name']\n\n\t\t\t# limit files to tmpdir\n\t\t\tif not os.path.realpath(tmpfilename).startswith(TEMPUPLOADDIR):\n\t\t\t\traise BadRequest('invalid file: invalid path')\n\n\t\t\t# check if file exists\n\t\t\tif not os.path.isfile(tmpfilename):\n\t\t\t\traise BadRequest('invalid file: file does not exists')\n\n\t\t\t# don't accept files bigger than umc/server/upload/max\n\t\t\tst = os.stat(tmpfilename)\n\t\t\tmax_size = int(ucr.get('umc/server/upload/max', 64)) * 1024\n\t\t\tif st.st_size > max_size:\n\t\t\t\tos.remove(tmpfilename)\n\t\t\t\traise BadRequest('filesize is too large, maximum allowed filesize is %d' % (max_size,))\n\n\t\t\tif direct_response:\n\t\t\t\twith open(tmpfilename) as buf:\n\t\t\t\t\tb64buf = base64.b64encode(buf.read())\n\t\t\t\tresult.append({'filename': filename, 'name': name, 'content': b64buf})\n\n\t\tif direct_response:\n\t\t\tself.finished(msg.id, result)\n\t\telse:\n\t\t\tself.handle_request_command(msg)", "def _PostRequest(self, data=None):\n # requests will use about 3 times of data size's memory.\n req = requests.Request(\n 'POST',\n url=self._target_url,\n headers={'Multi-Event': 'True',\n 'Node-ID': str(self.GetNodeID())},\n files=data).prepare()\n clen = int(req.headers.get('Content-Length'))\n # Checks the size of request, and doesn't send if bigger than maximum size.\n if clen > self._max_bytes:\n return (413, 'Request Entity Too Large: The request is bigger '\n 'than %d bytes' % self._max_bytes, clen)\n resp = requests.Session().send(req, timeout=http_common.HTTP_TIMEOUT)\n if resp.headers['Maximum-Bytes']:\n self._max_bytes = int(resp.headers['Maximum-Bytes'])\n return resp.status_code, resp.reason, clen", "def prepare(self):\n if self.request.method.upper() == 'POST':\n if 'expected_size' in self.request.arguments:\n self.request.connection.set_max_body_size(\n int(self.get_argument('expected_size')))\n try:\n total = int(self.request.headers.get(\"Content-Length\", \"0\"))\n except KeyError:\n total = 0\n self.multipart_streamer = MultiPartStreamer(total)", "def create(self) -> requests.request:\n # Check needed values\n if None in [self.args.type, self.args.title, self.args.label, self.args.url]:\n raise Exception('Provide all parameters for asset creation')\n # Check type\n if self.args.type not in ['photo', 'video']:\n raise 
Exception('Asset can only be of type photo or video')\n\n # Check URL validity\n if self.check_url_invalidity():\n raise Exception('Provided URL is not valid')\n\n # Send POST request\n return requests.post(\n self.REQUEST_URL,\n {'type': self.args.type, 'title': self.args.title, 'label': self.args.label, 'url': self.args.url}\n )", "def parse_post(request):\n\n fp = StringIO(request.raw_body)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n post = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return post", "def do_request(self, url, in_data=None, in_file_dict=None):\n url_string=url\n logger.debug(\n \"do_request request string: {string}\".format(string=url_string)\n )\n response=requests.post(\n url_string,\n data=in_data,\n files=in_file_dict,\n cookies=self.cookies #For Authentication!\n )\n return response", "def request(self, url, data=None, params={}, files=None):\n params['token'] = self.token\n request = self.make_request(url, data=data, params=params, files=files)\n return request", "def _GenHttpRequestProto(self):\n request = jobs_pb2.HttpRequest()\n request.source_ip = \"127.0.0.1\"\n request.user_agent = \"Firefox or something\"\n request.url = \"http://test.com/test?omg=11%45x%20%20\"\n request.user = \"anonymous\"\n request.timestamp = int(time.time() * 1e6)\n request.size = 1000\n return request", "def from_response(cls, response: ClientResponse) -> MultipartResponseWrapper:\n ...", "def create(app, client_stream, client_addr, client_sock=None):\n # request line\n line = Request._safe_readline(client_stream).strip().decode()\n if not line:\n return None\n method, url, http_version = line.split()\n http_version = http_version.split('/', 1)[1]\n\n # headers\n headers = NoCaseDict()\n while True:\n line = Request._safe_readline(client_stream).strip().decode()\n if line == '':\n break\n header, value = line.split(':', 1)\n value = value.strip()\n headers[header] = value\n\n return Request(app, client_addr, method, url, http_version, headers,\n stream=client_stream, sock=client_sock)", "def upload(api_token, base_url, upload_file, metadata):\n\n upload_url = f\"{base_url}data_files/api_create?auth_token={api_token}\"\n files = {'file': open(upload_file, 'rb')}\n response = requests.post(upload_url, files=files, data=metadata)\n\n # Print out the outcome of the upload\n if response.status_code == 200:\n print(f'File {upload_file} successfully uploaded to HIEv')\n else:\n print(\n f'ERROR - There was a problem uploading file {upload_file} to HIEv')", "def initiate_multipart_upload(self, key_name, headers=None,\r\n reduced_redundancy=False,\r\n metadata=None, encrypt_key=False):\r\n query_args = 'uploads'\r\n provider = self.connection.provider\r\n if headers is None:\r\n headers = {}\r\n if reduced_redundancy:\r\n storage_class_header = provider.storage_class_header\r\n if storage_class_header:\r\n headers[storage_class_header] = 'REDUCED_REDUNDANCY'\r\n # TODO: what if the provider doesn't support reduced redundancy?\r\n # (see boto.s3.key.Key.set_contents_from_file)\r\n if encrypt_key:\r\n headers[provider.server_side_encryption_header] = 'AES256'\r\n if metadata is None:\r\n metadata = {}\r\n\r\n headers = boto.utils.merge_meta(headers, metadata,\r\n 
self.connection.provider)\r\n response = self.connection.make_request('POST', self.name, key_name,\r\n query_args=query_args,\r\n headers=headers)\r\n body = response.read()\r\n boto.log.debug(body)\r\n if response.status == 200:\r\n resp = MultiPartUpload(self)\r\n h = handler.XmlHandler(resp, self)\r\n xml.sax.parseString(body, h)\r\n return resp\r\n else:\r\n raise self.connection.provider.storage_response_error(\r\n response.status, response.reason, body)", "def dorequest( request, body=None, chunk=None, trailers=None ):", "def make_request(self, url, base_uri=None, params=None, auth=REQUIRED, method=\"GET\", silo=False, **kwargs):\n\n params = params or dict()\n body = kwargs.get('body', '')\n headers = {'User-Agent': 'python-photobucket/0.2 (Language=Python)', 'Content-type':'application/x-www-form-urlencoded'}\n headers.update(kwargs.get('extra_headers', {}))\n # Unless explicitly provided, set the default response format to json.\n params.setdefault('format', 'json')\n if 'id' in params:\n params['id'] = self.clean_identifier(params['id'])\n # Remove all params with a value of \"None\"\n params = remove_empty(params)\n\n # Begin auth stuff...\n token = None\n consumer = OAuthConsumer(key=self.key, secret=self.secret)\n if auth in (REQUIRED, OPTIONAL):\n # Setup the oauth token\n try:\n token = Token(key=self.token, secret=self.token_secret)\n except ValueError, e:\n if auth == REQUIRED:\n # Only raise the exception if auth is required.\n raise PhotobucketAPIError(\"Token and Token secret must be set.\")\n\n # Give priority to base_uri since its a quick override of class.URI\n req_uri = \"%s%s\" % (base_uri or self.URI, url)\n\n if silo:\n # This request has to be sent to a specific \"silo\" or \"subdomain\".\n uri = \"http://%s%s\" % (self.subdomain, req_uri)\n # Don't allow redirects if this is to be sent to a specific silo.\n # For in photobucket's own words..\n # \"Photobucket ultimately prefers that you use the information given, rather than relying on the redirects\"\n allow_redirects = False\n else:\n uri = \"http://%s%s\" % (self.DOMAIN, req_uri)\n allow_redirects = True\n req = OAuthRequest.from_consumer_and_token(consumer, token, method, uri, parameters=params, body=body)\n\n # Make sure to ALWAYS pass the main domain to the signature instead of the actual url to be requested.\n req.normalized_url = \"http://%s%s\" % (self.DOMAIN, req_uri)\n req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)\n\n try:\n # I do this to take advantage of the already defined requests and their default values.\n response = getattr(requests, method.lower())(req.to_url(), headers=headers, allow_redirects=allow_redirects)\n response.raise_for_status(allow_redirects=allow_redirects)\n except AttributeError:\n raise PhotobucketAPIError('Invalid Http method')\n except HTTPError, e:\n # This whole handling is still in Beta. \n # Because I'm still deciding on whether to keep it \n # or use \"safe_mode\" for all \"POST\" requests. 
To take advantage of Photobucket's redirect.\n # Suggestions are more than welcome...\n if e.response.status_code == REDIRECT:\n # Need to catch a redirect error because that means that user sent a request\n # without a \"silo\" so it needs to be stored.\n content = self.parse_response(e.response.content, params['format'])\n # Not too sure about this...\n self.subdomain = content['content']['subdomain'].split('//')[1]\n return self.make_request(url, base_uri, params, auth, method, silo, **kwargs)\n error = PhotobucketError(e.message)\n error.response = e.response\n raise error\n return response", "async def create_upload_file(file: UploadFile = File(...)):\n return dict(filename=file.filename, content_type=file.content_type)", "def post_upload(self, url, file_path, metadata):\n full_url = self.api_url + starts_slash(ends_slash(url))\n headers = {\"Authorization\": \"Bearer \" + self.token}\n body = {\"metadata\": json.dumps(metadata)}\n logging.info(\"POST url: \" + str(full_url))\n logging.info(\"POST header: \" + str(headers))\n logging.info(\"POST body: \" + str(body))\n filedata = None\n if isfile(file_path):\n filedata = {\"filedata\": open(file_path, \"rb\")}\n result = requests.post(url=full_url, headers=headers,\n files=filedata, data=body).json()\n logging.info(\"POST result: \"+str(result))\n return result", "def test_upload_photo(self):\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.normal_token.key)\n\n url = reverse('crew-api:upload-photo', args=[self.normal_user.crew.uuid])\n\n photo_file = self.generate_photo_file()\n\n data = {\n 'photo':photo_file\n }\n\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def upload_file():\r\n # Define an image object\r\n image_path = r'F:\\Testing_Development\\Projects\\Interface_requests\\Interface_requests\\upload_files\\Napoleon Bonaparte.jpg'\r\n file = {'file': open('Napoleon Bonaparte.jpg', 'rb')}\r\n # response = requests.post(base_url + '/post', files=file, timeout=3)\r\n response = requests.post(base_url + '/post', files=file)\r\n print(response.status_code)\r\n print(response.text)", "def gform_request(\n file_name: str,\n *,\n parent_folder_id: drive_api.ResourceID,\n drive_service: Optional[discovery.Resource] = None,\n) -> http.HttpRequest:\n return request(\n name=file_name,\n mime_type=mime_types.gform,\n parent_folder_id=parent_folder_id,\n drive_service=drive_service,\n )", "def complete_multipart_upload(Bucket=None, Key=None, MultipartUpload=None, UploadId=None, RequestPayer=None):\n pass", "def post(self, request, work_batch_id):\n\n from sentry.models.workbatch import WorkBatch\n\n try:\n work_batch = WorkBatch.objects.get(pk=int(work_batch_id))\n except WorkBatch.DoesNotExist:\n raise ResourceDoesNotExist\n\n logger = logging.getLogger('clims.files')\n logger.info('workbatchfile.start')\n\n if 'file' not in request.data:\n return Response({'detail': 'Missing uploaded file'}, status=400)\n\n fileobj = request.data['file']\n\n full_name = request.data.get('name', fileobj.name)\n if not full_name or full_name == 'file':\n return Response({'detail': 'File name must be specified'}, status=400)\n\n name = full_name.rsplit('/', 1)[-1]\n\n if _filename_re.search(name):\n return Response(\n {\n 'detail': 'File name must not contain special whitespace characters'\n }, status=400\n )\n\n headers = {\n 'Content-Type': fileobj.content_type,\n }\n for headerval in request.data.getlist('header') or ():\n try:\n k, v = headerval.split(':', 1)\n 
except ValueError:\n return Response({'detail': 'header value was not formatted correctly'}, status=400)\n else:\n if _filename_re.search(v):\n return Response(\n {\n 'detail': 'header value must not contain special whitespace characters'\n },\n status=400\n )\n headers[k] = v.strip()\n\n file = File.objects.create(\n name=name,\n type='work_batch.file',\n headers=headers,\n )\n file.putfile(fileobj, logger=logger)\n\n try:\n with transaction.atomic():\n # TODO: Remove the organization id from the user task file\n work_batch_file = WorkBatchFile.objects.create(\n organization_id=work_batch.organization_id,\n file=file,\n name=full_name,\n work_batch_id=work_batch.id\n )\n except IOError:\n file.delete()\n return Response({'detail': ERR_FILE_EXISTS}, status=409)\n\n return Response(serialize(work_batch_file, request.user), status=201)", "def http_request(self, path, fields=None, files=None, headers=None, method=None):\r\n if fields:\r\n debug_fields = fields.copy()\r\n else:\r\n debug_fields = {}\r\n\r\n if 'password' in debug_fields:\r\n debug_fields['password'] = '**************'\r\n url = self._make_url(path)\r\n self.debug('HTTP request to %s: %s' % (url, debug_fields))\r\n\r\n headers = headers or {}\r\n if fields or files:\r\n content_type, body = self._encode_multipart_formdata(fields, files)\r\n headers.update({\r\n 'Content-Type': content_type,\r\n 'Content-Length': str(len(body))\r\n })\r\n r = urllib2.Request(url, body, headers)\r\n else:\r\n r = urllib2.Request(url, headers=headers)\r\n\r\n if method:\r\n r.get_method = lambda: method\r\n\r\n try:\r\n return urllib2.urlopen(r).read()\r\n except urllib2.URLError, e:\r\n try:\r\n self.debug(e.read())\r\n except AttributeError:\r\n pass\r\n\r\n self.die('Unable to access %s. The host path may be invalid\\n%s' %\r\n (url, e))\r\n except urllib2.HTTPError, e:\r\n return self.die('Unable to access %s (%s). 
The host path may be invalid'\r\n '\\n%s' % (url, e.code, e.read()))", "def request_v2(self, method, url, params=None, json=None, data=None, affinity=None, streaming=False):\n params = params or {}\n json = json or {}\n data = data or {}\n url = '{}{}'.format(self.api_entry_point, url)\n method = method.upper()\n if method not in ['GET', 'POST']:\n raise ValueError(\"method should be in ['GET', 'POST']\")\n\n headers = self.session.headers.copy()\n if affinity is not None:\n headers['HC-WorkerAffinity'] = affinity\n\n if method == 'POST' and streaming:\n # Create new data with encoder\n encoder = MultipartEncoder(fields=data)\n\n def callback(monitor):\n if 'size' in data:\n msg = '{0:.0f}% uploaded '.format(100 * monitor.bytes_read / int(data['size']))\n else:\n msg = '{} bytes uploaded '.format(monitor.bytes_read)\n print(msg, flush=True, end='\\r')\n\n multi_data = MultipartEncoderMonitor(encoder, callback)\n\n headers = self.session.headers.copy()\n headers['Content-Type'] = multi_data.content_type\n resp = self.session.request(method, url, params=params, json=json, data=multi_data, headers=headers)\n else:\n resp = self.session.request(method, url, params=params, json=json, data=data, headers=headers)\n\n try:\n return resp\n except Exception:\n print(\"Unexpected error\")\n sys.exit()", "def to_httpx_request(cls, **kwargs):\n request = kwargs[\"request\"]\n raw_url = (\n request.url.scheme,\n request.url.host,\n request.url.port,\n request.url.target,\n )\n return httpx.Request(\n request.method,\n parse_url(raw_url),\n headers=request.headers,\n stream=request.stream,\n extensions=request.extensions,\n )", "def make_request(self, path, method, args=None, files=None, batch=False, raw_path=False):\n args = dict(args or {})\n args = {k.encode('utf-8'): unicode(v).encode('utf-8')\n for k, v in args.items()}\n\n if batch:\n # Then just return a dict for the batch request\n return {\n 'method': method,\n 'relative_url': '%s?%s' % (path, urllib.urlencode(args))\n }\n logger.info('Making a %s request at %s/%s with %s' % (method, self.api_root, path, args))\n if 'access_token' not in args:\n args['access_token'] = self.access_token\n try:\n if method == 'GET':\n url = path if raw_path else '%s/%s?%s' % (self.api_root, path, urllib.urlencode(args))\n f = urllib2.urlopen(url)\n elif method == 'POST':\n url = path if raw_path else '%s/%s' % (self.api_root, path)\n if files:\n encoder = MultipartFormdataEncoder()\n content_type, body = encoder.encode(args, files)\n req = urllib2.Request(url, data=body)\n req.add_header('Content-Type', content_type)\n f = urllib2.urlopen(req)\n else:\n f = urllib2.urlopen(url, urllib.urlencode(args))\n elif method == 'DELETE':\n url = path if raw_path else '%s/%s?%s' % (self.api_root, path, urllib.urlencode(args))\n req = urllib2.Request(url)\n req.get_method = lambda: 'DELETE'\n f = urllib2.urlopen(req)\n else:\n raise\n return json.load(f)\n except urllib2.HTTPError as e:\n err = AdsAPIError(e)\n # Info, not warning or error, because these often happen as an expected result because of user input\n # and well formed requests that facebook rejects.\n logger.info(u'API Error: {}'.format(err.message))\n raise err\n except urllib2.URLError as e:\n logger.warn(u'URLError: %s' % e.reason)\n raise", "def _upload_file_to_file_system(upload_details):\n upload_url = \"%s%s\" % (main_url, upload_details['upload_path'])\n fsysparams = {\n 'qqfile': upload_filepath,\n 'import_record': upload_dataset_id,\n 'source_type': upload_datatype\n }\n return 
requests.post(upload_url,\n params=fsysparams,\n files={'file': open(upload_filepath, 'rb')},\n headers=upload_header)", "def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):\n with open(file_path, 'rb') as f:\n data = f.read()\n content_type, content_encoding = mimetypes.guess_type(file_path)\n\n headers = {\n 'x-goog-project-id': project_id,\n 'x-goog-api-version': API_VERSION,\n 'x-goog-acl': acl,\n 'Content-Length': '%d' % len(data)\n }\n if content_type: headers['Content-Type'] = content_type\n if content_type: headers['Content-Encoding'] = content_encoding\n\n try:\n response, content = auth_http.request(\n 'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),\n method='PUT',\n headers=headers,\n body=data)\n except httplib2.ServerNotFoundError, se:\n raise Error(404, 'Server not found.')\n\n if response.status >= 300:\n raise Error(response.status, response.reason)\n\n return content", "def create_upload(projectArn=None, name=None, type=None, contentType=None):\n pass", "def post_with_string_data_support(original_function, self, path, data={},\n content_type=MULTIPART_CONTENT, **extra):\n if content_type == MULTIPART_CONTENT and getattr(data, 'items', None) is None:\n parsed = urlparse(path)\n r = {\n 'CONTENT_LENGTH': len(data),\n 'CONTENT_TYPE': content_type,\n 'PATH_INFO': self._get_path(parsed),\n 'QUERY_STRING': parsed[4],\n 'REQUEST_METHOD': 'POST',\n 'wsgi.input': FakePayload(data),\n }\n r.update(extra)\n return self.request(**r)\n else:\n return original_function(self, path, data, content_type, **extra)", "def send_multipart(self, msg, flags=0, copy=True, track=False, **kwargs):\n kwargs['flags'] = flags\n kwargs['copy'] = copy\n kwargs['track'] = track\n return self._add_send_event('send_multipart', msg=msg, kwargs=kwargs)", "def api_upload():\n return make_response(file_manager.save_uploaded_file(), 200)", "def upload_part(Body=None, Bucket=None, ContentLength=None, ContentMD5=None, Key=None, PartNumber=None, UploadId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, RequestPayer=None):\n pass" ]
[ "0.72485816", "0.67776537", "0.6666745", "0.65953016", "0.64735705", "0.6339751", "0.63103724", "0.62898654", "0.6287804", "0.61854047", "0.6174467", "0.60619015", "0.6049812", "0.6040689", "0.60222596", "0.6001722", "0.60011226", "0.5974838", "0.59656656", "0.5947115", "0.590833", "0.590405", "0.58987194", "0.5893319", "0.5891552", "0.5883227", "0.5868902", "0.5784017", "0.57729906", "0.57641363", "0.57539696", "0.5751821", "0.57507503", "0.5735081", "0.57342553", "0.57342553", "0.57342553", "0.57342553", "0.57309514", "0.5723521", "0.5716146", "0.56908953", "0.56750876", "0.5671851", "0.5652328", "0.56281996", "0.5625529", "0.5619135", "0.5618145", "0.56172603", "0.55983", "0.5573191", "0.5554493", "0.5550149", "0.55215317", "0.5514261", "0.55138475", "0.55040175", "0.5461537", "0.5457449", "0.5450697", "0.542002", "0.53866875", "0.5349994", "0.5317413", "0.53148746", "0.5293806", "0.52818197", "0.5281403", "0.5279879", "0.52738285", "0.5269733", "0.52688104", "0.526286", "0.5260954", "0.52482444", "0.5244554", "0.5231155", "0.522841", "0.52213675", "0.52059364", "0.52017295", "0.5198422", "0.5183971", "0.5165943", "0.5159311", "0.51574814", "0.514807", "0.5147585", "0.51387465", "0.51377195", "0.5135711", "0.5122828", "0.5119668", "0.5118925", "0.5115725", "0.51157176", "0.5115491", "0.5114978", "0.5096317" ]
0.57908267
27
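The top-ranked document in the row above rewrites S3 PUT bodies through an AESCipher helper whose internals are not shown, then recomputes content-length, content-md5, and x-amz-content-sha256 over the ciphertext before re-signing the request. A minimal sketch of that header-rewriting step, assuming AES-GCM from the third-party cryptography package as a stand-in for AESCipher (encrypt_for_upload is a hypothetical name, not part of the dataset's code):

import base64
import hashlib
import os

from cryptography.hazmat.primitives.ciphers.aead import AESGCM  # assumed third-party dependency


def encrypt_for_upload(body: bytes) -> tuple:
    # Fresh key and 96-bit nonce per object; AESGCM appends the auth tag
    # to the ciphertext it returns.
    key = AESGCM.generate_key(bit_length=256)
    nonce = os.urandom(12)
    ciphertext = AESGCM(key).encrypt(nonce, body, None)
    headers = {
        "content-length": str(len(ciphertext)),
        # S3 validates Content-MD5 as the base64 of the raw digest of the
        # bytes actually sent, so it must be taken over the ciphertext.
        "content-md5": base64.b64encode(hashlib.md5(ciphertext).digest()).decode(),
        "x-amz-content-sha256": hashlib.sha256(ciphertext).hexdigest(),
    }
    # Record to persist so a later GET of the same URL can decrypt the object.
    record = {"key": base64.b64encode(key).decode(),
              "nonce": base64.b64encode(nonce).decode()}
    return record, ciphertext, headers

After this, the proxy only has to swap the request body for the ciphertext, replace the three headers, and regenerate the AWS signature, which is the shape of the single-object PUT branch in the document above.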
Vectorized function to calculate the great-circle distance between two points or between vectors of points. Please note that this method is copied from the OSMnx method of the same name.
def great_circle_vec(lat1: float, lng1: float, lat2: float, lng2: float, earth_radius: float=6371009.0) -> float: phi1 = np.deg2rad(90 - lat1) phi2 = np.deg2rad(90 - lat2) theta1 = np.deg2rad(lng1) theta2 = np.deg2rad(lng2) cos = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) + np.cos(phi1) * np.cos(phi2)) # Ignore warnings during this calculation because numpy warns it cannot # calculate arccos for self-loops since u==v with warnings.catch_warnings(): warnings.simplefilter('ignore') arc = np.arccos(cos) # Return distance in units of earth_radius distance = arc * earth_radius return distance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def great_circle_distance(A, B):\n AdotB = numpy.einsum('...i,...i', A, B)\n AcrossB = numpy.cross(A, B)\n last_axis = len(AcrossB.shape) - 1\n return arctan2(linalg.norm(AcrossB, axis=last_axis), AdotB)", "def great_circle_distance(theta1,phi1,theta2,phi2):\n alt1 = np.pi/2.-theta1\n alt2 = np.pi/2.-theta2\n return np.arccos(np.sin(alt1)*np.sin(alt2)+np.cos(alt1)*np.cos(alt2)*np.cos(phi1-phi2))", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def great_circle_distance(pnt1, pnt2, radius):\n\t\t\tlat1 = radians(pnt1[0])\n\t\t\tlat2 = radians(pnt2[0])\n\t\t\tdLat = lat2 - lat1\n\t\t\tdLon = radians(pnt2[1]) - radians(pnt1[1])\n\t\t\ta = sin(dLat / 2.0) ** 2 + cos(lat1) * cos(lat2) * sin(dLon / 2.0) ** 2\n\t\t\treturn 2 * asin(min(1, sqrt(a))) * radius * 57.2957795", "def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )", "def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )", "def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)", "def euclidian_distance(x: np.arrays, y: np.arrays):\r\n diff = x - np.mean(y, axis=0)\r\n return np.sqrt(np.dot(diff.T, diff))", "def great_circle_vec(lat1, lng1, lat2, lng2, earth_radius=6371009):\n\n phi1 = np.deg2rad(lat1)\n phi2 = np.deg2rad(lat2)\n d_phi = phi2 - phi1\n\n theta1 = np.deg2rad(lng1)\n theta2 = np.deg2rad(lng2)\n d_theta = theta2 - theta1\n\n h = np.sin(d_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(d_theta / 2) ** 2\n h = np.minimum(1.0, h) # protect against floating point errors\n\n arc = 2 * np.arcsin(np.sqrt(h))\n\n # return distance in units of earth_radius\n distance = arc * earth_radius\n return distance", "def dist(v1: vect2d, v2: vect2d) -> float:\n d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5\n return d", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))", "def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))", "def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())", "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def _great_circle_distance(ra1, dec1, ra2, dec2):\n from numpy import radians, degrees, sin, cos, arctan2, hypot \n\n # terminology from the Vicenty formula - lambda and phi and\n # \"standpoint\" and \"forepoint\"\n lambs = radians(ra1)\n phis = radians(dec1)\n lambf = radians(ra2)\n phif = radians(dec2)\n \n dlamb = lambf - lambs\n \n numera = cos(phif) * sin(dlamb)\n numerb = cos(phis) * sin(phif) - sin(phis) * cos(phif) * cos(dlamb)\n numer = hypot(numera, numerb)\n denom = sin(phis) * sin(phif) + cos(phis) * cos(phif) * cos(dlamb)\n\n return degrees(np.arctan2(numer, denom))", "def distance(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n distance_vector: np.ndarray = x - y\n distance = compute_norm(distance_vector)\n return distance", "def euclidean_distance(point1, point2):\n\n return math.sqrt(sum([(x - y) ** 2 for x, y in zip(point1, point2)]))", "def euclidean_distance(point1, point2):\n return np.linalg.norm(np.array(point1) - np.array(point2))", "def 
euclidean_distance(point_one, point_two):\n return np.linalg.norm(point_one-point_two)", "def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5", "def _dist(x, y):\n return np.sqrt(np.mean(np.square(x - y)))", "def get_euclid_dist(vec_1, vec_2):\n\n\treturn np.sqrt(np.sum(np.fabs(vec_1 - vec_2), axis=1)).flatten()", "def great_circle(p1, p2):\n # Note: GeoPy expects (latitude, longitude) pairs.\n return geopy.distance.great_circle(\n (p1.y, p1.x),\n (p2.y, p2.x)\n ).miles", "def getDistance(point1, point2x, point2y):\n distance = np.sqrt((point2x - point1[0])**2 + (point2y - point1[1])**2)\n return distance", "def distance(point1, point2):\n return math.sqrt(math.pow((point1[0] - point2[0]), 2) +\n math.pow(point1[1] - point2[1], 2))", "def _great_circle_distance_fast(ra1, dec1, ra2, dec2, threads):\n\n import numexpr as ne\n \n # terminology from the Vicenty formula - lambda and phi and\n # \"standpoint\" and \"forepoint\"\n lambs = np.radians(ra1)\n phis = np.radians(dec1)\n lambf = np.radians(ra2)\n phif = np.radians(dec2)\n \n dlamb = lambf - lambs\n\n #using numexpr\n #nthreads = ne.detect_number_of_cores()\n nthreads = threads\n ne.set_num_threads(nthreads)\n\n hold1=ne.evaluate('sin(phif)') #calculate these once instead of a few times!\n hold2=ne.evaluate('sin(phis)')\n hold3=ne.evaluate('cos(phif)')\n hold4=ne.evaluate('cos(dlamb)')\n hold5=ne.evaluate('cos(phis)')\n numera = ne.evaluate( 'hold3 * sin(dlamb)')\n numerb = ne.evaluate('hold5 * hold1 - hold2 * hold3 * hold4')\n numer = ne.evaluate('sqrt(numera**2 + numerb**2)')\n denom = ne.evaluate('hold2 * hold1 + hold5 * hold3 * hold4')\n pi=math.pi\n\n return ne.evaluate('(arctan2(numer, denom))*180.0/pi')", "def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)", "def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)", "def eucl_dist(x_0, y_0, x_1, y_1):\n return sqrt((x_1 - x_0)**2 + (y_1 - y_0)**2)", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def _distance2d(x, y, center=None):\n if center is None:\n xc, yc = np.mean(x), np.mean(y)\n else:\n xc, yc = center[0], center[1]\n r = np.sqrt((x-xc)**2 + (y-yc)**2).ravel()\n return r", "def distance(point0, point1):\n if point0 is None or point1 is None:\n return None\n diff = np.subtract(point0, point1)\n return np.sqrt(diff[0] ** 2 + diff[1] ** 2)", "def dist(pnt1, pnt2):\n return ((pnt2[0] - pnt1[0])**2 + (pnt2[1] - pnt1[1])**2 + (pnt2[2] - pnt1[2])**2)**0.5", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance", "def dist_vincenty(lat1, lon1, lat2, lon2, iterations=20):\r\n if lat1 < -90 or lat1 > 90 or lat2 < -90 or lat2 > 90 or lon1 < -180 or lon1 > 180 or lon2 < -180 or lon2 > 180:\r\n raise ValueError(\r\n \"Latitude values shoulds range from (-90,90) and longitude from (-180,180) but one of the input values is out of bounds. 
Latitude_1: %f, Logitude_1: %f, Latitude_2: %f, Logitude_2: %f\" %\r\n (lat1, lon1, lat2, lon2))\r\n\r\n major, minor, f = 6378137, 6356752.314245, 1 / 298.257223563\r\n\r\n lat1, lng1, lat2, lng2 = radians(\r\n lat1), radians(lon1), radians(lat2), radians(lon2)\r\n delta_lng = lng2 - lng1\r\n reduced_lat1, reduced_lat2 = atan(\r\n (1 - f) * tan(lat1)), atan((1 - f) * tan(lat2))\r\n\r\n sin_reduced1, cos_reduced1 = sin(reduced_lat1), cos(reduced_lat1)\r\n sin_reduced2, cos_reduced2 = sin(reduced_lat2), cos(reduced_lat2)\r\n\r\n lambda_lng = delta_lng\r\n lambda_prime = 2 * pi\r\n while abs(lambda_lng - lambda_prime) > 10e-12 and iterations > 0:\r\n sin_lambda_lng, cos_lambda_lng = sin(lambda_lng), cos(lambda_lng)\r\n\r\n sin_sigma = sqrt(\r\n (cos_reduced2 * sin_lambda_lng) ** 2 +\r\n (cos_reduced1 * sin_reduced2 -\r\n sin_reduced1 * cos_reduced2 * cos_lambda_lng) ** 2\r\n )\r\n if sin_sigma == 0:\r\n return 0 # Coincident points\r\n\r\n cos_sigma = (\r\n sin_reduced1 * sin_reduced2 +\r\n cos_reduced1 * cos_reduced2 * cos_lambda_lng\r\n )\r\n sigma = atan2(sin_sigma, cos_sigma)\r\n\r\n sin_alpha = (cos_reduced1 * cos_reduced2 * sin_lambda_lng / sin_sigma)\r\n cos_sq_alpha = 1 - sin_alpha ** 2\r\n\r\n if cos_sq_alpha != 0:\r\n cos2_sigma_m = cos_sigma - 2 * \\\r\n (sin_reduced1 * sin_reduced2 / cos_sq_alpha)\r\n else:\r\n cos2_sigma_m = 0.0 # Equatorial line\r\n\r\n C = f / 16. * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))\r\n\r\n lambda_prime = lambda_lng\r\n lambda_lng = (\r\n delta_lng + (1 - C) * f * sin_alpha * (\r\n sigma + C * sin_sigma * (\r\n cos2_sigma_m + C * cos_sigma * (-1 + 2 * cos2_sigma_m ** 2)\r\n )\r\n )\r\n )\r\n iterations -= 1\r\n\r\n if iterations == 0:\r\n raise ValueError(\"Vincenty formula failed to converge!\")\r\n\r\n u_sq = cos_sq_alpha * (major ** 2 - minor ** 2) / minor ** 2\r\n A = 1 + u_sq / 16384. * (4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq)))\r\n B = u_sq / 1024. * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))\r\n delta_sigma = B * sin_sigma * (\r\n cos2_sigma_m + B / 4. * (cos_sigma * (-1 + 2 * cos2_sigma_m ** 2) -\r\n B / 6. 
* cos2_sigma_m * (-3 + 4 * sin_sigma ** 2) *\r\n (-3 + 4 * cos2_sigma_m ** 2))\r\n )\r\n s = minor * A * (sigma - delta_sigma)\r\n\r\n return round(s, 3) # round to 1mm precision\r", "def great_circle_distance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2\n c = 2 * np.arcsin(np.sqrt(a))\n r = 6371 # Radius of earth in kilometers\n return (c * r)", "def dist_points(x,y):\n\n return abs(x[0]-y[0]) + abs(x[1]-y[1])", "def compute_distance(point_1, point_2):\n x1, y1, x2, y2 = point_1[0], point_1[1], point_2[0], point_2[1]\n distance = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n\n return distance", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))", "def distance(X, Y):\n\n return math.sqrt(np.sum((X-Y)**2))", "def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d", "def GetDistance(vec1,vec2):\n diff = np.asarray(vec1) - np.asarray(vec2)\n squareDistance = np.dot(diff.T, diff)\n return math.sqrt(squareDistance)", "def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def euclidean_dist_vec(y1, x1, y2, x2):\n\n # euclid's formula\n distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n return distance", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']", "def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)", "def euclidean_distance(x, y):\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))", "def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)", "def _euclidian_distance(self, x1, x2):\n a= x1-x2\n a2 = a**2\n b = np.sum(a2, axis=1)\n c = np.sqrt(b)\n return c", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def great_circle(lat_1, long_1, lat_2, long_2):\n long_1 = m.radians(long_1)\n lat_1 = m.radians(lat_1)\n long_2 = m.radians(long_2)\n lat_2 = m.radians(lat_2)\n\n d = 2 * 6367.45 * m.asin(\n m.sqrt(haversine(lat_2 - lat_1)\n + m.cos(lat_1)*m.cos(lat_2) *\n haversine(long_2 - long_1)))\n return d", "def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)", "def distance(p0, p1):\n return( numpy.sqrt( (p0[0]-p1[0])**2 + \n (p0[1]-p1[1])**2 + \n (p0[2]-p1[2])**2 ) )", "def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))", "def distance(x1, y1, x2, y2):\n dist = ((x1-x2)**2 + (y1-y2)**2)**0.5\n return dist", "def euclidean(x, y):\n return np.sqrt(np.sum((x - y) ** 2))", "def dist(a,b): # compute distance between two points a & b\n return mag(sub(a,b))", "def dist_vec(x, y, cxy):\n x0 = cxy[0]\n y0 = cxy[1]\n x_dist = x - x0\n y_dist = y - y0\n dist = np.sqrt(x_dist ** 2 + y_dist ** 2)\n return dist", "def euclidian_distance(x1, y1, x2, y2):\n distance = 
sqrt(pow((x1-x2), 2)+(pow((y1-y2), 2)))\n return distance", "def _greatCircleDistance(self, long1, lat1, long2, lat2):\n # convert decimal degrees to radians \n long1, lat1, long2, lat2 = map(radians, [float(long1), float(lat1), float(long2), float(lat2)])\n # haversine formula \n dlon = long2 - long1\n #print(long2)\n #print(long1) \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n #print(c*r)\n return c * r", "def hyperboloidDist(point1, point2):\n return np.arccosh(-minkowskiDot(point1, point2))", "def euclideanDistance(x1,y1,x2,y2):\n distance = math.sqrt(abs(math.pow((x2-x1),2)) + abs(math.pow((y2-y1),2)))\n return distance", "def pointwise_distance(x, y, square=True):\n with torch.no_grad():\n x = x.squeeze(-1)\n y = y.squeeze(-1)\n\n x = x.unsqueeze(-1)\n y = y.transpose(0,1).unsqueeze(0)\n diff = x - y\n dis = torch.sum(torch.square(diff), dim=1)\n if torch.min(dis) < 0:\n raise RuntimeError('dis small than 0')\n if square:\n return dis\n else:\n return torch.sqrt(dis)", "def DISTANCE(x,y,x2=0,y2=0):\n\treturn sqrt((x-x2)*(x-x2)+(y-y2)*(y-y2))", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def euclidean_distance(x, y):\n distance = 0\n for i, j in zip(x, y):\n distance += (i - j) ** 2\n return math.sqrt(distance)", "def distance(x, y):\n dist = [pow((x-y), 2) for x, y in zip(x,y)]\n dist = math.sqrt(sum(dist))\n \n return dist", "def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)", "def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)", "def distance_point_point(a, b):\n ab = subtract_vectors(b, a)\n return length_vector(ab)", "def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))", "def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)", "def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)", "def distance(p1,p2):\n import numpy as np\n x = np.sqrt(sum(np.power(p2-p1,2)))\n return(x)", "def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)", "def distance(x1, y1, x2, y2):\n\n distance_between_two_points = (((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1))) ** 0.5\n return round(distance_between_two_points, 2)", "def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2", "def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))", "def vector_arc_distance(v_1, v_2):\n delta = math.sqrt(\n (v_2[0] - v_1[0]) ** 2 + (v_2[1] - v_1[1]) ** 2 + (v_2[2] - v_1[2]) ** 2\n )\n return 2 * 1 * delta / 2 / 1 # assuming unit circle so R = 1", "def _calc_distance(r1, r2):\n return np.linalg.norm(r1 - r2)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2", "def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2", "def euclidDist(x1, y1, x2, y2):\n c = math.sqrt(((x2-x1)**2) + ((y2-y1)**2))\n\n return c", "def euclidean_distance(a, b):\n return np.linalg.norm(a - b)", "def 
distance_entre_deux_points(couple_points_1,couple_points_2):\r\n Xa = couple_points_1[0]\r\n Xb = couple_points_2[0]\r\n Ya = couple_points_1[1]\r\n Yb = couple_points_2[1]\r\n return math.sqrt( ( (Xb-Xa)**2) + ( (Yb-Ya)**2) )", "def dist_2D(v1, v2):\n return ((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2 )**(0.5)", "def distance_between_points(p1,p2):\n return math.sqrt((p2.x-p1.x)**2+(p2.y-p1.y)**2)", "def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)", "def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5", "def great_circle(a: Point, b: Point) -> Km:\n\n lat1, lng1, lat2, lng2 = map(radians, [a.latitude, a.longitude, b.latitude, b.longitude])\n sin_lat1, sin_lat2 = map(sin, [lat1, lat2])\n cos_lat1, cos_lat2 = map(cos, [lat1, lat2])\n delta_lng = lng2 - lng1\n cos_delta_lng, sin_delta_lng = cos(delta_lng), sin(delta_lng)\n\n d = atan2(\n sqrt((cos_lat2 * sin_delta_lng) ** 2 + (cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * cos_delta_lng) ** 2),\n sin_lat1 * sin_lat2 + cos_lat1 * cos_lat2 * cos_delta_lng,\n )\n\n return Km(6371.009 * d) # Radius of earth in kilometers is 6371", "def L2_dists_vectorized(x, y):\n dists = -2 * np.matmul(x, y.T)\n dists += np.sum(x**2, axis=1)[:, np.newaxis]\n dists += np.sum(y**2, axis=1)\n return np.sqrt(dists)", "def dist(x,y,xc=0.,yc=0.):\n return sqrt((x-xc)**2+(y-yc)**2)" ]
[ "0.7289671", "0.70696187", "0.70524025", "0.69750804", "0.69441026", "0.69271106", "0.6895158", "0.6836602", "0.6820166", "0.6789631", "0.67889583", "0.6785679", "0.67835736", "0.67750597", "0.6772455", "0.67694485", "0.6763788", "0.6762551", "0.6762438", "0.6759875", "0.67510515", "0.67449594", "0.67419976", "0.67409784", "0.673707", "0.6731792", "0.6717399", "0.67091906", "0.67058057", "0.67051244", "0.67051065", "0.6696886", "0.6694069", "0.66803145", "0.66713727", "0.6666503", "0.6660448", "0.6656151", "0.6655868", "0.66481006", "0.6639165", "0.66274893", "0.66190803", "0.66189796", "0.6618421", "0.6614454", "0.6613642", "0.65673125", "0.6562724", "0.65604097", "0.6555224", "0.65548027", "0.6549826", "0.654405", "0.6543356", "0.65427554", "0.65422297", "0.65361917", "0.6530595", "0.65140176", "0.6496574", "0.6492844", "0.6492246", "0.64866227", "0.64861125", "0.6478818", "0.6476168", "0.6475874", "0.6473764", "0.64703345", "0.6470152", "0.6467273", "0.64634466", "0.6463239", "0.6455804", "0.6455065", "0.6454885", "0.645161", "0.64453477", "0.6435637", "0.6433833", "0.6432312", "0.643035", "0.64291906", "0.6426515", "0.64219034", "0.6419292", "0.6419292", "0.6418673", "0.64183384", "0.64077365", "0.64029944", "0.64005333", "0.6399921", "0.6397835", "0.63932866", "0.6392292", "0.6392213", "0.6386945", "0.6384485" ]
0.67869925
11
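Because the document above is pure numpy, the same call accepts scalars or equal-length arrays. A small usage sketch, assuming the great_circle_vec definition from the row above is in scope; the coordinates and printed distances are approximate and purely illustrative:

import numpy as np

lats = np.array([40.7128, 51.5074, 35.6762])   # New York, London, Tokyo
lngs = np.array([-74.0060, -0.1278, 139.6503])

# Distance from each city to Paris (48.8566 N, 2.3522 E), in metres;
# broadcasting handles the scalar destination against the arrays.
d = great_circle_vec(lats, lngs, 48.8566, 2.3522)
print(np.round(d / 1000))  # roughly [5837, 344, 9714] km

Passing two aligned arrays instead of a scalar pair gives elementwise pair distances, which is what makes the function usable over the whole edge list of a graph in one call.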
Helper to handle indices and logical indices of NaNs.
def nan_helper(y): return (np.isnan(y), lambda z: z.to_numpy().nonzero()[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _index_to_nan_fast(data, existing_nans, to_nan):\n index_nan = []\n randgen = (np.random.choice(len(data)) for _ in cnt(start=1))\n for i in range(to_nan):\n ix = next(filter(lambda x: x not in existing_nans and x not in index_nan, randgen))\n index_nan.append(ix)\n data_imp = data.copy()\n data_imp[index_nan] = np.nan\n return data_imp, index_nan", "def isnan(self):\n return self.isAny( (lambda x: np.isnan(x)) )", "def pd_isnan(val):\n return val is None or val != val", "def isnan(data):\n return _make.isnan(data)", "def nan_helper(y):\n return np.isnan(y), lambda z: z.nonzero()[0]", "def nan_helper(y):\n return np.isnan(y), lambda z: z.nonzero()[0]", "def nan_helper(y):\n return np.isnan(y), lambda z: z.nonzero()[0]", "def gdx_isnan(val,gdxf):\n return val in [SPECIAL_VALUES[0], SPECIAL_VALUES[1]]", "def is_nan(x):\n return (x is np.nan or x != x)", "def isnan(x):\n return False", "def _nan_cells(traces):\n # Find all cells with NaNs\n nancells = []\n ncells = -1\n for cs in traces:\n if len(traces[cs]) > 0:\n ncells = np.shape(traces[cs])[1]\n ns = np.sum(np.sum(np.invert(np.isfinite(\n traces[cs])), axis=2), axis=0)\n vals = np.arange(ncells)\n nancells.extend(vals[ns > 0])\n\n # Set _mask_cells if it hasn't been set\n out = np.zeros(ncells, dtype=bool)\n\n # Convert nancells to a list of good cells\n nancells = np.array(list(set(nancells)))\n if len(nancells) > 0:\n print('Warning: %i cells have NaNs'%len(nancells))\n out[nancells] = True\n\n return out", "def _index_to_nan(data, existing_nans, to_nan):\n index_nan = np.random.choice([i for i in range(len(data)) if i not in existing_nans],\n size=to_nan, replace=False)\n data_imp = data.copy()\n data_imp[index_nan] = np.nan\n return data_imp, index_nan", "def nonans(array):\n return array[~np.isnan(array)]", "def nan_helper(y):\r\n\r\n return np.isnan(y), lambda z: z.nonzero()[0]", "def nan_helper(y):\r\n\r\n return np.isnan(y), lambda z: z.nonzero()[0]", "def _get_nan_indices(*tensors: Tensor) ->Tensor:\n if len(tensors) == 0:\n raise ValueError('Must pass at least one tensor as argument')\n sentinel = tensors[0]\n nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device)\n for tensor in tensors:\n permuted_tensor = tensor.flatten(start_dim=1)\n nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1)\n return nan_idxs", "def nan_helper(y):\n\n return np.isnan(y), lambda z: z.nonzero()[0]", "def nan_helper(y):\n\n return np.isnan(y), lambda z: z.nonzero()[0]", "def _nan_helper(self, y):\n\n return np.isnan(y), lambda z: z.nonzero()[0]", "def nan_value(data):\n return data.isnull().any()", "def _nan_data(data, to_nan=0.2):\n # Number of values to be NaNed as int\n to_nan = int(len(data) * to_nan)\n # Existing NaN's as indicies\n existing_nans = data[data.isnull() == True].index\n return to_nan, existing_nans", "def assert_no_nans(x):\n assert not torch.isnan(x).any()", "def nan(self, check_inf = True):\n return self.foreach(\n lambda k,v: (k, numpy.isnan(v) + (check_inf == True) * numpy.isinf(v)),\n dimensions = self.dims,\n shape = self.shape,\n )", "def remove_nans(arr):\n not_nan = [i for i in range(len(arr)) if not np.isnan(arr[i])]\n\n return not_nan, arr[not_nan]", "def isnan(*obj):\n\n out = [flatten(o) if istensor(o) else o for o in obj]\n return any([any(o != o) if istensor(o) else o != o for o in out])", "def is_nan(self):\r\n return self._real.is_nan() or self._imag.is_nan()", "def ISNA(value):\n return isinstance(value, float) and math.isnan(value)", "def has_nans(tensor, verbose=True):\n 
tensor_numpy = tensor.data.cpu().numpy().flatten()\n where_nan = np.argwhere(tensor_numpy != tensor_numpy)\n\n nan_count = len(where_nan)\n nan = nan_count != 0\n\n if verbose and nan:\n print(f\"Encountered {nan_count} NaNs\")\n\n return nan", "def is_scalar_nan(x):\n return isinstance(x, numbers.Real) and math.isnan(x)", "def checkfornan(chosen_df):\n if not chosen_df.isnull().values.any():\n raise ValueError('NaN in DataFrame')", "def nans(shape, dtype=float):\n a = np.empty(shape, dtype)\n a.fill(np.nan)\n return a", "def is_nan(self, row_data):\n return math.isnan(row_data)", "def _autocheck_nan(self):\n # assert np.isnan(self.W).any() == False, \"W matrix should not contain NaN values.\"\n assert np.isnan(self.Win).any() == False, \"Win matrix should not contain NaN values.\"\n if self.Wfb is not None:\n assert np.isnan(self.Wfb).any() == False, \"Wfb matrix should not contain NaN values.\"", "def isna(self):\n return DataFrameDefault.register(pandas.DataFrame.isna)(self)", "def _check_nan(self, vector):\n return np.isnan(vector).sum() > 0", "def is_nan(self):\n \n return self.coeff.is_nan()", "def nanmasked(x):\n mask = ~np.isnan(x)\n maskA = x[mask]\n return (maskA,mask)", "def na_complain(X):\n na_values_present = np.isnan(X).sum()\n if na_values_present:\n raise ValueError(\"Na's found in data matrix.\")", "def _no_nan(self, feature: np.array) -> bool:\n if not np.any(np.isnan(feature)):\n return True\n else:\n return False", "def __is_nan(self):\n return _VirtualBooleanColumn(\n df_name=self.thisptr[\"df_name_\"],\n operator=\"is_nan\",\n operand1=self,\n operand2=None\n )", "def test_detect_nan():\r\n nan_detected = [False]\r\n\r\n def detect_nan(i, node, fn):\r\n for output in fn.outputs:\r\n if numpy.isnan(output[0]).any():\r\n print '*** NaN detected ***'\r\n theano.printing.debugprint(node)\r\n print 'Inputs : %s' % [input[0] for input in fn.inputs]\r\n print 'Outputs: %s' % [output[0] for output in fn.outputs]\r\n nan_detected[0] = True\r\n break\r\n\r\n x = theano.tensor.dscalar('x')\r\n f = theano.function([x], [theano.tensor.log(x) * x],\r\n mode=theano.compile.MonitorMode(\r\n post_func=detect_nan))\r\n f(0) # log(0) * 0 = -inf * 0 = NaN\r\n assert nan_detected[0]", "def isna(self):\n return super().isna()", "def checkNaN(data_dict):\n for k, v in data_dict.iteritems():\n mark = True\n for feature, value in v.iteritems():\n if (value != 'NaN') and (feature != 'poi'):\n mark = False\n break\n if mark:\n print k\n print v['poi']", "def set_nan(x):\n x[x == -999] = np.nan\n return x", "def _idxs_postformat_null(self):\n pass", "def notna(self):\n return super().notna()", "def columns_with_na_values(data):\n aux = data.isna().sum() > 0\n return aux.index[aux.values].values", "def _is_nan(self, x: any) -> bool:\n return isinstance(x, float) and math.isnan(x)", "def find_first_non_nan(array):\n for index, value in enumerate(array):\n if not np.isnan(value):\n return index", "def _isnull_old(obj):\n if is_scalar(obj):\n return lib.checknull_old(obj)\n # hack (for now) because MI registers as ndarray\n elif isinstance(obj, ABCMultiIndex):\n raise NotImplementedError(\"isnull is not defined for MultiIndex\")\n elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):\n return _isnull_ndarraylike_old(obj)\n elif isinstance(obj, ABCGeneric):\n return obj._constructor(obj._data.isnull(func=_isnull_old))\n elif isinstance(obj, list) or hasattr(obj, '__array__'):\n return _isnull_ndarraylike_old(np.asarray(obj))\n else:\n return obj is None", "def 
check_np_array_nan(func):\r\n\r\n @functools.wraps(func)\r\n def wrapper(*args, **kwargs):\r\n result = func(*args, **kwargs)\r\n if type(result) in [tuple, list]:\r\n count = 0\r\n for an_array in result:\r\n if type(an_array) is dict:\r\n for key in an_array:\r\n if np.isnan(an_array[key]).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values in the \"\r\n + str(count)\r\n + \"-th dict are:\\n\"\r\n )\r\n hydro_logger.warning(\"value of \" + key + \":\\n\")\r\n hydro_logger.warning(np.argwhere(np.isnan(an_array[key])))\r\n else:\r\n if np.isnan(an_array).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values in the \"\r\n + str(count)\r\n + \"-th array are:\\n\"\r\n )\r\n hydro_logger.warning(np.argwhere(np.isnan(an_array)))\r\n count = count + 1\r\n elif type(result) is np.array:\r\n if np.isnan(result).any():\r\n hydro_logger.warning(\r\n \"Please check your input data: there are NaN data! It may affect following calculation!!\\n \"\r\n \"The location of NaN values are:\\n\"\r\n )\r\n hydro_logger.warning(np.argwhere(np.isnan(result)))\r\n return result\r\n\r\n return wrapper", "def NA():\n return float('nan')", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def isna(self):\n # type: () -> np.ndarray\n return extract_isnull_bytemap(self.data)", "def remove_nans(coords):\n s = np.apply_along_axis(sum,1,np.isnan(coords[1])) == 0\n coords[0] = (np.asarray(coords[0])[s]).tolist()\n coords[1] = coords[1][s,:]", "def is_nan(x):\n if not isinstance(x, numbers.Number):\n return False\n\n if isinstance(x, complex):\n return math.isnan(x.real) or math.isnan(x.imag)\n else:\n return math.isnan(x)", "def removeNans(data):\n for i in data[:]:\n ind = data.index(i)\n for j in i:\n if np.isnan(j):\n data.remove(i)\n break\n return data", "def remove_nans(a, b):\n a = np.asarray(a)\n b = np.asarray(b)\n\n mask = ~np.isnan(a) & ~np.isnan(b)\n a = a[mask]\n b = b[mask]\n\n return a, b", "def is_nan(self, name):\n return self._data[name].isnull()", "def _nodata_mask(self):\n if self.nodata_value is None:\n return np.ones_like(self.array, dtype=np.bool)\n return self.array != self.nodata_value", "def check_index(i):\n\n i = asarray(i)\n if (i.ndim > 1) or (size(i) < 1):\n raise Exception(\"Index must be one-dimensional and non-singleton\")\n\n return i", "def test_not_inplace():\r\n nan_detected = [False]\r\n\r\n def detect_nan(i, node, fn):\r\n for output in fn.outputs:\r\n if numpy.isnan(output[0]).any():\r\n print '*** NaN detected ***'\r\n theano.printing.debugprint(node)\r\n print 'Inputs : %s' % [input[0] for input in fn.inputs]\r\n print 'Outputs: %s' % [output[0] for output in fn.outputs]\r\n nan_detected[0] = True\r\n break\r\n\r\n x = theano.tensor.vector('x')\r\n mode = theano.compile.MonitorMode(post_func=detect_nan)\r\n #mode = mode.excluding('fusion', 'inplace')\r\n mode = mode.excluding('local_elemwise_fusion',\r\n 'inplace_elemwise_optimizer')\r\n o = theano.tensor.outer(x, x)\r\n out = theano.tensor.log(o) * o\r\n f = theano.function([x], [out],\r\n mode=mode)\r\n\r\n # Test that the fusion wasn't done\r\n assert len(f.maker.fgraph.apply_nodes) == 5\r\n assert not f.maker.fgraph.toposort()[-1].op.destroy_map\r\n f([0, 0]) # log(0) * 0 = -inf * 0 = NaN\r\n\r\n # Test that we still detect the nan\r\n assert nan_detected[0]", "def 
test_nan_input(self):\n self.cube_uv_down.data.fill(np.nan)\n msg = (\n \"The radiation flux in UV downward contains data \"\n \"that is negative or NaN. Data should be >= 0.\"\n )\n with self.assertRaisesRegex(ValueError, msg):\n calculate_uv_index(self.cube_uv_down)", "def mk_nan_wrap(X):\n if np.count_nonzero(~np.isnan(X)) > 0:\n return mk_theil_sen_1d(X)\n else:\n return (np.nan, np.nan, np.nan)", "def cnan(x):\n if np.isnan(x).sum()>0:\n import pdb\n pdb.set_trace()", "def isNan(x: float) -> bool:\n return x != x", "def testExpectedNaNOpOutputs(self):\n check_numerics_callback.enable_check_numerics()\n\n # Empty input tensor\n x = constant_op.constant(1, dtype=dtypes.float32, shape=[0, 1, 1, 1])\n scale = constant_op.constant([1], dtype=dtypes.float32)\n offset = constant_op.constant([1], dtype=dtypes.float32)\n\n # Calling fused_batch_norm with an empty input should output a NaN in the\n # latter four outputs without triggering the check_numerics callback\n batch_norm_res = gen_nn_ops._fused_batch_norm(\n x=x, scale=scale, offset=offset, mean=[], variance=[])\n\n _, batch_mean, batch_variance, _, _ = self.evaluate(batch_norm_res)\n\n self.assertTrue(np.isnan(batch_mean.squeeze()))\n self.assertTrue(np.isnan(batch_variance.squeeze()))", "def test_ks_test_empty_indices():\n out = compute_indices_ks_test([], 1000, mode=\"D+\")\n assert all(o is None for o in out)", "def get_nan_idx(column, df):\n return df[df[column].isna()].index.values", "def test_nan_check(self):\n values_with_nans = np.array([1, 2, 3, np.nan, np.nan])\n\n with LogCapture(\"puma\") as log:\n _ = hist_w_unc(values_with_nans, bins=4)\n log.check(\n (\n \"puma\",\n \"WARNING\",\n \"Histogram values contain 2 nan values!\",\n )\n )", "def NaN_cleaning(df):\n df = df.replace(np.nan, 'unknown')\n return df.reset_index(drop=True)", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def allowed_positions(self, curr_state):\n return [i for i, val in enumerate(curr_state) if np.isnan(val)]", "def notna(self):\n return DataFrameDefault.register(pandas.DataFrame.notna)(self)", "def check_nan(self):\n # generate array for easier handling\n values = np.swapaxes(self.psf.psf_value, 0, 2)\n fail_count = 0\n\n # loop over energies\n for i, arr in enumerate(values):\n energy_hi = self.psf.energy_hi[i]\n energy_lo = self.psf.energy_lo[i]\n\n # check if bin is outside of safe energy threshold\n if self.psf.energy_thresh_lo > energy_hi:\n continue\n if self.psf.energy_thresh_hi < energy_lo:\n continue\n\n # loop over offsets\n for arr2 in arr:\n\n # loop over deltas\n for v in arr2:\n\n # check for nan\n if math.isnan(v.value):\n # add to fail counter\n fail_count += 1\n break\n\n results = {}\n if fail_count == 0:\n results[\"status\"] = \"ok\"\n else:\n results[\"status\"] = \"failed\"\n results[\"n_failed_bins\"] = fail_count\n\n self.results[\"nan\"] = results", "def na_value() -> pandas.NA:\n return pandas.NA", "def na_value() -> pandas.NA:\n return pandas.NA", "def na_cmp():\n return lambda x, y: bool(pd.isna(x.magnitude)) & bool(pd.isna(y.magnitude))", "def nan(klass):\n return RatTerm(RatNum(1, 0), 0)", "def na_value():\n return pd.NA", "def check_and_interpolate_nans(df):\n nan_count = df.isna().sum().sum()\n if nan_count > 0:\n df.interpolate(method='linear', inplace=True)\n return df", "def _detect_nan_inf(tensor):\n\n if tensor.dtype.is_floating:\n mask = 
math_ops.reduce_any(\n gen_math_ops.logical_or(\n gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))\n output_tensor = cond.cond(\n mask,\n lambda: constant_op.constant([1.0]),\n lambda: constant_op.constant([0.0]))\n else:\n output_tensor = constant_op.constant([0.0])\n return output_tensor", "def assertAllNan(self, a):\n is_nan = np.isnan(self._GetNdArray(a))\n all_true = np.ones_like(is_nan, dtype=np.bool)\n self.assertAllEqual(all_true, is_nan)", "def pettitt_nan_wrap(X):\n if np.count_nonzero(~np.isnan(X)) > 0:\n return pettitt_test(X)\n else:\n return (np.nan, np.nan, np.nan)", "def isfinite(self):\n return not self.isAny( (lambda x: not np.isfinite(x)) )", "def check_for_nans(handle):\n\n # List datasets and groups\n datasets = []\n groups = []\n\n def func(name, obj):\n if isinstance(obj, h5py.Dataset):\n datasets.append(name)\n elif isinstance(obj, h5py.Group):\n groups.append(name)\n handle.visititems(func)\n\n # Visit order is not guaranteed, so sort\n datasets.sort()\n groups.sort()\n\n # Loop over datasets to check for NaN values\n for d in datasets:\n array = np.array(handle[d])\n if array.dtype.kind == 'V': # structured array\n for col in array.dtype.fields:\n n_nan = number_nan(array[col])\n if n_nan > 0:\n warnings.warn(\"{0} NaN value(s) encountered in field {1} of dataset '{2}'\".format(n_nan, col, d), NaNWarning)\n else:\n n_nan = number_nan(array)\n if n_nan > 0:\n warnings.warn(\"{0} NaN value(s) encountered in dataset '{1}'\".format(n_nan, d), NaNWarning)\n\n # Loop over all groups and datasets to check attributes\n for item in ['/'] + datasets + groups:\n\n # Find all attributes\n attributes = list(handle[item].attrs.keys())\n attributes.sort()\n\n for a in attributes:\n n_nan = number_nan(handle[item].attrs[a])\n if n_nan > 0:\n warnings.warn(\"{0} NaN value(s) encountered in attribute '{1}' of object '{2}'\".format(n_nan, a, item), NaNWarning)", "def replace_nans(lst):\r\n return [None if np.isnan(v) else v for v in lst]", "def show_nan(df):\n nan_df = df[(~df['tweet_user_location'].str.lower().isin(\n [x.lower() for x in LOCATION_DISCARD])) & df['geonameid'].isnull()]\n print(f'Number of NaNs: {len(nan_df.index)}')\n return nan_df", "def test_nan_keyword(self):\n # If array has any nan's then the output will return all zeros\n array = self.array1.copy()\n array[0,0] = numpy.nan\n byt = bytscl(array, nan=True)\n total = numpy.sum(byt)\n self.assertTrue(total != 0)", "def check_nan(wseries: pd.Series) -> pd.Series:\n\n if len(wseries[pd.Series([\n (type(val) == str or isnan(val)) for val in wseries\n ], index=wseries.index)]) == 0:\n return wseries # nothing to change\n\n # ensure that all are either float or nan\n def _float_or_nan(ent):\n \"\"\"\n Force values to be either a float or nan first\n \"\"\"\n try:\n return float(ent)\n except ValueError:\n return float('nan')\n\n wseries = pd.Series(\n [_float_or_nan(val) for val in wseries], index=wseries.index,\n name=wseries.name\n )\n\n # continue with interpolation or extrapolation if needed\n inds = where(\n pd.Series([\n (isinstance(val, str) or isnan(val)) for val in wseries\n ], index=wseries.index)\n )[0] # locate the position of the problematic readings\n for ind in inds:\n try:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-1],\n wseries.index[ind+1],\n wseries[ind-1], wseries[ind+1]\n )\n if isnan(wseries[ind]): # interpolation does not work\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-2],\n wseries.index[ind-1],\n wseries[ind-2], 
wseries[ind-1]\n )\n except IndexError: # extrapolation\n try:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-2],\n wseries.index[ind-1],\n wseries[ind-2], wseries[ind-1]\n )\n except IndexError:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind+2],\n wseries.index[ind+1],\n wseries[ind+2], wseries[ind+1]\n )\n return wseries\n\n return wseries", "def nonull(val):\n return val if not pd.isnull(val) else None", "def checkNaN(data):\n if data.isnull().values.any():\n N = data.isnull().sum().sum()\n print(\"There are {} missing values.\".format(N))", "def handle_missing_values(dataset, missing_values_header, missing_label):\n\n return dataset[dataset[missing_values_header] != missing_label]", "def replace_nan(data):\r\n lst_ind = np.array(['valence_intensity', 'anger_intensity',\r\n 'fear_intensity', 'sadness_intensity', 'joy_intensity'])\r\n for i in lst_ind:\r\n native = data[:][i]\r\n avg = np.nanmean(native)\r\n data[:][i] = np.where(np.isnan(native), avg, native)\r\n return data", "def test_canonicalize_nan(self):\r\n sio = StringIO()\r\n handler = logging.StreamHandler(sio)\r\n handler.setLevel(logging.ERROR)\r\n logging.getLogger('theano.gof.opt').addHandler(handler)\r\n try:\r\n x = vector()\r\n f = theano.function([x], x + numpy.nan)\r\n finally:\r\n logging.getLogger('theano.gof.opt').removeHandler(handler)\r\n # Ideally this test would only catch the maxed out equilibrium\r\n # optimizer error message, but to be safe in case this message\r\n # is modified in the future, we assert that there is no error\r\n # at all.\r\n assert not sio.getvalue()", "def is_empty(series):\n return series.isna().all()", "def torch_isnotfinite(x):\n not_inf = ((x + 1) != x)\n not_nan = (x == x)\n return 1 - (not_inf & not_nan)", "def handel_missing_values(dataset, missing_values_header, missing_label):\n \n return dataset[dataset[missing_values_header] != missing_label]", "def _erosion(image, label, struct_elem):\n if struct_elem is not None:\n return binary_erosion(image == label, struct_elem).astype(np.uint16)\n return (image == label).astype(np.uint16)" ]
[ "0.67927897", "0.67377245", "0.67024386", "0.6659169", "0.6472418", "0.6472418", "0.6472418", "0.6458507", "0.64417696", "0.6441317", "0.6435194", "0.643293", "0.63825434", "0.63670725", "0.63670725", "0.6343384", "0.6341767", "0.6341767", "0.62725896", "0.6230789", "0.6221217", "0.62172484", "0.61674607", "0.61597705", "0.6134431", "0.61165106", "0.6115247", "0.6078133", "0.6056836", "0.5979782", "0.5913242", "0.5903061", "0.58867127", "0.5864012", "0.5861267", "0.5853166", "0.58423615", "0.58387357", "0.5829033", "0.582785", "0.58237374", "0.58143985", "0.5806964", "0.580519", "0.5796099", "0.57960147", "0.5781001", "0.5766401", "0.5756229", "0.5752492", "0.5748814", "0.574493", "0.5740937", "0.5740937", "0.5728439", "0.5709692", "0.5679792", "0.5672775", "0.567141", "0.5664514", "0.5646356", "0.5643399", "0.56355965", "0.5610525", "0.55832595", "0.55715483", "0.556171", "0.5547155", "0.5547129", "0.55429196", "0.5537267", "0.5531571", "0.5531571", "0.5531571", "0.5518959", "0.55114156", "0.5510574", "0.5510574", "0.5497455", "0.5494754", "0.54941434", "0.548724", "0.54868793", "0.54854214", "0.5476003", "0.54644823", "0.5458604", "0.5455999", "0.5451369", "0.54512817", "0.5450493", "0.544914", "0.5444868", "0.5425943", "0.5418462", "0.54138076", "0.53991115", "0.5396949", "0.53918225", "0.53726625" ]
0.6226348
20
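The document field in the row above wraps a function so that NaN values in its outputs are logged rather than silently propagated. A minimal self-contained sketch of the same idea — using the standard `logging` module as a stand-in for that project's `hydro_logger`, and handling only arrays/sequences, not the dict branch — might look like:

```python
import functools
import logging

import numpy as np

logger = logging.getLogger(__name__)  # stand-in for the row's hydro_logger (assumption)


def warn_on_nan(func):
    """Warn, but do not fail, when the wrapped function returns NaN values."""

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        # Normalize single outputs and tuple/list outputs to one iterable.
        outputs = result if isinstance(result, (tuple, list)) else [result]
        for i, out in enumerate(outputs):
            arr = np.asarray(out, dtype=float)
            if np.isnan(arr).any():
                logger.warning(
                    "Output %d contains NaN values at indices:\n%s",
                    i,
                    np.argwhere(np.isnan(arr)),
                )
        return result

    return wrapper


@warn_on_nan
def divide(a, b):
    return np.asarray(a, dtype=float) / np.asarray(b, dtype=float)


divide([1.0, 0.0], [0.0, 0.0])  # 0/0 yields NaN, so a warning is logged
```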
Return True if the node is a "real" endpoint of an edge in the network, \ otherwise False. OSM data includes lots of nodes that exist only as \ points to help streets bend around curves. An end point is a node that \
def is_endpoint(G: nx.Graph, node: int, strict=True): neighbors = set(list(G.predecessors(node)) + list(G.successors(node))) n = len(neighbors) d = G.degree(node) if node in neighbors: # If the node appears in its list of neighbors, it self-loops. this is # always an endpoint. return True # If node has no incoming edges or no outgoing edges, it must be an # endpoint elif G.out_degree(node) == 0 or G.in_degree(node) == 0: return True elif not (n == 2 and (d == 2 or d == 4)): # Else, if it does NOT have 2 neighbors AND either 2 or 4 directed # edges, it is an endpoint. either it has 1 or 3+ neighbors, in which # case it is a dead-end or an intersection of multiple streets or has # 2 neighbors but 3 degree (indicating a change from oneway to twoway) # or more than 4 degree (indicating a parallel edge) and thus is an # endpoint return True elif not strict: # Non-strict mode osmids = [] # Add all the edge OSM IDs for incoming edges for u in G.predecessors(node): for key in G[u][node]: osmids.append(G.edges[u, node, key]['osmid']) # Add all the edge OSM IDs for outgoing edges for v in G.successors(node): for key in G[node][v]: osmids.append(G.edges[node, v, key]['osmid']) # If there is more than 1 OSM ID in the list of edge OSM IDs then it is # an endpoint, if not, it isn't return len(set(osmids)) > 1 else: # If none of the preceding rules returned true, then it is not an # endpoint return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_is_edge(self, node: MazeCell) -> bool:\n return node.x == 0 or node.x == self._ncols - 1 or node.y == 0 or node.y == self._nrows - 1", "def door_in_edge(self, edge: list) -> bool:\n doors = self.get_interior_doors()\n room1 = self.get_rooms()[edge[0]]\n room2 = self.get_rooms()[edge[1]]\n for i in range(len(doors)):\n if utils.door_room_relation(doors[i], room1) and utils.door_room_relation(doors[i], room2):\n return True\n return False", "def is_edge(self):\n if self._row == 0 or self._row == 9 or self._column == 0 or self._column == 9:\n # check that the edge is not actually a corner square\n if not self.is_corner():\n # If not a corner and in a border row return True\n return True\n\n return False", "def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]", "def isEdge(self, x, y):\n if y in self.parseX() or x in self.parseX():\n return y in self.dictOut[x]\n else :\n print(\"verteces not found\")", "def _point_faces_edge(self, edge, point):\n a = sqrt((edge[0][0] - edge[1][0]) ** 2 + (edge[0][1] - edge[1][1]) ** 2)\n b = sqrt((edge[0][0] - point[0]) ** 2 + (edge[0][1] - point[1]) ** 2)\n c = sqrt((edge[1][0] - point[0]) ** 2 + (edge[1][1] - point[1]) ** 2)\n ang1, ang2 = self._angle(b, a, c), self._angle(c, a, b)\n if ang1 > pi / 2 or ang2 > pi / 2:\n return False\n return True", "def isEdge(self, x, y):\n return y in self._dictOut[x]", "def has_edge(self, otherNode):\n\t\t\treturn otherNode in self.edges", "def is_end_node():\n return False", "def contains_edge(self, node, other_node):\n return \\\n {node.get_name(), other_node.get_name()} in \\\n list([\n {edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()}\n for edge in self.get_edges()\n ]) # return true if there exists an edge between the input nodes and false otherwise", "def is_edge_server() -> bool:\n return Config().args.port is not None", "def is_self_referential(self, edge):\n # Determine if edge is directed or not to choose the proper splitting character\n split_str = gt.determine_split_string(edge)\n\n # split the edge\n edge_split = edge.split(split_str)\n\n return edge_split[0] == edge_split[-1] and (edge_split[0] == self.start_kind or\n edge_split[0] == self.end_kind)", "def isEdge(self,x,y):\r\n return self.matr[x][y]", "def endpoints(image):\n return _neighbors_conv(image) == 1", "def isEdge(self,x,y):\n\t\treturn self._matr[x][y]", "def IsEulerGraph(self):\n\n for node in self.nodes:\n if ((len(node.neighbours) % 2) == 1) or (len(node.neighbours) == 0):\n return False\n return True", "def isEdge(self,x,y):\n\t\treturn y in self._dict[x]", "def is_boundary_edge(a, b, bdy_edges):\n for edge in bdy_edges:\n a0, b0 = edge\n if a == a0 and b == b0:\n return True\n return False", "def hasEdge(self, startNodeId, endNodeId):\r\n for edge in self.nodes[startNodeId].adjList:\r\n if edge.endNodeId == endNodeId:\r\n return edge\r\n return None", "def has_edges(self):\n\n return len(self._edges) > 0", "def is_endpoint(color):\n\n img = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)\n blur = cv2.GaussianBlur(img,(5,5),0)\n\n lower_range = np.array([175, 175, 175], dtype=np.uint8)\n upper_range = np.array([255, 255, 255], dtype=np.uint8)\n\n mask = cv2.inRange(blur, lower_range, upper_range)\n res = cv2.bitwise_and(img,img, mask= mask)\n\n bilateral_filtered_image = cv2.bilateralFilter(res, 5, 175, 175)\n edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)\n\n _, contours, _= cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contour_list = []\n for 
contour in contours:\n \tapprox = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)\n \tarea = cv2.contourArea(contour)\n \tif ((len(approx) > 8) & (area > 10000) & (area < 30000)):\n \t\tcontour_list.append(contour)\n\n if not len(contour_list)==0:\n \treturn True\n else:\n \treturn False", "def is_edge_site(self) -> bool:\n return self.config.edge", "def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy]))", "def edge_ground(X):\n gradient_x = img_conv(X, kernel_sobel_x)\n gradient_y = img_conv(X, kernel_sobel_x.transpose())\n mag = (gradient_x ** 2.0 + gradient_y ** 2.0) ** 0.5\n is_edge = mag > 1.0\n return is_edge.astype('f')", "def edge_not_in_component(edge, component):\n x_start = edge[0]\n x_stop = edge[0] + edge[2]\n y_start = edge[1]\n y_stop = edge[1] + edge[3]\n if x_start >= component[1].start and x_stop <= component[1].stop and y_start >= component[0].start and y_stop <= \\\n component[0].stop:\n return False\n else:\n return True", "def will_hit_edge(self, direction):\n return ((self.position <= 0 and direction.is_left()) or \n (self.position >= self.scene.step_count - 1 and \n direction.is_right()))", "def is_finite(self) -> bool:\n normal = self.to_normal_form()\n di_graph = nx.DiGraph()\n for production in normal.productions:\n body = production.body\n if len(body) == 2:\n di_graph.add_edge(production.head, body[0])\n di_graph.add_edge(production.head, body[1])\n try:\n nx.find_cycle(di_graph, orientation=\"original\")\n except nx.exception.NetworkXNoCycle:\n return True\n return False", "def IsWire(self, *args):\n return _BRepAlgo.BRepAlgo_EdgeConnector_IsWire(self, *args)", "def if_edge_state(self, s):\n if (s[0] == 0) or (s[0] == self.ni - 1) or (s[1] == 0) or (s[1] == self.nj - 1):\n return True\n else:\n return False", "def _can_access_endpoint(self, endpoint):\n if endpoint.visa_required:\n return self._has_valid_visa()\n else:\n return True", "def is_compatible(self, e2):\n\n return (self.type == TypeEdge.HOLE and e2.type == TypeEdge.HEAD) or (self.type == TypeEdge.HEAD and e2.type == TypeEdge.HOLE) \\\n or self.type == TypeEdge.UNDEFINED or e2.type == TypeEdge.UNDEFINED", "def has_neighbour(self, node):\n if node in self.neighbours:\n return True\n return False", "def has_end_effector_link(self):\n return len(self._g.get_end_effector_link()) > 0", "def containsEdge(self, e):\n return any(e.nvt in [self.vertices[i-2], self.vertices[i]] and self.vertices[i-1] == e.pvt for i in range(len(self.vertices)))", "def has_edge(self, v1, v2):\n\n return v1 in self.get_reachables(v2[0], v2[1])", "def hasEdge(self, fromnode, tonode):\n fromnode = self.findNode(fromnode)\n tonode = self.findNode(tonode)\n\n return self.graph.edge_by_node(fromnode, tonode) is not None", "def IsEdge(self, p_int, p_int_1):\n ...", "def has_hit_edge(self):\n return self.will_hit_edge(self._direction)", "def 
is_indirect_deviation_edge(self, edge):\n def predicate(x):\n a, b = self.edge_sequence(edge)\n if a != b:\n return True\n\n return self._is_deviation_edge_predicate(edge, predicate)", "def is_edge(self, v, w):\n return self.op_norm(v[0], w[0]) == (v[1] + w[1]) and (self.variant.is_bipartite() or v != w)", "def has_edge(\n self, subject_node: str, object_node: str, edge_key: Optional[str] = None\n ) -> bool:\n return self.graph.has_edge(subject_node, object_node, key=edge_key)", "def is_collinear(self, directed_edge):\n\n return self.orientation(directed_edge.begin) == 0 and self.orientation(directed_edge.end) == 0", "def has_edge(self, i: Node, j: Node) -> bool:\n return frozenset({i, j}) in self._edges", "def isDecendentOf(self, node):\n if (self in node.children()):\n return True\n elif (not node.isSink()):\n return reduce(lambda x,y: x or y, [self.isDecendentOf(x) for x in node.children()])\n else:\n return False", "def is_directed(self) -> bool:\n return True", "def isConnectedTo(self, node):\n for arc in self._arcsFrom:\n if arc.getFinish() is node:\n return True\n return False", "def test_edges(self):\n\n edge_list = self.g.edges()\n self.assertEqual(42, len(edge_list))\n\n # p1 p3 and p3 p1 are valid edges\n t1 = ('p1', 'p3')\n self.assertTrue(t1 in edge_list)\n\n t2 = ('p3', 'p1')\n self.assertTrue(t2 in edge_list)\n\n made_up = ('z1', 'q123')\n self.assertFalse(made_up in edge_list)\n\n return None", "def edges_is_closed_curve(edges):\n e_prev = first = edges[0]\n for e in edges[1:]:\n if e_prev[1] != e[0]:\n if e_prev[1] == first[0]:\n # new loop\n first = e\n else:\n return False\n e_prev = e\n if e_prev[1] != first[0]:\n return False\n return True", "def near_segment(point:tuple, edge:tuple)->bool:\n return between(point[0], edge[0][0], edge[1][0]) and between(point[1], edge[0][1], edge[1][1])", "def is_connected(self, start: Union[str, int], end: Union[str, int]) -> bool:\n\t\tif start not in self.vertices or end not in self.vertices:\n\t\t\traise GraphError(\"Start or end not found in graph\")\n\n\t\tvisited = dict(zip(self.vertices, [False] * len(self.vertices)))\n\t\tqueue = []\n\t\tqueue.append(start)\n\t\tvisited[start] = True\n\n\t\twhile queue:\n\t\t\tcur = queue.pop(0)\n\n\t\t\tif cur == end:\n\t\t\t\treturn True\n\n\t\t\tneighbours = self.get_neighbour_vertices(cur)\n\t\t\tfor neighbour in neighbours:\n\t\t\t\tif not visited[neighbour]:\n\t\t\t\t\tqueue.append(neighbour)\n\t\t\t\t\tvisited[neighbour] = True\n\n\t\treturn False", "def is_on_curve(self):\n if self.infinity:\n return True\n left = self.y * self.y\n right = self.x * self.x * self.x + self.ec.a * self.x + self.ec.b\n\n return left == right", "def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. 
Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 \r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra 
control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)", "def edge_between_neighbors(cell_a, cell_b):\n edge = np.logical_and(dilate_simple(cell_a), dilate_simple(cell_b))\n return edge", "def has_2D(self):\n\t\tif self.have_fastas is False:\n\t\t\tself._extract_fastas_from_fast5()\n\t\t\tself.have_fastas = True\n\n\t\tif self.fastas.get('twodirections') is not None:\n\t\t\treturn True\n\t\treturn False", "def test_find_end_nodes_of_triangulation(self):\n\n these_end_node_vertex_indices = (\n skeleton_lines._find_end_nodes_of_triangulation(\n triangle_to_vertex_matrix=TRIANGLE_TO_VERTEX_MATRIX,\n new_edge_table=NEW_EDGE_TABLE))\n\n self.assertTrue(numpy.array_equal(\n these_end_node_vertex_indices, END_NODE_VERTEX_INDICES))", "def contains(self, possible_point):\n# if possible_point == self.endpoints[0] or possible_point == self.endpoints[1]:\n# return False\n distance = sum(possible_point.distance_to(p) for p in self.endpoints)\n return abs(distance - self.length()) < 0.0000001", "def contains_edge(self, source: n, destination: n) -> bool:\n if not contains_vertex(source):\n return False\n if not contains_vertex(destination):\n return False\n return destination in self._graph[source].get_connections()", "def _has_endpoint(self, endpoint):\n return self.endpoints.filter(pk=endpoint.pk).exists()", "def is_closed(self):\n # In Open Cascade, unlinked (open) edges can be identified\n # as they appear in the edges iterator when ignore_orientation=False\n # but are not present in any wire\n ordered_edges = set()\n for wire in self.wires():\n for edge in wire.ordered_edges():\n ordered_edges.add(edge.topods_shape())\n unordered_edges = set([edge.topods_shape() for edge in self.edges()])\n missing_edges = unordered_edges - ordered_edges\n return len(missing_edges) == 
0", "def _is_deviation_edge_predicate(self, edge, predicate):\n if not self.is_deviation_edge(edge):\n return False\n if predicate(edge):\n return True\n return False", "def is_directed(self):\n return True", "def need_neighbor(self):\n return self._need_neighbor", "def valid_endpoint(cls):\n\t\treturn cls.__subclasses__() == []", "def test_adjacent_none(graph_with_edges):\n assert graph_with_edges.adjacent('B', 'A') is False", "def is_active_node(start, end, node):\n return (start <= node.get(\"from\", 0) <= end) or (\n node.get(\"from\", 0) <= start <= node.get(\"to\", float(\"inf\"))\n )", "def check_endnode(data, depth, max_depth=3, min_samples=15):\n\n # check for endnode, return a prediction\n if depth >= max_depth or len(data.index) < min_samples:\n end_node = True\n return(end_node)\n\n # if not endnode, get split\n else:\n end_node = False\n return(end_node)", "def is_adjacent(self, remote_host_name):\n # Check if a topology is defined, otherwise use fully connected\n if self.topology is None:\n return True\n\n if self.name in self.topology:\n if remote_host_name in self.topology[self.name]:\n return True\n else:\n return False\n else:\n logging.warning(\n \"Node {} is not in the specified topology and is therefore \"\n \"assumed to have no neighbors\".format(self.name)\n )\n return False", "def has_neighbor(self):\n if self.cur_neighbor is None:\n return False\n if self.cur_neighbor['app_feat'] is None:\n return False\n return True", "def check_edges(self):\n if self.rect.right >= self.screen_rect.right or self.rect.left <= 0:\n return True", "def is_node_onscreen(self, node, screen_edges):\n real_node = self.original_graph.get_node_by_serial(node.serial)\n node_x = real_node.x\n node_y = real_node.y\n node_r = node.get_radius() * 0.05\n return (node_x + node_r) > screen_edges[\"bottom_left\"].get_x() and \\\n (node_x - node_r) < screen_edges[\"top_right\"].get_x() and \\\n (node_y + node_r) > screen_edges[\"bottom_left\"].get_y() and \\\n (node_y - node_r) < screen_edges[\"top_right\"].get_y()", "def is_edge(location, hi, lo, bgArray):\n offsets = [\n (0,1),\n (1,0),\n (0,-1),\n (-1,0)\n ]\n # Check that location is within threshold first\n try:\n b = fetch_val(bgArray, location)\n except IndexError:\n return False\n if b < lo or b > hi:\n return False\n \n # Check if its neighbors are outside the threshold\n for offset in offsets:\n tmp = (location[0] + offset[0], location[1] + offset[1])\n try:\n b = fetch_val(bgArray, tmp)\n except IndexError:\n return True\n if b < lo or b > hi:\n return True\n return False", "def check_end(self):\n return [self.x, self.y] == self.end_pos", "def is_edge(graph, u, v):\n return graph.matrix[u][v]", "def data_plane_public_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"data_plane_public_endpoint\")", "def is_directed(self):\n return self._incoming is not self._outgoing\n # directed if maps are distinct", "def is_adjacent(self, startVertex: np.int_ , endVertex: np.int_):\n return self.__mat[startVertex][endVertex]>0", "def are_connected(self, node1, node2):\n return bool( self.get_edge(node1, node2) )", "def is_direct_deviation_edge(self, edge):\n def predicate(x):\n a, b = self.edge_sequence(edge)\n if a == b:\n return True\n\n return self._is_deviation_edge_predicate(edge, predicate)", "def in_node(self, coord):\n for axis in range(3):\n if coord[axis] < self.mins[axis] or coord[axis] > self.maxs[axis]:\n return False\n\n return True", "def test_false_if_no_node(graph_no_edges):\n false_nodes = ['land submarine', 
'Portland Timbers', 'tug cable scope', 100]\n for node in false_nodes:\n assert graph_no_edges.has_node(node) is False", "def edge_intersects_edges(e1, nodes, edges):\n for i in range(len(edges)):\n e2 = edges[i]\n if e1[1] == e2[0] or e1[0] == e2[1]:\n continue\n if two_edges_intersect(nodes, e1, e2):\n return True\n return False", "def isPointOnLine(node1, node2, point):\n m, b, d = geometry.lineSpec(node1, node2)\n if d == -1: # if two nodes are the same\n if node1 == point:\n return True\n else:\n return False\n else:\n if m == True: # parallel to y axis\n if point[0] == b and \\\n (((node1[1] <= point[1]) and (point[1] <= node2[1])) or\\\n ((node2[1] <= point[1]) and (point[1] <= node1[1]))):\n return True\n else:\n return False\n \n elif m == False:\n if point[1] == b and \\\n (((node1[0] <= point[0]) and (point[0] <= node2[0])) or\\\n ((node2[0] <= point[0]) and (point[0] <= node1[0]))):\n return True\n else:\n return False\n \n else:\n if(abs(point[1] - (m*point[0] + b)) < 0.05) and \\\n (((node1[0] <= point[0]) and (point[0] <= node2[0])) or\\\n ((node2[0] <= point[0]) and (point[0] <= node1[0]))) and\\\n (((node1[1] <= point[1]) and (point[1] <= node2[1])) or\\\n ((node2[1] <= point[1]) and (point[1] <= node1[1]))):\n return True\n else:\n return False", "def is_event(g, node):\n if node not in g.nodes():\n print('Not a node in the graph')\n return False\n else:\n if g.node[node]['type'] == 'event':\n return True\n else:\n return False", "def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise", "def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True", "def edge(self, which='inner'):\n def edge_shape(site):\n def in_shape(x): return self.shape(Site(site.family, site.tag + x))\n sites = [in_shape(x) for x in self._directions]\n if which == 'inner':\n return self.shape(site) and not all(sites)\n elif which == 'outer':\n return not self.shape(site) and any(sites)\n return Shape(edge_shape)", "def containsEdge(self, v1, v2):\n for e in self.edges:\n if (e.pvt, e.nvt) in [(v1, v2), (v2, v1)]:\n return True\n return False", "def is_deviation_edge(self, edge):\n if self.edge_attribute(edge, \"type\") == \"deviation\":\n return True\n return False", "def _is_bottom_edge(self, ndx):\n if len(self._dims) == 1:\n return True\n return (ndx % self._dims[1]) == self._dims[1]-1", "def IsDone(self, *args):\n return _BRepAlgo.BRepAlgo_EdgeConnector_IsDone(self, *args)", "def is_connected(self):\n if self.V < 1:\n raise ValueError(\"empty graph\")\n if self.V < 2:\n return True\n if self.E == 0:\n return False\n cc = self.cc()\n return int(cc.max() == 0)", "def has_endpoint(self, endpoint_number: int, direction: USBDirection) -> USBEndpoint:\n return (self.get_endpoint(endpoint_number, direction) is not None)", "def checkDirection(neighbour, current_point, end):\n\n for i in range(3):\n delta = abs(end[i] - current_point[i])\n if abs(end[i] - neighbour[i]) < delta and delta >= 0:\n return True, i\n\n return 
False, None", "def check_edges(self):\n start = int(input('Enter start vertex: '))\n end = int(input('Enter end vertex: '))\n if self._graph.is_edge_between(start, end):\n print('There is an edge from ' + str(start) + ' to ' + str(end))\n else:\n print('There is NO edge from ' + str(start) + ' to ' + str(end))", "def is_directed(self):\n return self.G.is_directed()", "def is_edge_in_graph(self, _from, _to):\r\n if not self.is_vertex_in_graph(_from):\r\n raise GraphException(f\"The vertex {_from} does not exist in the graph.\")\r\n if not self.is_vertex_in_graph(_to):\r\n raise GraphException(f\"The vertex {_to} does not exist in the graph.\")\r\n return _from in self.__neighbours[_to] and _to in self.__neighbours[_from]", "def graph_has_self_loops(edges):\n\n for a, b in edges:\n if a == b:\n return True\n\n return False", "def is_intersecting(self, directed_edge):\n begin_orientation = self.orientation(directed_edge.begin)\n end_orientation = self.orientation(directed_edge.end)\n \n if(begin_orientation == 0 or end_orientation == 0):\n return self.contains_point(directed_edge.begin) or self.contains_point(directed_edge.end) or directed_edge.contains_point(self.begin) or directed_edge.contains_point(self.end)\n\n directed_begin_orientation = directed_edge.orientation(self.begin)\n directed_end_orientation = directed_edge.orientation(self.end)\n\n return begin_orientation != end_orientation and directed_begin_orientation != directed_end_orientation", "def canReachDFS(start, end):\n visited = set()\n visited.add(start)\n __canReachDFS(start, visited)\n # a path exists if the end node was visited, otherwise the graph is\n # disconnected and no path exists from start to end\n return end in visited", "def is_final_node_reached(self):\n if self.actual_node == self.final_node:\n self.final_node_reached = True" ]
[ "0.6984057", "0.62434214", "0.62212586", "0.61637247", "0.6161208", "0.6138825", "0.61363894", "0.6098323", "0.60917765", "0.6071788", "0.60438186", "0.6020851", "0.5994252", "0.5977758", "0.59772736", "0.59747833", "0.5952888", "0.5948707", "0.5904112", "0.58738995", "0.58695596", "0.5862068", "0.5843104", "0.5826069", "0.5801944", "0.5791275", "0.57862353", "0.57849574", "0.57780087", "0.5773884", "0.5755143", "0.57523465", "0.57521206", "0.5750217", "0.5740432", "0.5693186", "0.56782705", "0.56706923", "0.5667205", "0.56621885", "0.5636554", "0.5632476", "0.56243217", "0.56216586", "0.5612183", "0.5588762", "0.5582339", "0.55686444", "0.5500109", "0.54963475", "0.54929245", "0.5485114", "0.5481701", "0.54816437", "0.5481277", "0.54668856", "0.54528683", "0.5444227", "0.54396814", "0.543069", "0.54232436", "0.5419401", "0.54109424", "0.5402239", "0.538892", "0.5386752", "0.5383907", "0.5371716", "0.5357088", "0.5353645", "0.5348445", "0.53366935", "0.5330339", "0.53302425", "0.53197575", "0.5318971", "0.53049535", "0.53043425", "0.5289455", "0.5289", "0.5286962", "0.5286616", "0.5286361", "0.5280923", "0.5280094", "0.5278907", "0.5278623", "0.5274216", "0.52730215", "0.5270478", "0.5265078", "0.5254949", "0.5253023", "0.52424705", "0.5241374", "0.5233947", "0.5232657", "0.522627", "0.52176166", "0.52094424" ]
0.77966064
0
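The document in the row above (OSMnx's endpoint test) classifies a node by self-loops, zero in/out degree, and neighbor/degree counts. A small runnable check of just the strict degree rules on a toy directed graph — `networkx` assumed, node labels made up for illustration — is sketched below:

```python
import networkx as nx

# Toy street fragment (made-up labels): 1 -> 2, then a two-way segment 2 <-> 3.
G = nx.MultiDiGraph()
G.add_edges_from([(1, 2), (2, 3), (3, 2)])


def is_endpoint_strict(G, node):
    """Degree-based endpoint rules mirroring the strict branch of the row above."""
    neighbors = set(G.predecessors(node)) | set(G.successors(node))
    n, d = len(neighbors), G.degree(node)
    if node in neighbors:  # a self-loop is always an endpoint
        return True
    if G.out_degree(node) == 0 or G.in_degree(node) == 0:  # dead end or pure source
        return True
    # A pass-through node has exactly 2 neighbors and either 2 or 4 directed edges.
    return not (n == 2 and d in (2, 4))


print([node for node in G.nodes if is_endpoint_strict(G, node)])
# -> [1, 2, 3]: node 2 has degree 3 (a oneway meets a twoway), so it is an endpoint
```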
Recursively build a path of nodes until you hit an endpoint node. Please note this method is taken directly from OSMnx, and can be found in \
def build_path( G: nx.Graph, node: int, endpoints: List[int], path: List[int]) -> List[int]: # For each successor in the passed-in node for successor in G.successors(node): if successor not in path: # If successor is already in path, ignore it, otherwise add to path path.append(successor) if successor not in endpoints: # If successor not endpoint, recursively call # build_path until endpoint found path = build_path(G, successor, endpoints, path) else: # If successor is endpoint, path is completed, so return return path if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])): # If end of the path is not actually an endpoint and the path's # first node is a successor of the path's final node, then this is # actually a self loop, so add path's first node to end of path to # close it path.append(path[0]) return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))", "def find_paths(self, start_key, target_key):\n\n stack = [(start_key, [start_key])]\n while stack:\n node_key, path = stack.pop()\n node = self.nodes[node_key]\n for nxt in node.neighbors - set(path):\n if nxt == target_key:\n yield path + [nxt]\n else:\n stack.append((nxt, path + [nxt]))", "def find_path(self, start):\n path = []\n leaf = start\n seen_nodes = []\n while True:\n if self.nodes[leaf]['address'] == '':\n return path\n\n left = leaf if self.nodes[leaf][\n 'left'] else self.nodes[leaf]['sibling']\n right = leaf if not self.nodes[leaf][\n 'left'] else self.nodes[leaf]['sibling']\n next_hash = do_hash(left + right, self.algo)\n leaf = self.nodes[leaf]['parent']\n assert leaf == next_hash\n assert next_hash not in seen_nodes\n assert next_hash in self.nodes\n step = [left, right, next_hash]\n path.append(step)", "def search_path(self):\n\n nodes = [self.start]\n final_node = None\n \n count = 0\n while True:\n count += 1\n\n if count % self.pick_target == 0:\n pick = self.goal.pos[:2]\n else:\n pick = self.car.random_pos()[:2]\n \n nearest = self.get_nearest_node(nodes, pick)\n\n if count % self.check_dubins == 0:\n solutions = self.dubins.find_tangents(nearest.pos, self.goal.pos)\n dubins_route, cost, valid = self.dubins.best_tangent(solutions)\n \n if valid:\n final_node = nearest\n break\n\n phi = 
self.get_steering_angle(nearest.pos, pick)\n pos = nearest.pos\n branch = [pos[:2]]\n \n for i in range(self.max_steps):\n pos = self.car.step(pos, phi)\n branch.append(pos[:2])\n \n # check safety of route-----------------------\n if phi == 0:\n safe = self.dubins.is_straight_route_safe(nearest.pos, pos)\n else:\n d, c, r = self.car.get_params(nearest.pos, phi)\n safe = self.dubins.is_turning_route_safe(nearest.pos, pos, d, c, r)\n # --------------------------------------------\n \n if not safe:\n continue\n \n new_node = Node(pos, phi, i+1)\n \n if new_node in nodes:\n continue\n \n new_node.branch = branch\n new_node.parent = nearest\n nodes.append(new_node)\n \n route = self.backtracking(final_node) + dubins_route\n path = self.car.get_path(self.car.start_pos, route)\n print('Total iteration:', count)\n \n return path, nodes", "def floyd_warshall_path(self, start, end, next_node): # pragma no cover\n if next_node[start][end] is None:\n return []\n path = [start]\n while start is not end:\n start = next_node[start][end]\n path.append(start)\n return path", "def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for 
edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 \r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)", "def path(self):\r\n node, p = self, []\r\n while node:\r\n p.append(node)\r\n node = node.parent\r\n yield from reversed(p)", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)", "def findPathsToBase(A,bSize):\n M,N = A.shape\n pressedPaths = []\n\n #For every two nodes in the base find all paths between them\n for b1 in range(bSize):\n for b2 in range(bSize):\n #Remove all other base nodes from the graph so that\n #we only find paths that go 
through the specialization set\n if b1 == b2:\n #In this case we are looking for a cycle.\n mask = [b1]+list(range(bSize,N))\n newSize = len(mask) + 1\n reduA = np.zeros((newSize,newSize))\n #Because the networkx cycle finders don't do what we need\n #them to do, we create a new graph and find paths instead\n reduA[:-1,:-1] = A[mask,:][:,mask]\n #Remove ingoing edges from the base node and add to new node\n reduA[-1,:] = reduA[0,:]\n reduA[0,:] = np.zeros(newSize)\n G = nx.DiGraph(reduA.T)\n #Find paths from the base node to the new node\n #same as finding all the cycles\n paths = list(nx.all_simple_paths(G,0,newSize-1))\n\n else:\n mask = [b1,b2]+list(range(bSize,N))\n reduA = A[mask,:][:,mask]\n #Remove base node interactions\n reduA[:2,:2] = np.zeros((2,2))\n G = nx.DiGraph(reduA.T)\n paths = list(nx.all_simple_paths(G,0,1))\n\n #Process Paths so that they make sense when the rest of the base\n #set is added to the graph\n for p in paths:\n if p != []:\n if b1 == b2:\n p = np.array(p) + bSize-1\n else:\n p = np.array(p) + bSize-2\n p[[0,-1]] = [b1, b2]\n pressedPaths.append(p)\n\n return pressedPaths", "def dfs_paths(graph, start, goal, method='dfs'):\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n stack = [(start, [start])]\n while stack:\n (vertex, path) = stack.pop(stack_pop)\n neighbors = node_neighbors(graph, vertex)\n for next_node in set(neighbors) - set(path):\n if next_node == goal:\n yield path + [next_node]\n else:\n stack.append((next_node, path + [next_node]))", "def define_path(self, node): \n if node.childrens!=[]:\n for child in node.childrens:\n node_child = child['node']\n node_child.times_used+=1\n self.define_path(node_child)\n \n \n #take care of not used nodes, set their gradient to 0\n for node in self.input_node:\n if node.times_used==0:\n node.gradient=np.zeros((node.output_dim, self.output_node.output_dim))", "def walk(self, priv_path:list):\n # End conditions for recursive loop\n current_node = priv_path[-1]\n if current_node.location in self.destination and len(priv_path)>1:\n self.addItinerary(priv_path)\n self.n_routes+=1\n return\n if self.n_routes >= self.max_n_routes:\n return\n\n if len(priv_path)>1:\n # Get metadata of last edge type\n last_edge = self.EdgeType(priv_path[-2], priv_path[-1])\n else: # If it's start of itinerary, next edge would be travel edge\n # So, make last edge as stay\n last_edge = 'stay'\n if last_edge == 'stay': # next edge will be travel i.e., ship not None\n next_nodes = [node for node in self.G.neighbors(current_node) \n if self.G.edges[current_node, node]['ship'] is not None]\n else: # Next edge will be stay, i.e., ship = None\n next_nodes = [node for node in self.G.neighbors(current_node)\n if self.G.edges[current_node, node]['ship'] is None]\n \n for node in next_nodes:\n self.walk(priv_path+[node])", "def find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n paths = []\n for node in graph[start]:\n newpaths = find_all_paths(graph, node, end, path)\n paths += newpaths\n return paths", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the 
start node and push into list of open nodes\n        n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n        n0.updatePriority(self.xFinish, self.yFinish)\n        heappush(pq[pqi], n0)\n        open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n\n        # A* search\n        while len(pq[pqi]) > 0:\n            # get the current node w/ the highest priority\n            # from the list of open nodes\n            n1 = pq[pqi][0] # top node\n            n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n            x = n0.xPos\n            y = n0.yPos\n            heappop(pq[pqi]) # remove the node from the open list\n            open_nodes_map[y][x] = 0\n            # mark it on the closed nodes map\n            closed_nodes_map[y][x] = 1\n\n            # quit searching when the goal state is reached\n            if x == self.xFinish and y == self.yFinish:\n                # Generate the path from finish to start by following the\n                # directions.\n                return self.Reconstruct_Path(dir_map)\n\n            # generate moves (child nodes) in all possible directions\n            for i in range(self.num_directions):\n                new_x = x + self.dx[i]\n                new_y = y + self.dy[i]\n                Flag=True\n                if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n                        or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n                    # Check to see if the extended path runs through any obstacles\n                    if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n                        # Need to check that the path does not pass an object\n                        JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n                        for K in range(1,JumpCells):\n                            YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n                            XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n                            if (self.MAP[y+YPOS][x+XPOS]==1):\n                                Flag=False\n                    if Flag:\n                        # generate a child node\n                        m0 = node(new_x, new_y, n0.distance, n0.priority)\n                        m0.calc_cost(self.dx[i], self.dy[i])\n                        m0.updatePriority(self.xFinish, self.yFinish)\n                        # if it is not in the open list then add it\n                        if open_nodes_map[new_y][new_x] == 0:\n                            open_nodes_map[new_y][new_x] = m0.priority\n                            heappush(pq[pqi], m0)\n                            # mark its parent node direction\n                            dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n                        elif open_nodes_map[new_y][new_x] > m0.priority:\n                            # update the priority info\n                            open_nodes_map[new_y][new_x] = m0.priority\n                            # update the parent direction info\n                            dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n                            # replace the node\n                            # by emptying one pq to the other one\n                            # except the node to be replaced will be ignored\n                            # and the new node will be pushed in instead\n                            while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n                                heappush(pq[1 - pqi], pq[pqi][0])\n                                heappop(pq[pqi])\n                            heappop(pq[pqi]) # remove the wanted node\n                            # empty the larger size pq to the smaller one\n                            if len(pq[pqi]) > len(pq[1 - pqi]):\n                                pqi = 1 - pqi\n                            while len(pq[pqi]) > 0:\n                                heappush(pq[1-pqi], pq[pqi][0])\n                                heappop(pq[pqi])\n                            pqi = 1 - pqi\n                            heappush(pq[pqi], m0) # add the better node instead\n        return '','' # no route found",
    "def find_all_path(self, start, end, path=[]):\n        path = path+[start]\n        if start == end:\n            return [path]\n        paths = []\n        for node in self.graph[start]:\n            if node not in path:\n                newpaths = self.find_all_path(node, end, path)\n                paths += newpaths\n        return paths",\n    "def _find_routes(self, start_node, previous_nodes=None):\n        if previous_nodes is None:\n            previous_nodes = []\n\n        routes = []\n        for con in self.connections:\n            if start_node == con.end:\n                con.flip()\n            if start_node == con.start:\n                # if the connection ends in a box output,\n                # add the connection (as a route of length 1)\n                if con.end.is_box_output():\n                    routes.append([con])\n                elif con.end.is_box_input():\n                    raise Exception(\"Route in connections detected, \"\n                                    \"that ends at an input.\")\n                elif con.end.is_switch_output():\n                    # check if there is conflict with previous nodes\n                    if con.end.switch in previous_nodes:\n                        raise Exception(\"Loop detected in connections at\"\n                                        f\"switch {con.end.switch}.\")\n                    # check orientation\n                    if con.end.switch.orientation == 1:\n                        raise Exception(\"Conflicting switch orientation \"\n                                        f\"for switch {con.end.switch}\")\n                    # Set orientation of the switch\n                    con.end.switch.orientation = -1\n                    # Add the node to the previous nodes and call the method\n                    # for the next node\n                    if con.start.parent_type == 'switch':\n                        previous_nodes.append(con.start.switch)\n                    else:\n                        previous_nodes.append(con.start)\n                    next_step = self._find_routes(\n                        con.end.switch.input,\n                        previous_nodes=previous_nodes\n                    )\n                    # Merge the current connection with the resulting routes\n                    for route in next_step:\n                        routes.append([con] + route)\n                # proceed analogously for a switch input\n                elif con.end.is_switch_input():\n                    if con.end.switch in previous_nodes:\n                        raise Exception(\"Loop detected in connections at\"\n                                        f\"switch {con.end.switch}.\")\n                    if con.end.switch.orientation == -1:\n                        raise Exception(\"Conflicting switch orientation \"\n                                        f\"for switch {con.end.switch}\")\n                    con.end.switch.orientation = 1\n                    if con.start.parent_type == 'switch':\n                        previous_nodes.append(con.start.switch)\n                    else:\n                        previous_nodes.append(con.start)\n\n                    # continue with both outputs\n                    next_step0 = self._find_routes(\n                        con.end.switch.output[0],\n                        previous_nodes=previous_nodes\n                    )\n\n                    next_step1 = self._find_routes(\n                        con.end.switch.output[1],\n                        previous_nodes=previous_nodes\n                    )\n\n                    for route in next_step0:\n                        routes.append([con] + route)\n                    for route in next_step1:\n                        routes.append([con] + route)\n\n                else:\n                    raise TypeError(f\"Node {con.end} not recognised\")\n\n        return routes",\n    "def FindAllPaths(graph, start, end, path=[]):\n    path = path + [start]\n    if start == end:\n        return [path]\n    if start not in graph:\n        return []\n    paths = []\n    for node in graph[start]:\n        if node not in path:\n            newpaths = FindAllPaths(graph, node, end, path)\n            for newpath in newpaths:\n                paths.append(newpath)\n    return paths",\n    "def bfs_paths(self, start: str, goal: str) -> List[Path]:\n        queue = [(start, [start])]\n        while queue:\n            (node, path) = queue.pop(0)\n            if node not in self.graph:\n                yield []\n                continue\n            for _next in set(self.graph[node]) - set(path):\n                if _next == goal:\n                    yield path + [_next]\n                elif _next in self.graph:\n                    queue.append((_next, path + [_next]))",\n    "def build_path(start, end):\n    a = hierarchy.index(start)\n    b = hierarchy.index(end)\n    if a == b:\n        return []\n    elif a < b:\n        return hierarchy[a + 1 : b + 1]\n    return list(reversed(hierarchy[b:a]))",\n    "def plan_path(self, start_point, end_point, map_obj):\n        # STUFF FOR TESTING \n        if self.enable_vis:\n            marker = Marker()\n            marker.header.frame_id = \"/map\"\n            marker.type = marker.POINTS\n            marker.action = marker.ADD\n\n            marker.scale.x = 0.1\n            marker.scale.y = 0.1\n            self.vis_pub.publish(marker)\n\n        exploration_bias = 1.0 - self.goal_bias\n        final_node = None\n        num_existing_path_points_added = 0\n\n        self.rrt_star = RRTStar(Node(start_point))\n        self.max_iterations = self.rrt_star.max_size\n        while self.rrt_star.size <= self.max_iterations:\n            p = np.random.uniform()\n            if p < exploration_bias:\n\n                x_rand = self.map.sample_free_space()\n            else:\n                if final_node is None:\n                    x_rand = end_point\n                else:\n                    x_rand = self.branched_from_existing_path(\n                        final_node,\n                        depth_underestimate=num_existing_path_points_added\n                    )\n                    num_existing_path_points_added += 1\n\n            
x_nearest = self.rrt_star.nearest(x_rand) # Find the nearest node to x_rand\n\n path = self.map.generate_line_path(x_nearest.value, x_rand, eta=self.eta)\n if path is not None: # no obstacles between x_nearest and x_rand\n x_new = path[-1]\n X_nearby_connectable = self.find_nearby_connectable(x_nearest, x_new)\n\n cost_min, node_min = self.find_best_parent(X_nearby_connectable, x_new)\n\n X_nearby_connectable.remove(node_min) # Remove x_new's parent node from the list of nearby nodes so it is not considered for rewiring\n \n # Create the new node at x_new!\n node_new = self.rrt_star.add_config(node_min, x_new)\n \n if self.enable_vis:\n # FOR TESTING ONLY #\n # Code to publish marker for new node\n ###########################################################################################\n TEMP = Point()\n TEMP.x = x_new[0]\n TEMP.y = x_new[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n self.vis_pub.publish(marker)\n ###########################################################################################\n\n self.rewire(cost_min, node_new, X_nearby_connectable)\n \n if np.allclose(node_new.value, end_point, .05, 0) and (final_node is None):#np.array_equal(node_new.value, end_point):\n final_node = node_new\n # reduce exploration bias so that we reinforce the existing path\n exploration_bias = .5\n if VERBOSE:\n print(\"Path found!!!!\")\n print(final_node.cost)\n if rospy.get_time() - self.start_time > self.time_thresh:\n if VERBOSE:\n print(self.rrt_star.size)\n break\n\n \n if final_node is not None:\n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n marker.points = []\n marker.colors = []\n def recur(node):\n if self.enable_vis:\n TEMP = Point()\n TEMP.x = node.value[0]\n TEMP.y = node.value[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n \n self.trajectory.points.append([node.value[0], node.value[1]])\n parent = node.parent\n if parent is not None:\n recur(parent)\n recur(final_node)\n self.trajectory.points.reverse()\n if self.enable_vis:\n self.vis_pub.publish(marker)\n if VERBOSE:\n print (final_node.depth)\n else:\n if VERBOSE:\n print(\"No path found! 
Please try again.\")\n \n \n \n # publish trajectory\n self.traj_pub.publish(self.trajectory.toPoseArray())\n\n # visualize trajectory Markers\n self.trajectory.publish_viz()", "def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def dfs_paths_dict_recur(\n graph: Mapping[Node, set[Node]],\n start: Node,\n goal: Node,\n path: Optional[list[Node]] = None\n) -> Iterable[list[Node]]:\n if path is None:\n path = [start]\n if start == goal:\n yield path\n else:\n for next_node in graph[start].difference(path):\n next_path = path + [next_node]\n yield from dfs_paths_dict_recur(graph, next_node, goal, next_path)", "def findPath(g, start, end, path=[]):\n path = path + [start]\n if start == end:\n return path\n if not start in g:\n return None\n for node in g[start]:\n if node not in path:\n newpath = findPath(g, node, end, path)\n if newpath: return newpath\n return None", "def create_path(self):\n\n partials = []\n partials.append({})\n #print self.trip_id\n\n #this variable is true if we have not yet recorded the first edge of a path\n first_edge = True\n #this variable is false until we hit the midpoint\n hit_midpoint = False\n\n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n good_graphs = []\n good_graphs.append(True)\n nodes_visited = []\n nodes_visited.append([])\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n good_graphs[matrices_index] = False\n else:\n edge_sets[matrices_index][edge_num] = 1\n if edge_num in partials[matrices_index] and 
partials[matrices_index][edge_num] == 0:\n del partials[matrices_index][edge_num]\n if not hit_midpoint:\n if first_edge:\n above = (prev_coords[0]-1,prev_coords[1])\n below = (prev_coords[0]+1,prev_coords[1])\n left = (prev_coords[0],prev_coords[1]-1)\n right = (prev_coords[0],prev_coords[1]+1)\n for next_coords in (above,below,left,right):\n other_edge = self.graph.edge_num(prev_coords[0],prev_coords[1],next_coords[0],next_coords[1])\n if other_edge != -1:\n partials[matrices_index][other_edge] = 0\n first_edge = False\n if self.graph.coords_to_node(prev_coords[0],prev_coords[1]) == self.midpoint:\n hit_midpoint = True\n partials[matrices_index][edge_num] = 1\n if self.graph.coords_to_node(coords[0],coords[1]) == self.midpoint:\n hit_midpoint = True\n\n\n\n if coords[0] == -1:\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n good_graphs.append(True)\n nodes_visited.append([])\n matrices_index += 1\n partials.append({})\n hit_midpoint = False\n first_edge = True\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n for coords in nodes_visited[best_index]:\n self.graph.node_visit(self.trip_id,coords)\n \n\n if self.trip_id not in self.graph.trip_id2line_num:\n #if first_lasts[best_index] == [28,5]:\n # print \"a to b: %d\" % self.trip_id\n self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],good_graphs[best_index],partials[best_index]", "def generate_path(goal_node, visited):\n goal_state = goal_node['state']\n path = [goal_state]\n while goal_node['parent']:\n path.append(goal_node['state'])\n goal_node = visited[goal_node['parent']]\n return path", "def build_path(start, hi, lo, bgArray):\n dead_ends = 0\n offsets = [\n (0, 1),\n (1, 1),\n (1, 0),\n (1, -1),\n (0, -1),\n (-1, -1),\n (-1, 0),\n (-1, 1)\n ]\n visited = [start,]\n path = [start,]\n location = start\n while path != []:\n found = False\n for offset in offsets:\n neighbor = (location[0] + offset[0], location[1] + offset[1])\n if len(visited) > 1 and neighbor == start:\n # lArray[neighbor] = label\n # print(\"Dead ends: \", dead_ends)\n return (path, visited, dead_ends)\n if is_edge(neighbor, hi, lo, bgArray) and neighbor not in visited:\n # lArray[neighbor] = label\n visited.append(neighbor)\n path.append(neighbor)\n location = neighbor\n found = True\n break\n if not found:\n # Dead end found, re-trace steps\n # print(\"@@@DEAD END!\")\n dead_ends += 1\n path.pop()\n if len(path) > 0:\n location = path[len(path)-1]\n print(\"@@@Edge is not part of the path? 
What the?\")\n return ([],[], -1)", "def construct_path(node):\n path = []\n current = node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path", "def find_path_recursion(cls, start_point: Coordination, current_point: Coordination,\n path_list: list[Coordination] = None):\n if path_list is None:\n path_list = [current_point]\n\n # the robot will keep going parallel to x-axis until it hits the x point of the start point\n if current_point.x != start_point.x:\n next_y = current_point.y\n\n if current_point.x > start_point.x:\n next_x = current_point.x - 1\n else:\n next_x = current_point.x + 1\n\n next_coordination = Coordination(next_x, next_y)\n path_list.append(next_coordination)\n cls.find_path_recursion(start_point=start_point, current_point=next_coordination, path_list=path_list)\n\n # the robot will keep going parallel to y-axis until it hits the y point of the start point\n elif current_point.y != start_point.y:\n next_x = current_point.x\n\n if current_point.y > start_point.y:\n next_y = current_point.y - 1\n else:\n next_y = current_point.y + 1\n\n next_coordination = Coordination(next_x, next_y)\n path_list.append(next_coordination)\n cls.find_path_recursion(start_point=start_point, current_point=next_coordination, path_list=path_list)\n\n return path_list", "def path(self):\n node, return_path = self, []\n while node:\n # Add the nodes in reverse order to a list until you reach the\n # root parent node which will terminate the loop\n return_path.append(node)\n node = node.parent\n # Reverse the list to get the proper path back\n return list(reversed(return_path))", "def create_path(network, user_A, user_B, path=[]):\n path = path + [user_A] # all paths include starting node\n if user_A == user_B: # id the last node is user_B a valid path exists\n return path # base case\n for node in network[user_A][0]:\n if node not in path: # otherwise path is an infinite loop\n path = create_path(network, node, user_B, path)\n if path: # after the recursion hits the base case\n return path\n return None", "def create_path_new(self):\n\n \n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n nodes_visited = []\n nodes_visited.append([])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n prev_gps = (-1.0,-1.0)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n new_edges = self.find_edges((lat,lon),prev_gps)\n for add_edge in new_edges:\n edge_sets[matrices_index][add_edge] = 1\n else:\n edge_sets[matrices_index][edge_num] = 1\n\n if coords[0] == -1:\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in 
range(self.graph.num_edges)])\n nodes_visited.append([])\n matrices_index += 1\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n normalized = normalize_simple(self.graph.lines[cur_line])\n prev_gps = (lat,lon)\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n #for coords in nodes_visited[best_index]:\n # self.graph.node_visit(self.trip_id,coords)\n\n #if self.trip_id not in self.graph.trip_id2line_num:\n # self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],first_lasts[best_index]", "def backtrack(start, end, prev):\n backtracked = False\n curr_node = end\n # instantiate path as list with destination only\n path = [curr_node]\n while not backtracked:\n # retrieve previous node\n prev_node = prev[curr_node]\n # insert it at beginning of path\n path.insert(0, prev_node)\n # move onto previous node as current node for next iteration\n curr_node = prev_node\n # break loop if we reached start\n if curr_node == start:\n backtracked = True\n return path", "def retrace_path(current_node, start_node, chip):\n path = []\n path_cost = 0\n collisions = 0\n\n while current_node != start_node:\n # Add the cost for the wires\n path_cost += 1\n x = current_node.position[0]\n y = current_node.position[1]\n z = current_node.position[2]\n\n # Keep track of collisions\n if chip.coordinates[z][y][x].used:\n collisions =+ 1\n\n path.append(current_node)\n current_node = current_node.parent\n\n # Add the costs for the collisions\n path_cost += collisions * 300\n\n # Add the start node\n path.append(current_node)\n\n # Reverse the order of the path\n return path[::-1], path_cost, True", "def path(visited,node):\n solution_path = [node]\n while solution_path[-1][\"parent\"]:\n solution_path.append(visited[tuple(solution_path[-1][\"parent\"])])\n return solution_path", "def build_path(cask_node, nodes):\n if cask_node.parent.name != 'ABC':\n nodes.insert(0, cask_node.parent.name)\n build_path(cask_node.parent, nodes)\n return nodes", "def build_reachgraph_from_path( path_delta_list, bottom_func_delta ):\r\n\tlastidx = len( path_delta_list ) - 1\r\n\twhile lastidx != 0:\r\n\t\t# create the first delta graph\r\n\t\tdeltagraph = create_stack_delta_graph_from_function( path_delta_list[ lastidx ][0], 25 )\r\n\t\tsicken = deltagraph.make_GML_output()\r\n\t\tf = file(\"c:\\\\deltagraph_%lx.gml\" % path_delta_list[ lastidx ][0], \"wt\")\r\n\t\tf.write(sicken)\r\n\t\tf.close()\r\n\t\t#reachgraph = create_reachgraph_from_delta_graph( deltagraph, -200 )\r\n\t\t# calculate total delta of the chain\r\n\t\tdelta = 0\r\n\t\tcount = lastidx\r\n\t\twhile count != 0:\r\n\t\t\tdelta = delta - path_delta_list[ count ][1]\r\n\t\t\tdelta = delta - 4\r\n\t\t\tcount = count - 1\r\n\t\tdelta = delta - path_delta_list [ 0 ][1]\r\n\t\tprint \"Calling create_reachgraph from %lx with delta %d\" % (path_delta_list[ lastidx ][0], delta )\r\n\t\treachgraph = 
create_reachgraph_from_delta_graph( deltagraph, delta )\t\r\n\t\tsicken = reachgraph.make_GML_output()\r\n\t\tf = file(\"c:\\\\reach_%lx_%d.gml\" % (path_delta_list[lastidx][0], delta), \"wt\")\r\n\t\tf.write(sicken)\r\n\t\tf.close()\r\n\t\tlastidx = lastidx - 1", "def _trace_path_from_start(self, node=None):\n if node==None: node=self.goal\n current_node=node\n path_list=np.array([])\n while(current_node!=None):\n path_list=np.append(path_list,current_node.state)\n path_list=path_list.reshape(-1,self.goal.state.size)\n current_node=current_node.parent\n\n path_list=path_list[::-1]\n print (path_list)\n return path_list", "def find_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n for node in self.graph[start]:\n if node not in path:\n newpath = self.find_path(node, end, path)\n if newpath:\n return newpath\n return None", "def is_endpoint(G: nx.Graph, node: int, strict=True):\n\n neighbors = set(list(G.predecessors(node)) + list(G.successors(node)))\n n = len(neighbors)\n d = G.degree(node)\n\n if node in neighbors:\n # If the node appears in its list of neighbors, it self-loops. this is\n # always an endpoint.\n return True\n\n # If node has no incoming edges or no outgoing edges, it must be an\n # endpoint\n elif G.out_degree(node) == 0 or G.in_degree(node) == 0:\n return True\n\n elif not (n == 2 and (d == 2 or d == 4)):\n # Else, if it does NOT have 2 neighbors AND either 2 or 4 directed\n # edges, it is an endpoint. either it has 1 or 3+ neighbors, in which\n # case it is a dead-end or an intersection of multiple streets or has\n # 2 neighbors but 3 degree (indicating a change from oneway to twoway)\n # or more than 4 degree (indicating a parallel edge) and thus is an\n # endpoint\n return True\n\n elif not strict:\n # Non-strict mode\n osmids = []\n\n # Add all the edge OSM IDs for incoming edges\n for u in G.predecessors(node):\n for key in G[u][node]:\n osmids.append(G.edges[u, node, key]['osmid'])\n\n # Add all the edge OSM IDs for outgoing edges\n for v in G.successors(node):\n for key in G[node][v]:\n osmids.append(G.edges[node, v, key]['osmid'])\n\n # If there is more than 1 OSM ID in the list of edge OSM IDs then it is\n # an endpoint, if not, it isn't\n return len(set(osmids)) > 1\n\n else:\n # If none of the preceding rules returned true, then it is not an\n # endpoint\n return False", "def get_paths_to_simplify(G: nx.Graph, strict: bool=True) -> List[List[int]]:\n\n # First identify all the nodes that are endpoints\n endpoints = set([node for node in G.nodes()\n if is_endpoint(G, node, strict=strict)])\n\n # Initialize the list to be returned; an empty list\n paths_to_simplify = []\n\n # For each endpoint node, look at each of its successor nodes\n for node in endpoints:\n for successor in G.successors(node):\n if successor not in endpoints:\n # if the successor is not an endpoint, build a path from the\n # endpoint node to the next endpoint node\n try:\n paths_to_simplify.append(\n build_path(G,\n successor,\n endpoints,\n path=[node, successor]))\n except RuntimeError:\n # Note: Recursion errors occur if some connected component\n # is a self-contained ring in which all nodes are not\n # end points handle it by just ignoring that\n # component and letting its topology remain intact\n # (this should be a rare occurrence).\n log(('Recursion error: exceeded max depth, moving on to '\n 'next endpoint successor'), level=lg.WARNING)\n\n return paths_to_simplify", "def construct_path(edge_dict, node_list):\n previous = None\n for item in 
node_list:\n if previous is None:\n previous = Path(item, 0)\n previous_item = item\n else:\n actions = edge_dict[previous_item]\n # assume that the actions are uniquely represented\n action_dict = {action.end: action for action in actions}\n correct_action = action_dict[item]\n previous_item = item\n previous = previous.extend_path(\n correct_action.end, correct_action.cost)\n return previous", "def path(self): # Path taken to reach Goal\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def get_paths(self, target, use_edges=False, downwards=None):\n raise NotImplementedError()", "def dfs(self):\n def add_to_stack(stack, done, src, path):\n for dest in self.edges[src]:\n if dest not in done:\n for step_path in self.edges[src][dest]:\n stack.append((dest, step_path, path))\n done.add(src)\n stack = [] # Stack of steps to take\n done = set() # Nodes we've visited\n # Seed the stack with all edges from the start cell.\n add_to_stack(stack, done, self.start_cell, '')\n while stack:\n (src, step_path, path) = stack.pop()\n path = path + step_path\n if src == self.exit_cell:\n return path\n add_to_stack(stack, done, src, path)\n return '' # No path found.", "def paths(self, start):\n # This is probably a little slow\n tupadd = lambda p, v: (p[0] + v[0], p[1] + v[1])\n # First, we'll check adjacency moves.\n adj = [tupadd(start, v) for v in DIRECTIONS]\n yield from (p for p in adj if self.board(p) == 0)\n # Now we check repeated hops.\n # We do this by a breadth first search.\n\n #TODO: Consensus on legality of hopping back to start and \"skipping\"\n visited = set(adj)\n to_visit = [start]\n while len(to_visit):\n pt = to_visit.pop(0)\n if pt in visited:\n continue\n\n # We have to actually move a piece\n # But this stops us from considering \"start\" even if we can\n # make some hops and get back to start\n if pt is not start:\n yield pt\n \n visited.add(pt)\n # Compute the hop directions\n dirs = ((tupadd(pt, v), tupadd(pt, tupadd(v, v))) for v in DIRECTIONS)\n to_visit.extend(\n dest for over, dest in dirs\n if self.board(over) > 0\n and self.board(dest) == 0\n and dest not in visited\n and over != start\n )", "def __generate_all_shortest_paths(self,cutoff = 10):\n if cutoff < 1:\n cutoff = 10\n self.__logger.info(\"cutoff value must be a positive integer. 
Set back to default value: 10\")\n\n all_pair_shortest_paths = nx.all_pairs_shortest_path(self.G, cutoff=cutoff)\n for item in all_pair_shortest_paths:\n from_node = item[0]\n paths = item[1]\n for destination,path in paths.items():\n yield (len(path),path)", "def closed_paths(entities, vertices):\n # get a networkx graph of entities\n graph, closed = vertex_graph(entities)\n # add entities that are closed as single- entity paths\n entity_paths = np.reshape(closed, (-1, 1)).tolist()\n # look for cycles in the graph, or closed loops\n vertex_paths = nx.cycles.cycle_basis(graph)\n\n # loop through every vertex cycle\n for vertex_path in vertex_paths:\n # a path has no length if it has fewer than 2 vertices\n if len(vertex_path) < 2:\n continue\n # convert vertex indices to entity indices\n entity_paths.append(\n vertex_to_entity_path(vertex_path,\n graph,\n entities,\n vertices))\n\n return entity_paths", "def build_links(self):\n xygrid = self.xymap.xygrid\n\n # we must use the xygrid coordinates\n x, y = self.x, self.y\n\n # scan in all directions for links\n for direction, (dx, dy) in MAPSCAN.items():\n\n lx, ly = x + dx, y + dy\n\n if lx in xygrid and ly in xygrid[lx]:\n link = xygrid[lx][ly]\n\n # just because there is a link here, doesn't mean it has a\n # connection in this direction. If so, the `end_node` will be None.\n end_node, weight, steps = link.traverse(REVERSE_DIRECTIONS[direction])\n\n if end_node:\n # the link could be followed to an end node!\n\n self.first_links[direction] = link\n\n # check the actual direction-alias to use, since this may be\n # different than the xygrid cardinal directions. There must be\n # no duplicates out of this node or there will be a\n # multi-match error later!\n first_step_name = steps[0].direction_aliases.get(direction, direction)\n if first_step_name in self.closest_neighbor_names:\n raise MapParserError(\n f\"has more than one outgoing direction '{first_step_name}'. \"\n \"All directions out of a node must be unique.\",\n self,\n )\n self.closest_neighbor_names[first_step_name] = direction\n\n node_index = end_node.node_index\n self.weights[node_index] = weight\n self.links[direction] = end_node\n # this is useful for map building later - there could be multiple\n # links tied together until getting to the node\n self.xy_steps_to_node[direction] = steps\n\n # used for building the shortest path. 
Note that we store the\n # aliased link directions here, for quick display by the\n # shortest-route solver\n shortest_route = self.shortest_route_to_node.get(node_index, (\"\", [], BIGVAL))[\n 2\n ]\n if weight < shortest_route:\n self.shortest_route_to_node[node_index] = (first_step_name, steps, weight)", "def _iterate_protocol_recursive(self, this_node, path):\n # step through every edge from the current node.\n for edge in self.edges_from(this_node.id):\n # keep track of the path as we fuzz through it, don't count the root node.\n # we keep track of edges as opposed to nodes because if there is more then one path through a set of\n # given nodes we don't want any ambiguity.\n path.append(edge)\n\n message_path = \"->\".join([self.nodes[e.dst].name for e in path])\n logging.debug('fuzzing: {0}'.format(message_path))\n\n for x in self._iterate_single_node(path):\n yield x\n\n # recursively fuzz the remainder of the nodes in the session graph.\n for x in self._iterate_protocol_recursive(self.fuzz_node, path):\n yield x\n\n # finished with the last node on the path, pop it off the path stack.\n if path:\n path.pop()", "def pathDAG(graph, value, path, onePath):\n for node in graph:\n if node.value == value:\n for vertex in node.arrow:\n if vertex == None:\n path.append(onePath)\n break\n \n else:\n onePath.append(vertex.value)\n pathDAG(graph, vertex.value, path, onePath)\n onePath = [onePath[0]]\n \n return path", "def get_connected_nodes(node, current_path_len) :\r\n\r\n connected_nodes = [] #A list of the connected nodes\r\n closed_list_coords = get_path_coordinates(closed_list)\r\n\r\n #Checking if the node belongs to the 1st row\r\n if(node.coords[0] != 0) :\r\n connected_node = Node((node.coords[0] - 1, node.coords[1]), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the last row\r\n if(node.coords[0] != grid_dims[0] - 1) :\r\n connected_node = Node((node.coords[0] + 1, node.coords[1]), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != 0) :\r\n connected_node = Node((node.coords[0], node.coords[1] - 1), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n #Checking if the node belongs to the 1st column\r\n if(node.coords[1] != grid_dims[1] - 1) :\r\n connected_node = Node((node.coords[0], node.coords[1] + 1), goal_pos, current_path_len)\r\n #Checking if the node has already been traversed or is it is an obstacle\r\n if(not connected_node.coords in closed_list_coords and not connected_node.coords in obstacle_coords) :\r\n connected_nodes.append(connected_node)\r\n\r\n return connected_nodes", "def manage_paths(node, paths) :\r\n\r\n #Getting the nodes neighbouring the given node\r\n neighbours = get_neighbouring_nodes(node) \r\n\r\n #Creating a new path branch\r\n new_path = [] #The new path\r\n path_found = False #Indicates whether the path to which the 
node belongs has been found\r\n\r\n #Looping through the neighbours\r\n for neighbour in neighbours :\r\n for path in paths :\r\n #Checking whether the path contains the neighbour\r\n if(neighbour in path) :\r\n index = path.index(neighbour)\r\n #Checking if the branch belongs to the current path\r\n if(path[index].gn_value == neighbour.gn_value) :\r\n new_path = path[:index + 1] + [node] #Creating a new path branch\r\n new_path[-1].gn_value = new_path.__len__() - 1 #Updating the node's g(n) value\r\n path_found = True\r\n break\r\n if(path_found) :\r\n break\r\n \r\n if(not path_found) :\r\n raise Exception(\"No branch junction found\")\r\n\r\n #Setting the new path as the current path\r\n return new_path", "def get_path_endpoints(self):\n endpoints = []\n\n # Get the far end of the last path segment\n path, split_ends, position_stack = self.trace()\n endpoint = path[-1][2]\n if split_ends is not None:\n for termination in split_ends:\n endpoints.extend(termination.get_path_endpoints())\n elif endpoint is not None:\n endpoints.append(endpoint)\n\n return endpoints", "def dfs(starting_vertex):\n s = Stack()\n\n s.push([starting_vertex])\n\n while s.size() > 0:\n p = s.pop()\n l = p[-1]\n\n if l not in new_visited_rooms:\n return p\n neighbors = set(get_neighbors(l))\n \n for n in neighbors:\n new_path = p.copy()\n new_path.append(n)\n s.push(new_path)", "def reverse_path_iterator(node):\n while node:\n yield node\n node = node.parent", "def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def test_find_all_paths():\n g = Graph()\n node_1 = Node({'A':['B','C']})\n g.add(node_1)\n node_2 = Node({'B':['C','D']})\n g.add(node_2)\n node_3 = Node({'C':['D']})\n g.add(node_3)\n node_4 = Node({'D':['C']})\n g.add(node_4)\n node_5 = Node({'E':['C']})\n g.add(node_5)\n\n # zero path between node_1 and node_5\n paths_0 = g.find_all_paths(node_1, node_5)\n assert len(paths_0) == 0\n # only one path between node_5 and node_4\n paths_1 = g.find_all_paths(node_5, node_4)\n assert len(paths_1) == 1\n assert [ node.name for node in paths_1[0] ] == [ node_5.name, node_3.name, node_4.name ]\n # three paths between node_1 and node_3, verify all the three paths are returned\n paths_3 = g.find_all_paths(node_1, node_3)\n assert len(paths_3) == 3\n for path in paths_3:\n assert [ node.name for node in path ] == [ node_1.name, node_2.name, node_3.name ] or \\\n [ node.name for node in path ] == [ node_1.name, node_2.name, node_4.name, node_3.name ] or \\\n [ node.name for node in path ] == [ node_1.name, node_3.name ]", "def find_next(to_nodes, from_node):\n for to_node in to_nodes:\n interaction = (from_node, to_node)\n if interaction not in used_edges:\n return interaction\n return None", "def reconstruct_path(goal: Vector2D, prev_node: dict) -> list:\n path = []\n prev = prev_node[goal] # remove 'goal' from path\n \n while prev != None:\n path.append(prev)\n prev = prev_node[prev]\n \n path = path[:-1] # remove 'start' from path\n path.reverse()\n return path", "def derive_path(self, path):\n next_node = self\n for identifier in path:\n next_node = next_node.derive_one(identifier)\n\n return next_node", "def extract_paths(self, start_node, end_node, metapaths=None, mats=None, n_jobs=1):\n\n # Validate 
the given nodes and get information on nodes needed for processing arguments.\n # IDs could potentially be strings or integers..?\n assert type(start_node) == str or not isinstance(start_node, collections.Iterable)\n assert type(end_node) == str or not isinstance(end_node, collections.Iterable)\n assert self.id_to_metanode[start_node] == self.start_kind\n assert self.id_to_metanode[end_node] == self.end_kind\n start_idx = self.nid_to_index[start_node]\n end_idx = self.nid_to_index[end_node]\n\n # Get all metapaths if none passed\n if metapaths is None:\n metapaths = sorted(list(self.metapaths.keys()))\n # If single metapath passed, make it a list\n if type(metapaths) == str:\n metapaths = [metapaths]\n\n if mats is None:\n mats = self.degree_weighted_matrices\n\n arguments = []\n path_nodes = {}\n for mp in metapaths:\n to_multiply = mt.get_matrices_to_multiply(mp, self.metapaths, mats)\n to_multiply[0] = to_multiply[0][start_idx, :]\n to_multiply[len(to_multiply)-1] = to_multiply[-1][:, end_idx]\n arguments.append({'to_multiply': to_multiply, 'start_idx': start_idx,\n 'end_idx': end_idx, 'metapath': mp})\n edges = mt.get_edge_names(mp, self.metapaths)\n path_nodes[mp] = [b.replace('<', '-').replace('>', '-').split(' - ')[0] for b in edges] + \\\n [edges[-1].replace('<', '-').replace('>', '-').split(' - ')[-1]]\n\n result = parallel_process(array=arguments, function=mt.get_individual_paths,\n use_kwargs=True, n_jobs=n_jobs, front_num=0)\n\n out = []\n\n for r in result:\n for res in r:\n node_ids = []\n nodes = []\n for idx, node_type in zip(res['node_idxs'], path_nodes[res['metapath']]):\n node_id = self.index_to_nid[node_type][idx]\n node = self.nid_to_name[node_id]\n\n node_ids.append(node_id)\n nodes.append(node)\n\n if len(node_ids) == len(set(node_ids)):\n out.append({'node_ids': node_ids, 'nodes': nodes, 'metapath': res['metapath'],\n 'metric': res['metric']})\n\n return out", "def get_path(start, child_father_dict):\n\n def _dfs(node, path, res):\n path.append(node)\n if node not in child_father_dict:\n res.append(path.copy())\n path.pop()\n return\n for ni in child_father_dict[node]:\n _dfs(ni, path, res)\n path.pop()\n\n all_path = []\n if start in child_father_dict:\n _dfs(start, [], all_path)\n return all_path", "def findRoute(self, x1, y1, x2, y2):\r\n\r\n\t\t# Check to see if the start and end node are the same\r\n\t\tif x1 == x2 and y1 == y2:\r\n\t\t\treturn [(x1, y1)]\r\n\r\n\t\troot_node = DijkstraNode(x1, y1, None, 0)\r\n\t\troot_node.neighbours = self.getNeighbours(x1, y1)\r\n\r\n\t\t# Create a dictionary to store all of the nodes\r\n\t\tall_nodes = {(x1, y1): root_node}\r\n\t\t# If no starting place is found return nothing\r\n\t\tif len(root_node.neighbours) == 0:\r\n\t\t\treturn []\r\n\t\tcurrent_node = root_node\r\n\t\twhile (x2, y2) not in all_nodes:\r\n\r\n\t\t\t# If the algorithm hasn't found the target node and cannot explore further then return empty path\r\n\t\t\tif current_node is None:\r\n\t\t\t\treturn []\r\n\r\n\t\t\tcurrent_node.neighbours = self.getNeighbours(current_node.x, current_node.y)\r\n\r\n\t\t\t# The distance from the root node through the current node to the neighbour\r\n\t\t\tcurrent_neighbour_dist = current_node.dist + 1\r\n\r\n\t\t\tfor neighbour in current_node.neighbours:\r\n\t\t\t\tif neighbour in all_nodes:\r\n\t\t\t\t\tneighbour_node = all_nodes[neighbour]\r\n\t\t\t\t\tif current_neighbour_dist < neighbour_node.dist:\r\n\t\t\t\t\t\t# The new best path is through the current node\r\n\t\t\t\t\t\tneighbour_node.parent = 
current_node\r\n\t\t\t\t\t\tneighbour_node.dist = current_neighbour_dist\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Add a new node if it doesn't exist within the currently explored nodes\r\n\t\t\t\t\tall_nodes[neighbour] = DijkstraNode(neighbour[0], neighbour[1], current_node, current_neighbour_dist)\r\n\r\n\t\t\t# Mark the current node as being explored as you have checked all the neighbours\r\n\t\t\tcurrent_node.explored = True\r\n\r\n\t\t\t# Gets a list of all of the unexplored nodes to check for the next node to explore\r\n\t\t\tunexplored_nodes = [node for _, node in all_nodes.items() if not node.explored]\r\n\r\n\t\t\tif len(unexplored_nodes) > 0:\r\n\t\t\t\t# Go to the next node with the smallest distance that hasn't been explored\r\n\t\t\t\tcurrent_node = min(unexplored_nodes, key=lambda node: node.dist)\r\n\t\t\telse:\r\n\t\t\t\tcurrent_node = None\r\n\r\n\t\t# Make your way back from the target node\r\n\t\tcurrent_node = all_nodes[(x2, y2)]\r\n\t\t# Initialise a list to hold the path going from the target to the root\r\n\t\treversed_path = []\r\n\t\t# This will end when the root node tries to travel to a None node\r\n\t\twhile current_node is not None:\r\n\t\t\t# Add the current node to the list\r\n\t\t\treversed_path.append((current_node.x, current_node.y))\r\n\t\t\t# Travel to the parent node\r\n\t\t\tcurrent_node = current_node.parent\r\n\t\t\t# current_node will be None at the root because the parent of the root node is 'None'\r\n\r\n\t\t# Return the list in the correct order\r\n\t\treturn list(reversed(reversed_path))", "def find_all_paths(parents_to_children, start, end, path=[]):\r\n path = path + [start]\r\n if start == end:\r\n return [path]\r\n if start not in parents_to_children.keys():\r\n return []\r\n paths = []\r\n for node in parents_to_children[start]:\r\n if node not in path:\r\n newpaths = find_all_paths(parents_to_children, node, end, path)\r\n for newpath in newpaths:\r\n paths.append(tuple(newpath))\r\n return paths", "def traverse(self, start_direction, _weight=0, _linklen=1, _steps=None):\n xygrid = self.xymap.xygrid\n\n end_direction = self.get_direction(start_direction)\n if not end_direction:\n if _steps is None:\n # is perfectly okay to not be linking back on the first step (to a node)\n return None, 0, None\n raise MapParserError(\n f\"was connected to from the direction {start_direction}, but \"\n \"is not set up to link in that direction.\",\n self,\n )\n\n # note that if `get_direction` returns an unknown direction, this will be equivalent\n # to pointing to an empty location, which makes sense\n dx, dy = MAPSCAN.get(end_direction, (BIGVAL, BIGVAL))\n end_x, end_y = self.x + dx, self.y + dy\n try:\n next_target = xygrid[end_x][end_y]\n except KeyError:\n # check if we have some special action up our sleeve\n next_target = self.at_empty_target(start_direction, end_direction)\n\n if not next_target:\n raise MapParserError(f\"points to empty space in the direction {end_direction}!\", self)\n\n _weight += self.get_weight(start_direction, _weight)\n if _steps is None:\n _steps = []\n _steps.append(self)\n\n if hasattr(next_target, \"node_index\"):\n # we reached a node, this is the end of the link.\n # we average the weight across all traversed link segments.\n return (\n next_target,\n _weight / max(1, _linklen) if self.average_long_link_weights else _weight,\n _steps,\n )\n else:\n # we hit another link. 
Progress recursively.\n return next_target.traverse(\n REVERSE_DIRECTIONS.get(end_direction, end_direction),\n _weight=_weight,\n _linklen=_linklen + 1,\n _steps=_steps,\n )", "def find_path(self, start_point: Pos, end_point: Pos, obstacles: list) -> list:\n pass", "def backtrack_to_start_to_draw_purpose(board, end):\r\n cell = board.at(end)\r\n # print(cell)\r\n path = []\r\n lis = []\r\n while cell != None:\r\n path.append(cell)\r\n cell = cell.path_from\r\n for i in path[-1:]:\r\n for j in i.position:\r\n lis.append(j)\r\n\r\n return path", "def graph_search(initial_state):\n path = [initial_state]\n current_node = copy.deepcopy(initial_state)\n while True:\n count = len(path)\n result = expand(current_node)\n for i in result:\n if i[1][1] == 0:\n path.append(i)\n break\n if len(path) > count:\n break\n else:\n current_node = result[-1]\n path.append(result[-1])\n return path", "def _node_walk_downhill(self, node):\n \n chain = -np.ones(self.tri.npoints, dtype=np.int) # in case the mesh is a spiral ziggurat\n\n idx = 0\n maxIdx = self.tri.npoints\n chain[idx] = node\n low_neighbour = self._node_lowest_neighbour(node)\n junction = -1\n\n while low_neighbour != -1:\n idx += 1\n chain[idx] = low_neighbour \n if self.node_chain_lookup[low_neighbour] != -1:\n junction = self.node_chain_lookup[low_neighbour]\n break \n\n low_neighbour = self._node_lowest_neighbour(low_neighbour)\n \n return junction, chain[0:idx+1]", "def nb_simple_paths(self, start, end):\n if start not in self.nodes or end not in self.nodes:\n return None\n\n visited = set()\n nb_paths = 0\n\n def search(node, goal):\n nonlocal nb_paths\n visited.add(node)\n\n if node == goal:\n nb_paths += 1\n else:\n for neighbor in self.nodes[node]:\n if neighbor not in visited:\n search(neighbor, goal)\n\n visited.remove(node)\n\n search(start, end)\n return nb_paths", "def reconstruct_path(cameFrom, current):\n total_path = np.array([[current.x],[current.y]])\n while current_in_cameFrom(current,cameFrom):\n current = current.father\n node_x = current.x\n node_y = current.y\n node_pos = np.array([[node_x],[node_y]])\n total_path = np.hstack((total_path,node_pos))\n\n l1 = total_path[0,:]\n l1 = l1[::-1]\n l2 = total_path[1,:]\n l2 = l2[::-1]\n total_path = np.vstack((l1,l2))\n return total_path", "def get_shortest_path_edge(\n self,\n xP0,\n yP0,\n xP1,\n yP1,\n npoint_ref=1,\n debug_info=False,\n ):\n # find indices of endpoints\n if self.on_sphere:\n idxP0 = get_index_lonlat(xP0, yP0, self.xvertex, self.yvertex)\n idxP1 = get_index_lonlat(xP1, yP1, self.xvertex, self.yvertex)\n else:\n idxP0 = get_index_xy(xP0, yP0, self.xvertex, self.yvertex)\n idxP1 = get_index_xy(xP1, yP1, self.xvertex, self.yvertex)\n print('Vertex closest to P0: {:8.5f} {:8.5f}'.format(self.xvertex[idxP0], self.yvertex[idxP0]))\n print('Vertex closest to P1: {:8.5f} {:8.5f}'.format(self.xvertex[idxP1], self.yvertex[idxP1]))\n # find reference points\n x_ref, y_ref = gc_interpolate(self.xvertex[idxP0], self.yvertex[idxP0],\n self.xvertex[idxP1], self.yvertex[idxP1], npoint_ref+2)\n if self.on_sphere:\n x_ref = np.mod(x_ref[1:-1], 360)\n y_ref = np.mod(y_ref[1:-1], 360)\n # initialize an empty path\n out = EdgePath()\n # loop over reference points, find the path between these points\n idx_sp0 = idxP0\n for i in np.arange(npoint_ref):\n idx_vertex = np.minimum(i,1)\n idx_sp1 = get_index_xy(x_ref[i], y_ref[i], self.xvertex, self.yvertex)\n print(' - Vertex closest to RefP{:d}: {:8.5f} {:8.5f}'.format(i+1, self.xvertex[idx_sp1], self.yvertex[idx_sp1]))\n out_i = 
get_path_edge(idx_sp0, idx_sp1,\n                                  self.vertexid, self.xvertex, self.yvertex,\n                                  self.edgeid, self.xedge, self.yedge,\n                                  self.edges_vertex, self.vertices_edge,\n                                  self.on_sphere, debug_info)\n            out = out + out_i\n            idx_sp0 = idx_sp1\n        # last path, start from end points P1\n        out_n = get_path_edge(idxP1, idx_sp1,\n                              self.vertexid, self.xvertex, self.yvertex,\n                              self.edgeid, self.xedge, self.yedge,\n                              self.edges_vertex, self.vertices_edge,\n                              self.on_sphere, debug_info)\n        out = out + out_n.reverse()\n        return out",\n    "def point_neighbors_recursion(self, point):\n        # Sanity checks\n        if point is None:\n            raise ValueError(\"Cannot operate on None\")\n\n        neighbors = []\n        # 1-dimension\n        if len(point) == 1:\n            neighbors.append([point[0] - 1])  # left\n            neighbors.append([point[0]])  # current\n            neighbors.append([point[0] + 1])  # right\n\n            return neighbors\n\n        # n-dimensional\n        for sub_dimension in self.point_neighbors_recursion(point[1:]):\n            neighbors.append([point[0] - 1] + sub_dimension)  # left + (n-1)-dimensional combinations\n            neighbors.append([point[0]] + sub_dimension)  # current + (n-1)-dimensional combinations\n            neighbors.append([point[0] + 1] + sub_dimension)  # right + (n-1)-dimensional combinations\n\n        return neighbors",\n    "def _generate_subpaths(self):\n\n        scale = self.SCALE\n\n        for point in self._points:\n            x_base = point[0] * scale + self.border * scale + self.line_size\n            y_base = point[1] * scale + self.border * scale + self.line_size\n\n            yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n                x0=x_base,\n                y0=y_base,\n                x1=x_base + scale,\n                y1=y_base + scale\n            )",\n    "def __search_path(self, start_node, goal_node):\n\n        path = []\n        queue = PriorityQueue()\n        queue.put((0, start_node))\n        visited = {start_node}\n\n        branch = {}\n        found = False\n\n        while not queue.empty():\n            item = queue.get()\n            current_cost = item[0]\n            current_node = item[1]\n\n            if current_node == goal_node:\n                found = True\n                break\n            else:\n                for next_node in self._route_graph[current_node]:\n                    cost = self._route_graph.edges[current_node, next_node]['weight']\n                    new_cost = current_cost + cost + self.__heuristic(next_node, goal_node)\n\n                    if next_node not in visited:\n                        visited.add(next_node)\n                        queue.put((new_cost, next_node))\n\n                        branch[next_node] = (new_cost, current_node)\n\n        path = []\n        path_cost = 0\n        if found:\n            # retrace steps\n            path = []\n            n = goal_node\n            path_cost = branch[n][0]\n            while branch[n][1] != start_node:\n                path.append(branch[n][1])\n                n = branch[n][1]\n            path.append(branch[n][1])\n        else:\n            print(\"Path Not Found\")\n\n        return path[::-1], path_cost",\n    "def calculate_path(self):\n        #Repeat the cycle for the specified number of iterations\n        for i in range(self.iterations):\n            for ant in self.ants:\n                ant.setup_ant()\n                while not ant.final_node_reached:\n                    #Randomly select the next node to visit\n                    node_to_visit = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n                    #Move the ant to the randomly selected next node\n                    ant.move_ant(node_to_visit)\n                    #Check whether the solution has been reached\n                    ant.is_final_node_reached()\n                #Add the resulting route to the list of routes\n                self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n                # Enable the ant to start another search\n                ant.enable_start_new_path()\n\n            # Update the global pheromone level\n            self.pheromone_update()\n            self.best_result = self.paths[0]\n\n            #Empty the list of routes\n            self.empty_paths()\n            print('Iteration: ', i, 'length of the path: ', len(self.best_result))\n        return self.best_result",\n    "def 
topological_nodes_generator(graph, reverse=...):\n ...", "def bfs(self, start, end):\n\n queue = [start]\n parent = dict()\n\n # Initialize parent dictionary\n for v in iter(self._reachable): parent[v] = None\n parent[start] = start\n\n while len(queue) > 0:\n (x, y) = queue.pop(0)\n if (x, y) == end: break\n\n for v in self.get_reachables(x, y):\n if parent[v] is not None: \n # Vertex v already visited\n continue\n parent[v] = (x, y)\n queue.append(v)\n\n # Reconstruct path\n path = [end]\n vertex = end\n\n while parent[vertex] != vertex:\n if parent[vertex] is None: return []\n path.append(parent[vertex])\n vertex = parent[vertex]\n\n path.reverse()\n return path", "def astar(maze, start, end, agents):\r\n\r\n # Create start and end node\r\n start_node = Node(None, start)\r\n end_node = Node(None, end)\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n open_pos = []\r\n closed_pos = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n open_pos.append(start)\r\n\r\n # Loop until you find the end\r\n while len(open_list) > 0:\r\n\r\n # Get the current node\r\n current_node = open_list[0]\r\n current_index = 0\r\n \r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n open_pos.pop(current_index)\r\n closed_pos.append(current_node.position)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n\r\n current = current_node\r\n while current is not None:\r\n path.append(current.position) \r\n current = current.parent\r\n\r\n return path[::-1] # Return reversed path\r\n\r\n # # Generate children\r\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0)]: # Adjacent squares\r\n \r\n # Get node position\r\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\r\n\r\n # Make sure within range\r\n if node_position[0] > maze.shape[0]-1 or node_position[0] < 0 or node_position[1] > maze.shape[1]-1 or node_position[1] < 0:\r\n continue\r\n\r\n # Make sure walkable terrain\r\n if maze[node_position[0]][node_position[1]] == 0:\r\n continue\r\n\r\n if not validataPath(current_node, node_position, agents):\r\n continue\r\n\r\n # Create new node\r\n child = Node(current_node, node_position)\r\n\r\n if node_position not in closed_pos:\r\n child = Node(current_node, node_position)\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + 1\r\n child.h = ((child.position[0] - end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the open list\r\n if node_position in open_pos:\r\n index = open_pos.index(node_position)\r\n if open_list[index].g > child.g:\r\n open_list[index] = child\r\n\r\n # Add the child to the open list\r\n else:\r\n open_list.append(child)\r\n open_pos.append(node_position)\r\n\r\n return None", "def make_path(self, loop):\n path=Path(closed=True)\n for c in range(0,len(loop)):\n connection=loop[c]\n nextConnection = loop[(c+1)%len(loop)]\n lastConnection = loop[(c-1)%len(loop)]\n if self.dontFill(connection, nextConnection, lastConnection):\n return False\n cp = self.corner_pos(connection, nextConnection, lastConnection)\n corner_offset = cp[0]\n corner_dir = cp[1]\n # catch case when it is the end of a single rod\n if type(corner_offset) is list:\n endpoints = corner_offset\n corner_offset = endpoints[0]\n else:\n 
endpoints = False\n if connection.other.radius is not None and ( corner_offset.length() < connection.other.radius or corner_dir >0):\n # catch case when it is the end of a single rod\n if endpoints:\n para=(connection.this.pos-connection.other.pos).normalize()\n d = math.sqrt(connection.other.radius**2 - corner_offset.length()**2)\n path.add_point(PSharp(connection.other.pos + corner_offset + d*para))\n path.add_point(PArc(connection.other.pos, radius=connection.other.radius, direction='cw'))\n path.add_point(PSharp(connection.other.pos - corner_offset+d*para))\n else:\n path.add_point(PAroundcurve(connection.other.pos + corner_offset, centre=connection.other.pos, radius=connection.other.radius, direction='cw'))\n\n elif self.get_intRadius(connection, connection.other) is not None:\n # path.add_point(PIncurve(connection.other.pos + corner_offset, radius=self.get_intRadius(connection, connection.other)))\n # path.add_point(PIncurve(connection.other.pos - corner_offset, radius=self.get_intRadius(connection, connection.other)))\n # path.add_point(PIncurve(connection.other.pos - corner_offset, radius=self.get_intRadius(connection, connection.other)))\n path.add_point(PIncurve(connection.other.pos + corner_offset, radius=self.get_intRadius(connection, connection.other)))\n else:\n cornerpos = self.corner_pos(connection, nextConnection, lastConnection)\n if type(cornerpos) is list:\n path.add_point(PSharp(connection.other.pos + cornerpos[0]))\n path.add_point(PSharp(connection.other.pos + cornerpos[1]))\n else:\n path.add_point(PSharp(connection.other.pos + cornerpos))#self.corner_pos(connection, nextConnection, lastConnection)))\n # path.add_point(PSharp(connection.other.pos - cornerpos))#self.corner_pos(connection, nextConnection, lastConnection)))\n if connection.other.holeRad is not None:\n if type(connection.other.holeRad) is int or type(connection.other.holeRad) is float:\n self.otherpaths.append(Circle(connection.other.pos, rad=connection.other.holeRad, side='in'))\n else:\n t=copy.deepcopy(connection.other.holeRad)\n t.translate(connection.other.pos)\n self.otherpaths.append(t)\n return path", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def draw_plan(self, end_nodes, colors):\n for agent, nodes in enumerate(end_nodes.values()):\n for 
node in nodes:\n # self.root.draw_path_from_node(node, color=colors[agent], label='Agent ' + str(agent)) TODO: add back to this\n self.root.draw_path_from_node(self, node, color=colors[agent], agent=agent)", "def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def _backtrack_path(self, current=None):\n path = []\n if self.path_found:\n if not current:\n current = self.end_node\n path.append(self.end_node)\n while current.parent:\n path.append(current.parent)\n self.board_array[current.parent.y][current.parent.x] = 4\n current = current.parent\n self.board_array[self.start_node.y][self.start_node.x] = 2\n self.board_array[self.end_node.y][self.end_node.x] = 3\n return path", "def find_path(source_point, destination_point, mesh):\r\n\r\n path = []\r\n boxes = {}\r\n\r\n sourceBox = None\r\n destBox = None\r\n\r\n # print(mesh)\r\n\r\n for box in mesh['boxes']:\r\n if box not in boxes and inBox(box, source_point):\r\n sourceBox = box\r\n # print(sourceBox)\r\n if box not in boxes and inBox(box, destination_point):\r\n destBox = box\r\n # print(destBox)\r\n\r\n if (sourceBox is None) or (destBox is None):\r\n print(\"No Path!\")\r\n return [], []\r\n\r\n dist = {}\r\n prev = {}\r\n boxCoords = {}\r\n\r\n dist[sourceBox] = 0\r\n prev[sourceBox] = None\r\n boxCoords[sourceBox] = source_point\r\n\r\n priorityQueue = []\r\n heappush(priorityQueue, (dist[sourceBox], sourceBox))\r\n\r\n adj = mesh[\"adj\"]\r\n\r\n while priorityQueue:\r\n\r\n currentCost, currentPos = heappop(priorityQueue)\r\n #neighbors = adj(graph, currentPos)\r\n\r\n # if currentPos == destination:\r\n # path = []\r\n # currPath = destination\r\n # while currPath is not None:\r\n # path.insert(0, currPath)\r\n # currPath = prev[currPath]\r\n # return path\r\n\r\n if currentPos == destBox:\r\n path = [boxCoords[currentPos], destination_point]\r\n\r\n backBox = prev[currentPos]\r\n backCoord = boxCoords[currentPos]\r\n\r\n while backBox is not None:\r\n path.insert(0, [boxCoords[backBox], backCoord])\r\n backBox = prev[backBox]\r\n backCoord = boxCoords[backBox]\r\n print(backCoord)\r\n\r\n return path, boxes.keys()\r\n\r\n # for neighborPos, neighborCost in neighbors:\r\n\r\n # alt = dist[currentPos] + neighborCost\r\n\r\n # if neighborPos not in dist or alt < dist[neighborPos]:\r\n # dist[neighborPos] = alt\r\n # prev[neighborPos] = currentPos\r\n # heappush(priorityQueue, (alt, neighborPos))\r\n\r\n for neighbor in adj[currentPos]:\r\n\r\n boxes[neighbor] = currentPos\r\n\r\n xRange = [max(currentPos[0], neighbor[0]),\r\n min(currentPos[1], neighbor[1])]\r\n yRange = [max(currentPos[2], neighbor[2]),\r\n min(currentPos[3], neighbor[3])]\r\n\r\n firstCost = euclideanDistance(\r\n (xRange[0], yRange[0]), boxCoords[currentPos])\r\n secondCost = euclideanDistance(\r\n (xRange[1], yRange[1]), boxCoords[currentPos])\r\n\r\n if firstCost <= secondCost:\r\n finalCost = firstCost\r\n finalPoint = (xRange[0], yRange[0])\r\n else:\r\n finalCost = secondCost\r\n finalPoint = (xRange[1], yRange[1])\r\n\r\n alt = currentCost + finalCost\r\n if neighbor not in dist or alt < dist[neighbor]:\r\n dist[neighbor] = alt\r\n prev[neighbor] = currentPos\r\n boxCoords[neighbor] = 
finalPoint\r\n heappush(priorityQueue, (alt, neighbor))\r\n return None", "def dfs(g: nx.Graph, start_node: Any) -> str:\n\n way = []\n stack = [start_node]\n y = {node: [] for node in g.nodes}\n while stack:\n elem = stack.pop()\n way.append(elem)\n for node in list(g.neighbors(elem)):\n if node not in way:\n stack.append(node)\n y[node].extend((*y[elem], elem))\n print(y)\n return \"\".join(way)", "def get_path(prevs, goal, start):\n path = OD({goal: 0})\n cur = goal\n while cur != start:\n (cost, node) = prevs.get(cur)\n if node == None or node in path:\n print(\"ERROR: No path found from %s -> %s\" % (start, goal))\n return (0, None)\n path[node] = path[cur] + cost\n cur = node\n return (path[start], path.keys()[::-1])", "def decompose_paths_rec(node_inner, path):\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths", "def shortest_path(edges, start, end):\n visitedNodes = []\n queue = [[start]]\n if start == end:\n return [start]\n \n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node not in visitedNodes:\n neighbors = get_neighbors(edges, node)\n for neighbor in neighbors:\n newPath = list(path)\n newPath.append(neighbor)\n queue.append(newPath)\n if neighbor == end:\n return fix_format(edges, newPath)\n visitedNodes.append(node)\n return None", "def find_path(sources, goals, connections):\n visited = set()\n expanded = set()\n queue = deque()\n\n for s in sources:\n queue.appendleft([s])\n\n while queue:\n path = queue.pop()\n head = path[-1]\n visited.add(head)\n\n neighbours = [o for (i, o) in connections if i == head]\n for neighbour in neighbours:\n if neighbour in goals:\n return path + [neighbour]\n elif neighbour not in visited:\n queue.appendleft(path + [neighbour])\n\n return []", "def depth_limited_search(initial_state, goal_state, limit):\n\n return recursive_dls(createRootNode(initial_state), goal_state, limit)", "def relations_from(self, start_node):", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def graph_traverse(nodes, node, path_count, path_length):\n\tpath_length += 1\n\t\n\t# For any connected_node, traverse that node unless it's the last node\n\tfor connected_node in node[1]:\n\t\ttry:\n\t\t\tgraph_traverse(nodes, nodes[int(connected_node)], path_count, path_length)\n\t\t\t\n\t\texcept:\n\t\t\t# Total paths\n\t\t\tnodes[1][0][2] += 1\n\t\t\n\t\t\t# Shortest path\n\t\t\tif path_length < nodes[1][0][0]:\n\t\t\t\tnodes[1][0][0] = path_length\n\n\t\t\t# Longest path\n\t\t\tif path_length > nodes[1][0][1]:\n\t\t\t\tnodes[1][0][1] = path_length\n\n\t\t\tcontinue\n\tprint(nodes[1][0][2])", "def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, 
pointList[ix], \r\n pointList[iy]))", "def _build_edges(self, build_edge_to : Callable[['SpatialGraph.Node'], List[int]]) -> None:\n for node in self.nodes.values():\n for idx in build_edge_to(node):\n if not node.has_edge_to(idx) \\\n and not idx == node.id: # prevent self-links\n node.neighbours[idx] = self.nodes[idx]\n # put reverse edge ?\n if not self.directed:\n self.nodes[idx].neighbours[node.id] = node", "def connectNodes(imgR,nodes,start,goal):\n alphabet = string.ascii_lowercase\n nodeConnections = [[] for i in range(len(nodes)+2)]\n for index, node in enumerate(nodes):\n paths = adjPaths(imgR,node)\n for path in paths:\n result = checkPath(imgR,nodes,node,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[index+1].append(alphabet[nIndex+1])\n paths = adjPaths(imgR,start) # add start to nodes\n for path in paths:\n result = checkPath(imgR,nodes,start,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[0].append(alphabet[nIndex+1])\n for node in nodeConnections[0]:\n nodeConnections[alphabet.index(node)].append(alphabet[0])\n paths = adjPaths(imgR,goal) # add goal to nodes\n for path in paths:\n result = checkPath(imgR,nodes,goal,path)\n if not result == 0: \n nIndex = nodes.index(result)\n nodeConnections[len(nodeConnections)-1].append(alphabet[nIndex+1])\n for node in nodeConnections[len(nodeConnections)-1]:\n nodeConnections[alphabet.index(node)].append(alphabet[len(nodeConnections)-1])\n return [alphabet[i] for i in range(len(nodes)+2)], nodeConnections", "def IteratePaths(self):\n self.w = self.setwage(self.K, self.N)\n self.r = self.setrate(self.K, self.N)\n self.b = self.benefit(self.N)\n\n a1, aT = [-1,], []\n\n for q in range(self.Nq):\n if q == 0:\n self.apath[-1] = 0.2\n elif q == 1:\n self.apath[-1] = 0.3\n else:\n self.apath[-1] = max(0,aT[-1]-(aT[-1]-aT[-2])*a1[-1]/(a1[-1]-a1[-2]))\n \n self.npath[-1] = 0\n self.cpath[-1] = self.apath[-1]*(1+self.r) + self.b\n\n for y in range(-2,-(self.T+1),-1): # y = -2, -3,..., -60\n self.apath[y], self.npath[y], self.cpath[y] = self.DirectSolve(y)\n\n aT.append(self.apath[-1])\n a1.append(self.apath[-self.T])\n if (fabs(self.apath[-self.T])<self.tol):\n break\n for y in range(-1,-(self.T+1),-1):\n self.upath[y] = self.util(self.cpath[y],self.npath[y])", "def traverse(name, furtherPath):" ]
[ "0.66372293", "0.5984706", "0.5966498", "0.5830176", "0.58110154", "0.58090067", "0.5781353", "0.577707", "0.5756183", "0.5688646", "0.56742686", "0.5663207", "0.5620114", "0.56037915", "0.5599367", "0.55866355", "0.5558341", "0.5553427", "0.5551647", "0.55495954", "0.55245346", "0.5511733", "0.54762167", "0.5463652", "0.54474235", "0.5445509", "0.5444622", "0.5434825", "0.53709906", "0.5356731", "0.5321662", "0.53194714", "0.53179497", "0.5314198", "0.5303361", "0.52987325", "0.5294574", "0.5294091", "0.52848095", "0.52814764", "0.5279384", "0.5267074", "0.5258199", "0.52297", "0.5221113", "0.52130455", "0.5210196", "0.5209828", "0.5209379", "0.5190318", "0.5186012", "0.5159913", "0.5157307", "0.51503384", "0.5144052", "0.51391935", "0.5138859", "0.512524", "0.51202273", "0.5119457", "0.5118971", "0.51188713", "0.5117721", "0.5117143", "0.51070446", "0.50992626", "0.508856", "0.5086588", "0.5085821", "0.5085817", "0.50844353", "0.5082598", "0.5082151", "0.50771815", "0.50769854", "0.50761676", "0.5072177", "0.5068722", "0.5064642", "0.50571185", "0.50497717", "0.50429827", "0.5036823", "0.5033297", "0.50313294", "0.5022306", "0.5015438", "0.50137186", "0.5006412", "0.5000937", "0.49995884", "0.49961847", "0.49955204", "0.49936968", "0.4992543", "0.49913725", "0.49818775", "0.4980704", "0.49777713", "0.4975624" ]
0.7077985
0
Create a list of all the paths to be simplified between endpoint nodes. The path is ordered from the first endpoint, through the interstitial nodes, to the second endpoint. Please note this method is taken directly from OSMnx, and can be found in \
def get_paths_to_simplify(G: nx.Graph, strict: bool=True) -> List[List[int]]:

    # First identify all the nodes that are endpoints
    endpoints = set([node for node in G.nodes() if is_endpoint(G, node, strict=strict)])

    # Initialize the list to be returned; an empty list
    paths_to_simplify = []

    # For each endpoint node, look at each of its successor nodes
    for node in endpoints:
        for successor in G.successors(node):
            if successor not in endpoints:
                # if the successor is not an endpoint, build a path from the
                # endpoint node to the next endpoint node
                try:
                    paths_to_simplify.append(
                        build_path(G, successor, endpoints, path=[node, successor]))
                except RuntimeError:
                    # Note: Recursion errors occur if some connected component
                    # is a self-contained ring in which all nodes are not
                    # endpoints; handle it by just ignoring that
                    # component and letting its topology remain intact
                    # (this should be a rare occurrence).
                    log(('Recursion error: exceeded max depth, moving on to '
                         'next endpoint successor'), level=lg.WARNING)

    return paths_to_simplify
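A minimal usage sketch for the function above, assuming hypothetical simplified stand-ins for the `is_endpoint` and `build_path` helpers it references (OSMnx's real `is_endpoint` inspects more graph structure than this degree check, and `networkx` plus `typing.List` are assumed to already be imported for the definition above):

import networkx as nx

def is_endpoint(G, node, strict=True):
    # Hypothetical simplification: a node is an endpoint unless it is a
    # pure pass-through with exactly one predecessor and one successor.
    return not (G.in_degree(node) == 1 and G.out_degree(node) == 1)

def build_path(G, node, endpoints, path):
    # Compact stand-in: walk successors until the next endpoint is reached.
    for successor in G.successors(node):
        if successor not in path:
            path.append(successor)
            if successor not in endpoints:
                return build_path(G, successor, endpoints, path)
            return path
    return path

# A chain 0 -> 1 -> 2 -> 3 -> 4: nodes 1, 2 and 3 are interstitial, so the
# whole chain comes back as a single path to be simplified.
G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])
print(get_paths_to_simplify(G))  # [[0, 1, 2, 3, 4]]

The recursion in `build_path` is what can blow the stack on a self-contained ring of non-endpoints; since RecursionError subclasses RuntimeError, that is exactly the case the `except RuntimeError` branch above guards against.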
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)", "def build_path(\n G: nx.Graph,\n node: int,\n endpoints: List[int],\n path: List[int]) -> List[int]:\n\n # For each successor in the passed-in node\n for successor in G.successors(node):\n if successor not in path:\n # If successor is already in path, ignore it, otherwise add to path\n path.append(successor)\n\n if successor not in endpoints:\n # If successor not endpoint, recursively call\n # build_path until endpoint found\n path = build_path(G, successor, endpoints, path)\n\n else:\n # If successor is endpoint, path is completed, so return\n return path\n\n if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])):\n # If end of the path is not actually an endpoint and the path's\n # first node is a successor of the path's final node, then this is\n # actually a self loop, so add path's first node to end of path to\n # close it\n path.append(path[0])\n\n return path", "def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def shortestpaths(self, start, end, edgeweight=\"t_0\"):\n graph = self.graph\n shortest_nodepaths = list(\n nx.all_shortest_paths(\n graph, start, end, weight=edgeweight, method=\"dijkstra\"\n )\n )\n shortest_paths = []\n for path in shortest_nodepaths:\n edgepath = []\n for i in range(len(path) - 1):\n edgepath.append((path[i], path[i + 1]))\n shortest_paths.append(edgepath)\n\n return shortest_paths", "def get_path_list(path):\n # Build the path from end back to beginning.\n nodes = []\n prevevt = None\n while path is not None:\n nodes.append((path.node, path.cost, prevevt))\n prevevt = path.evt\n path = path.prev\n\n nodes.reverse()\n return nodes", "def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale + self.line_size\n y_base = point[1] * scale + self.border * scale + self.line_size\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )", "def reconstruct_path(goal: Vector2D, prev_node: dict) -> list:\n path = []\n prev = prev_node[goal] # remove 'goal' from path\n \n 
while prev != None:\n path.append(prev)\n prev = prev_node[prev]\n \n path = path[:-1] # remove 'start' from path\n path.reverse()\n return path", "def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path", "def extract_path(self):\n if self.extracted_path is not None:\n return self.extracted_path\n current = self\n path = []\n while current:\n path.append([current.end, current.path_cost])\n current = current.parent\n return list(reversed(path))", "def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def all_shortest_paths(self, start_node, end_node):\n s=self.min_dist(start_node,end_node)\n return self.all_paths(start_node,end_node,s,[])", "def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n 
dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 \r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... 
so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)", "def reconstruct_path(source, target, predecessors):\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))", "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif 
begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))", "def create_path(self):\n\n partials = []\n partials.append({})\n #print self.trip_id\n\n #this variable is true if we have not yet recorded the first edge of a path\n first_edge = True\n #this variable is false until we hit the midpoint\n hit_midpoint = False\n\n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n good_graphs = []\n good_graphs.append(True)\n nodes_visited = []\n nodes_visited.append([])\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n good_graphs[matrices_index] = False\n else:\n edge_sets[matrices_index][edge_num] = 1\n if edge_num in partials[matrices_index] and partials[matrices_index][edge_num] == 0:\n del partials[matrices_index][edge_num]\n if not hit_midpoint:\n if first_edge:\n above = (prev_coords[0]-1,prev_coords[1])\n below = (prev_coords[0]+1,prev_coords[1])\n left = (prev_coords[0],prev_coords[1]-1)\n right = (prev_coords[0],prev_coords[1]+1)\n for next_coords in (above,below,left,right):\n other_edge = self.graph.edge_num(prev_coords[0],prev_coords[1],next_coords[0],next_coords[1])\n if other_edge != -1:\n partials[matrices_index][other_edge] = 0\n first_edge = False\n if self.graph.coords_to_node(prev_coords[0],prev_coords[1]) == self.midpoint:\n hit_midpoint = True\n partials[matrices_index][edge_num] = 1\n if self.graph.coords_to_node(coords[0],coords[1]) == self.midpoint:\n hit_midpoint = True\n\n\n\n if coords[0] == -1:\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n good_graphs.append(True)\n nodes_visited.append([])\n matrices_index += 1\n partials.append({})\n hit_midpoint = False\n first_edge = True\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = 
prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n for coords in nodes_visited[best_index]:\n self.graph.node_visit(self.trip_id,coords)\n \n\n if self.trip_id not in self.graph.trip_id2line_num:\n #if first_lasts[best_index] == [28,5]:\n # print \"a to b: %d\" % self.trip_id\n self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],good_graphs[best_index],partials[best_index]", "def possible(self):\n return [tuple(path) for path in nx.all_shortest_paths(self._gpm.Graph, source=self.source, target=self.target)]", "def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))", "def path(self):\n\t\tnode, path_back = self, []\n\t\twhile node:\n\t\t\tpath_back.append(node)\n\t\t\tnode = node.parent\n\t\treturn list(reversed(path_back))", "def bfs_paths(self, start: str, goal: str) -> List[Path]:\n queue = [(start, [start])]\n while queue:\n (node, path) = queue.pop(0)\n if node not in self.graph:\n yield []\n for _next in set(self.graph[node]) - set(path):\n if _next == goal:\n yield path + [_next]\n elif _next in self.graph:\n queue.append((_next, path + [_next]))", "def expand_paths_by_nodes(self, paths):\n paths_formatted = set()\n # Expand each path\n for path in paths:\n if len(path) < 2:\n continue\n expanded_paths = set()\n if self.include_entity:\n relations_for_each_step = [[path[0]]]\n else:\n relations_for_each_step = []\n for index in range(1, len(path)):\n node1 = path[index-1]\n node2 = path[index]\n if (node1, node2) in self.pair_to_relations:\n relations = self.pair_to_relations[(node1, node2)]\n else:\n print(node1, node2)\n relations_for_each_step.append(relations)\n if self.include_entity:\n relations_for_each_step.append([node2])\n expanded_paths.update(list(itertools.product(*relations_for_each_step)))\n paths_formatted.update(expanded_paths)\n return paths_formatted", "def path(self):\r\n node, p = self, []\r\n while node:\r\n p.append(node)\r\n node = node.parent\r\n yield from reversed(p)", "def find_all_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.find_path(node, end, path)\n paths.append(newpaths)\n return paths", "def closed_paths(entities, vertices):\n # get a networkx graph of entities\n graph, closed = vertex_graph(entities)\n # add entities that are closed as single- entity paths\n entity_paths = np.reshape(closed, (-1, 1)).tolist()\n # look for cycles in the graph, or closed loops\n vertex_paths = nx.cycles.cycle_basis(graph)\n\n # loop through every vertex cycle\n for vertex_path in vertex_paths:\n # a path has no length if it has fewer than 2 vertices\n if len(vertex_path) < 2:\n continue\n # convert vertex indices to entity indices\n entity_paths.append(\n vertex_to_entity_path(vertex_path,\n graph,\n entities,\n vertices))\n\n return entity_paths", "def path(self):\n node, return_path = self, []\n while node:\n # Add the nodes in reverse order to a list until you reach the\n # root parent node which will terminate the loop\n return_path.append(node)\n node = node.parent\n # Reverse the list to get the proper path back\n return 
list(reversed(return_path))", "def constructShortestPath(self):\r\n sp = []\r\n v = self.t\r\n while self.preds[v]: # is not None\r\n sp.append(v)\r\n v = self.preds[v]\r\n sp.append(self.s) # source\r\n sp.reverse() # to have the path from source to dest and not t to s\r\n return sp, self.graph.getCoords(sp)", "def decompose_paths(self):\n if self.child_nodes == {}:\n return []\n\n import numpy as np\n\n def decompose_paths_rec(node_inner, path):\n \"\"\"\n This function does the recursive create_path of the decomposition\n :param node_inner:\n :param path:\n \"\"\"\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths\n\n decomposition = decompose_paths_rec(self, np.array([]))\n return decomposition.reshape((decomposition.shape[0]/(self.d+1), self.d+1))", "def calculate_paths(topology):\n nodes = topology['nodes']\n edges = topology['links']\n\n dist = [[len(nodes) + 1 for x in range(len(nodes))] for y in range(len(nodes))]\n paths = [[[] for x in range(len(nodes))] for y in range(len(nodes))]\n\n for e in edges.values():\n s, d = int(e['source']), int(e['target'])\n dist[s][d] = dist[d][s] = 1\n paths[s][d] = [e['id']]\n paths[d][s] = [e['id']]\n\n for k in range(len(nodes)):\n for i in range(len(nodes)):\n for j in range(len(nodes)):\n if dist[i][k] + dist[k][j] < dist[i][j]:\n dist[i][j] = dist[i][k] + dist[k][j]\n paths[i][j] = paths[i][k] + paths[k][j]\n return paths", "def find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n paths = []\n for node in graph[start]:\n newpaths = find_all_paths(graph, node, end, path)\n paths += newpaths\n return paths", "def _path(from_object, to_object):\n\n if from_object._root != to_object._root:\n raise ValueError(\"No connecting path found between \" +\n str(from_object) + \" and \" + str(to_object))\n\n other_path = []\n obj = to_object\n while obj._parent is not None:\n other_path.append(obj)\n obj = obj._parent\n other_path.append(obj)\n object_set = set(other_path)\n from_path = []\n obj = from_object\n while obj not in object_set:\n from_path.append(obj)\n obj = obj._parent\n index = len(from_path)\n i = other_path.index(obj)\n while i >= 0:\n from_path.append(other_path[i])\n i -= 1\n return index, from_path", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node.state[0])\n node = node.parent\n return list(reversed(path_back))", "def _points_to_paths(self, points):\n prev = points[0]\n result = []\n for point in points[1:]:\n path = specctraobj.Path()\n path.aperture_width = self._from_pixels(1)\n path.vertex.append(prev)\n path.vertex.append(point)\n result.append(path)\n prev = point\n return result", "def reconstruct_path(current):\r\n path = [current.coord]\r\n parent = current.parent\r\n while parent:\r\n path = [parent.coord] + path\r\n parent = parent.parent\r\n path = path[1:]\r\n return path", "def paths_list(ctx):\n for path in ctx.obj['CLIENT'].paths.list():\n if not path.source.name:\n cidr_blocks = 
[subnetwork.cidr_block for subnetwork in path.source.subnetworks]\n source_name = \",\".join(cidr_blocks)\n network_name = \"external\"\n else:\n source_name = path.source.name\n network_name = path.source.network.name\n click.echo(\"%s:%s -(%s)-> %s:%s\" % (network_name, source_name, path.port,\n path.network.name, path.destination.name))", "def floyd_warshall_path(self, start, end, next_node): # pragma no cover\n if next_node[start][end] is None:\n return []\n path = [start]\n while start is not end:\n start = next_node[start][end]\n path.append(start)\n return path", "def create_path_new(self):\n\n \n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n nodes_visited = []\n nodes_visited.append([])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n prev_gps = (-1.0,-1.0)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n new_edges = self.find_edges((lat,lon),prev_gps)\n for add_edge in new_edges:\n edge_sets[matrices_index][add_edge] = 1\n else:\n edge_sets[matrices_index][edge_num] = 1\n\n if coords[0] == -1:\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n nodes_visited.append([])\n matrices_index += 1\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n normalized = normalize_simple(self.graph.lines[cur_line])\n prev_gps = (lat,lon)\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n #for coords in nodes_visited[best_index]:\n # self.graph.node_visit(self.trip_id,coords)\n\n #if self.trip_id not in self.graph.trip_id2line_num:\n # self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],first_lasts[best_index]", "def paths(p, q):\n if (p, q) == (0, 0):\n return [((0, 0),)]\n answer = list()\n if p > 0:\n west = paths(p - 1, q)\n for path in west:\n answer.append(path + ((p, q),))\n if q > 0:\n south = paths(p, q - 1)\n for path in south:\n answer.append(path + ((p, q),))\n return answer", "def get_sorted_paths(self, src, dst):\n\n paths = 
self.topo.get_all_paths_between_nodes(src, dst)\n # trim src and dst\n paths = [x[1:-1] for x in paths]\n return paths", "def transition_path(self):\n node, path_back = self, []\n while node:\n path_back.append(node.action)\n node = node.parent\n return list(reversed(path_back))", "def getPaths(self):\n\n trafficEndPoints = []\n # A job denotes a traffic flow, which corresponds to an iperf task.\n for job in self.config.trace.jobs:\n trafficEndPoints.append((job['src'], job['dst']))\n\n # Obtain details about user-specified non-default links.\n configuredLinks = []\n for linkInfo in self.config.topoData['linkInfos']:\n configuredLinks.append((linkInfo['src'], linkInfo['dst']))\n\n paths = None\n spec = self.config.topoData['flowSpec']\n if spec == 'shortest_path':\n # export paths info and create routing conf using shortest paths\n adjFile = self.config.adjacencyFile\n writeAdjList(self.net, adjFile)\n info(\"**** [G2]: adjacency list written to file\", adjFile, \"\\n\")\n\n outfile = os.path.join(self.config.outPath, SHORTEST_PATH_FILE)\n paths = generateShortestPaths(adjFile, outfile, trafficEndPoints, configuredLinks)\n info(\"**** [G2]: shortest paths written to file\", outfile, \"\\n\")\n # Note: Since there can be multiple shortest paths between two endpoints, solution could vary.\n elif \".json\" in spec:\n info(\"**** [G2]: reading path info from\", spec, \"\\n\")\n paths = readFromPathFile(spec)\n else:\n paths = None\n return paths", "def interjoint_paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n subpaths = self._single_tree_interjoint_paths(\n tree, return_indices=return_indices\n )\n paths.extend(subpaths)\n\n return paths", "def findPathsToBase(A,bSize):\n M,N = A.shape\n pressedPaths = []\n\n #For every two nodes in the base find all paths between them\n for b1 in range(bSize):\n for b2 in range(bSize):\n #Remove all other base nodes from the graph so that\n #we only find paths that go through the specialization set\n if b1 == b2:\n #In this case we are looking for a cycle.\n mask = [b1]+list(range(bSize,N))\n newSize = len(mask) + 1\n reduA = np.zeros((newSize,newSize))\n #Because the networkx cycle finders don't do what we need\n #them to do, we create a new graph and find paths instead\n reduA[:-1,:-1] = A[mask,:][:,mask]\n #Remove ingoing edges from the base node and add to new node\n reduA[-1,:] = reduA[0,:]\n reduA[0,:] = np.zeros(newSize)\n G = nx.DiGraph(reduA.T)\n #Find paths from the base node to the new node\n #same as finding all the cycles\n paths = list(nx.all_simple_paths(G,0,newSize-1))\n\n else:\n mask = [b1,b2]+list(range(bSize,N))\n reduA = A[mask,:][:,mask]\n #Remove base node interactions\n reduA[:2,:2] = np.zeros((2,2))\n G = nx.DiGraph(reduA.T)\n paths = list(nx.all_simple_paths(G,0,1))\n\n #Process Paths so that they make sense when the rest of the base\n #set is added to the graph\n for p in paths:\n if p != []:\n if b1 == b2:\n p = np.array(p) + bSize-1\n else:\n p = np.array(p) + bSize-2\n p[[0,-1]] = [b1, b2]\n pressedPaths.append(p)\n\n return pressedPaths", "def path(self): # Path taken to reach Goal\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))", "def get_path_endpoints(self):\n endpoints = []\n\n # Get the far end of the last path segment\n path, split_ends, position_stack = self.trace()\n endpoint = path[-1][2]\n if split_ends is not None:\n for termination in split_ends:\n endpoints.extend(termination.get_path_endpoints())\n elif 
endpoint is not None:\n endpoints.append(endpoint)\n\n return endpoints", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node.state)\n node = node.parent\n return list(reversed(path_back))", "def paths(self, return_indices=False):\n paths = []\n for tree in self.components():\n paths += self._single_tree_paths(tree, return_indices=return_indices)\n return paths", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)", "def FindAllPaths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n if start not in graph:\n return None\n paths = []\n for node in graph[start]:\n if node not in path:\n newpaths = find_all_paths(graph, node, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths", "def path_to_edges(path):\n return list((u, v) for u, v in zip(path[:-1], path[1:]))", "def paths(self):\n return self._visit(self.start)", "def _deconstruct_path(predecessors, end):\n if end not in predecessors:\n return None\n current = end\n path = []\n while current:\n path.append(current)\n current = predecessors.get(current)\n return list(reversed(path))", "def update_trip_path(trip_mpois, paths, graph):\n n_nodes = len(trip_mpois)\n # adjacency matrix\n new_paths = np.zeros(shape=(n_nodes, n_nodes))\n\n # iterate through all the nodes and create a list of nodes with sequential id\n for i, node1 in enumerate(trip_mpois):\n for j, node2 in enumerate(trip_mpois):\n new_paths[i, j] = paths[node1, node2]\n\n # new_paths = new_paths/np.max(new_paths[new_paths < _INF])\n # new_paths[np.isinf(new_paths)] = _INF\n\n # create a dummy edge between end and start node with weight 0\n new_paths[1,0] = -_INF\n # new_paths[0,1] = _INF\n\n shortest_path = None\n if n_nodes > 5:\n shortest_path, dist = tsp.solve(n_nodes, new_paths)\n # shortest_path = range(n_nodes)\n else:\n shortest_path = range(n_nodes)\n\n trip_path = np.array(trip_mpois)[shortest_path]\n\n if ___DEBUG:\n fname = 'dump/' + str(n_nodes) + '.dist'\n np.savetxt(fname, new_paths, fmt='%.6f')\n \n mpoi_pos = np.zeros(shape=(n_nodes,2))\n \n for i, node in enumerate(trip_mpois):\n pos_3d = graph.vs[node]['position']\n assert node == graph.vs[node].index\n mpoi_pos[i,:] = pos_3d[:2]\n\n fname = 'dump/' + str(n_nodes) + '.pos'\n np.savetxt(fname, mpoi_pos)\n \n # print trip_mpois, trip_path\n\n return trip_path", "def __generate_all_shortest_paths(self,cutoff = 10):\n if cutoff < 1:\n cutoff = 10\n self.__logger.info(\"cutoff value must be a positive integer. 
Set back to default value: 10\")\n\n all_pair_shortest_paths = nx.all_pairs_shortest_path(self.G, cutoff=cutoff)\n for item in all_pair_shortest_paths:\n from_node = item[0]\n paths = item[1]\n for destination,path in paths.items():\n yield (len(path),path)", "def shortest_path(start, end):\n if start == end:\n return []\n \n start_frontier = Queue.Queue()\n start_parent = {}\n start_level = {}\n start_parent_move = {}\n start_frontier.put(start)\n start_level[start] = 0\n start_parent[start] = None\n start_parent_move[start] = None\n \n end_frontier = Queue.Queue()\n end_parent = {}\n end_level = {}\n end_parent_move = {}\n end_frontier.put(end)\n end_level[end] = 0\n end_parent[end] = None\n end_parent_move[end] = None\n \n intersectFound = False\n intersect = None\n level = 0\n while (True):\n level += 1\n# print (\"level = \" + str(level))\n if not start_frontier.empty():\n vertex = start_frontier.get()\n for move in rubik.quarter_twists:\n position = rubik.perm_apply(move,vertex)\n if position not in start_parent:\n# print (\"start permutation unvisited\")\n start_parent[position] = vertex\n start_level[position] = level\n start_parent_move[position] = move\n start_frontier.put(position)\n if position in end_parent:\n# print (\"position exists in end_parent\")\n intersect = position\n intersectFound = True\n break\n if intersectFound:\n break\n if not end_frontier.empty():\n vertex = end_frontier.get()\n for move in rubik.quarter_twists:\n position = rubik.perm_apply(move,vertex)\n if position not in end_parent:\n# print (\"end permutation unvisited\")\n end_parent[position] = vertex\n end_level[position] = level\n end_parent_move[position] = move\n end_frontier.put(position)\n if position in start_parent:\n# print (\"position exists in start_parent\")\n intersect = position\n intersectFound = True\n break\n if intersectFound:\n break\n if end_frontier.empty() and start_frontier.empty():\n break\n \n if intersect is None:\n return None\n \n path = []\n pos = intersect\n while (start_parent[pos] is not None):\n path.insert(0,start_parent_move[pos])\n pos = start_parent[pos]\n \n pos = intersect\n while (end_parent[pos] is not None):\n move = rubik.perm_inverse(end_parent_move[pos])\n path.append(move)\n pos = end_parent[pos]\n \n# path = [None] * start_level[intersect]\n# pos = intersect\n# move = start_parent_move[pos]\n# path[start_level[intersect]-1] = move\n# for i in range(start_level[intersect]-2,-1,-1):\n# if (start_parent[pos] is not None):\n# pos = start_parent[pos]\n# move = start_parent_move[pos]\n# path[i] = move\n# \n# pos = intersect\n# while (end_parent[pos] is not None):\n# move = rubik.perm_inverse(end_parent_move[pos])\n# path.append(move)\n# pos = end_parent[pos]\n \n return path", "def all_simple_paths(self, starting_vertices=None, ending_vertices=None,\n max_length=None, trivial=False):\n return list(self.all_paths_iterator(starting_vertices=starting_vertices, ending_vertices=ending_vertices, simple=True, max_length=max_length, trivial=trivial))", "def get_path(self) :\n path = [self]\n s = self.get_parent()\n while s is not None :\n path.append(s)\n s = s.get_parent()\n path.reverse()\n return path", "def filter_paths(self, paths):\n formatted_paths = set()\n for path in paths:\n formatted_path = []\n if self.include_entity:\n if len(path) == 3:\n continue\n formatted_path.append(self.idx_to_node[path[0]].get_name())\n for rdx in range(0, (len(path)-1)/2):\n formatted_path.append(self.idx_to_relation[path[rdx*2+1]])\n 
formatted_path.append(self.idx_to_node[path[rdx*2+2]].get_name())\n else:\n if len(path) == 1:\n continue\n for rel_idx in path:\n formatted_path.append(self.idx_to_relation[rel_idx])\n formatted_paths.add(tuple(formatted_path))\n return formatted_paths", "def constructPaths(graph):\n\n paths = [ [] for x in xrange(len(graph)) ] # Initialise our list\n\n for i in xrange(len(graph)): # Iterate over all nodes\n\n index = i # Will be used to repeatedly get the predecessor\n\n # Setting up the initial values\n paths[i].append(i)\n\n while True:\n\n indexOfPred = graph[index].getPredecessor() # Getting the index of the predecessor of this node\n\n if indexOfPred == -1: # If it is the source vertex, break. (Will break if the current Node doesn't have a predecessor as well)\n\n break\n\n else:\n\n paths[i].append(indexOfPred) # Add the index of the predecessor to our path\n\n index = indexOfPred # Set index to be the index of the predecessor to repeatedly get predecessors\n\n return paths", "def endpoints(self):\n return (self._origin,self._destination)", "def decompose_paths_rec(node_inner, path):\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths", "def _trace_path_from_start(self, node=None):\n if node==None: node=self.goal\n current_node=node\n path_list=np.array([])\n while(current_node!=None):\n path_list=np.append(path_list,current_node.state)\n path_list=path_list.reshape(-1,self.goal.state.size)\n current_node=current_node.parent\n\n path_list=path_list[::-1]\n print (path_list)\n return path_list", "def select_paths(self, dpaths=None):\r\n \r\n locs = [] # Find list of starting locs\r\n if len(dpaths) == 0:\r\n return [], []\r\n \r\n for dpath in dpaths:\r\n locs.append(dpath.path[0]) # Get starting loc\r\n \r\n start_locs = self.get_starts(locs=locs)\r\n start_paths = [] # Get paths with these starting locs\r\n other_paths = []\r\n for dpath in dpaths:\r\n if dpath.path[0] in start_locs:\r\n start_paths.append(dpath)\r\n else:\r\n other_paths.append(dpath)\r\n last = self.get_val(\"last\")\r\n if last is 0 or last == \"\":\r\n last = len(dpaths)\r\n closed_tour = self.get_val(\"closed_tour\")\r\n not_tour = self.get_val(\"not_tour\")\r\n comp = self.get_val(\"comp\") \r\n not_comp = self.get_val(\"not_comp\") \r\n\r\n arr_list = start_paths\r\n other_list = other_paths\r\n if closed_tour or not_tour:\r\n a_list = []\r\n o_list = []\r\n for ad in arr_list:\r\n used = False\r\n is_tour = ad.is_closed_tour\r\n if closed_tour:\r\n if is_tour:\r\n a_list.append(ad)\r\n used = True\r\n if not_tour:\r\n if not is_tour:\r\n a_list.append(ad)\r\n used = True\r\n if not used:\r\n o_list.append(ad)\r\n arr_list = a_list\r\n other_list += o_list \r\n \r\n if comp or not_comp:\r\n a_list = []\r\n o_list = []\r\n for ad in arr_list:\r\n used = False\r\n is_comp = ad.is_complete_tour\r\n if comp:\r\n if is_comp:\r\n a_list.append(ad)\r\n used = True\r\n if not_comp:\r\n if not is_comp:\r\n a_list.append(ad)\r\n used = True\r\n if not used:\r\n o_list.append(ad)\r\n arr_list = a_list\r\n other_list += o_list\r\n self.prev_arr_list = arr_list \r\n return arr_list, other_list", "def calc_path_2_ORCIDs(path=curr,node1=None,node2=None):\n\n with open(path + '/' + 'ORCID_graph.pkl', 'rb') as f:\n G = pickle.load(f)\n\n if (node1 is None) or 
(node2 is None):\n with open(path + '/' + 'centrality.csv', 'rb') as f:\n centrality = csv.reader(f, delimiter='\\t')\n rn = 0\n for row in centrality:\n if rn == 0:\n tmp1 = row\n rn += 1\n elif rn == 1:\n tmp2 = row\n rn += 1\n else:\n break\n if node1 is None:\n node1 = tmp1[0]\n if node2 is None:\n node2 = tmp2[0]\n\n try:\n short_path = nx.algorithms.shortest_paths.generic.shortest_path(G, source=node1,target=node2)\n except:\n return []\n\n return short_path", "def optimize_path(path):\n rospy.loginfo(\"Optimizing path\")\n\n opt_path = []\n current_direction = (0, 0)\n last_direction = (0, 0)\n\n for i in range(len(path) -1):\n current_direction = (path[i+1][0] - path[i][0], path[i+1][1] - path[i][1])\n if current_direction != last_direction:\n opt_path.append(path[i])\n last_direction = current_direction\n \n opt_path.append(path[-1]) #add the last coordinate back\n\n return opt_path", "def reconstruct_path(came_from, start, goal):\n current = goal\n path = [current]\n\n # Append configuartion to board as a step until the begin situation is reached\n while current != start:\n current = came_from[current][0]\n path.append(current)\n path.append(start)\n path.reverse()\n return [path[1:]]", "def shortestPath(G, start, end):\n\n D, P = Dijkstra(G, start)\n print(D)\n print(P)\n Path = []\n while 1:\n Path.append(end)\n if end == start: break\n end = P[end]\n Path.reverse()\n return Path", "def extract_paths(measures: List['UserMeasure']) -> List['GwPoint']:\n\n path: List['GwPoint'] = []\n measures = sorted(measures, key = lambda k: k.timestamp)\n (src, dest) = find_endpoints(measures)\n dest_index = 0\n while 'D' not in (src, dest): # Loop until the end of the file is reached\n for m in measures[dest_index:]:\n dest_index += 1\n if m.zone == dest:\n break\n src_index = dest_index\n for m in reversed(measures[:dest_index]):\n src_index -= 1\n if m.zone == src:\n break\n dag = to_DAG(measures[src_index:dest_index])\n for d in dag.list:\n path.append(GwPoint(\n d.id,\n d.lac,\n d.find_gw_match().azimuth,\n d.find_gw_match().longitude,\n d.find_gw_match().latitude,\n d.zone,\n d.timestamp\n ))\n src_index = dest_index\n (src, dest) = find_endpoints(measures[src_index:])\n return path", "def get_path_ends(self):\n\n end1, end2 = self.get_end_vertices()\n\n return Path(end1), Path(end2)", "def redundant_paths(G, source, target, weight, coeff, max_dist):\n\n search_result = dijkstra(G, source, target, weight, max_dist=max_dist)\n\n if not search_result:\n return []\n\n path, dist = search_result\n\n if dist > max_dist:\n return []\n\n # if max_dist is less than shortest path distance * coeff, use that\n available_dist = min(dist * coeff, max_dist)\n\n path = [source]\n edges = []\n \n paths = _get_paths(G, path, edges, target, weight, available_dist) \n\n return paths", "def find_path(sources, goals, connections):\n visited = set()\n expanded = set()\n queue = deque()\n\n for s in sources:\n queue.appendleft([s])\n\n while queue:\n path = queue.pop()\n head = path[-1]\n visited.add(head)\n\n neighbours = [o for (i, o) in connections if i == head]\n for neighbour in neighbours:\n if neighbour in goals:\n return path + [neighbour]\n elif neighbour not in visited:\n queue.appendleft(path + [neighbour])\n\n return []", "def paths(self):\n base = self.base_link\n graph = self.graph()\n paths = {}\n for b in self.links.values():\n try:\n paths[b.name] = shortest(graph, base, b.name)\n except BaseException as E:\n print('exception:', E)\n\n joint_paths = {}\n for body, path in paths.items():\n 
joint_paths[body] = [graph.get_edge_data(a, b)['joint']\n for a, b in zip(path[:-1], path[1:])]\n return joint_paths", "def shortestPath(G,start,end):\n\n D,P = Dijkstra(G,start)\n Path = []\n while 1:\n Path.append(end)\n if end == start: break\n end = P[end]\n Path.reverse()\n return Path", "def Sidetrack2Path(tree, sidetracks, s, t):\n\tvisited = set()\t\t# visited nodes\n\tpath = []\t\t# set of edges\n\tsidenodes = set(links[e][0] for e in sidetracks)\t# nodes that needs to be sidetracked\n\tcurrent = s\n\twhile current != t:\n\t\t# Loop detection\n\t\tif current in visited:\n\t\t\treturn []\n\t\telse:\n\t\t\tvisited.add(current)\n\t\t# No loop is found yet, proceed one hop\n\t\tif current in sidenodes:\n\t\t\tedge = [e for e in sidetracks if links[e][0] == current]\n\t\t\tcurrent = links[edge[0]][1]\n\t\t\tpath.append(edge[0])\n\t\telse:\n\t\t\tedge = [i for i,e in enumerate(links) if e == (current,tree[current])]\n\t\t\tcurrent = links[edge[0]][1]\n\t\t\tpath.append(edge[0])\n\t# Destination reached. Return the path\n\treturn path", "def shorter_path(start, goal):\n if start == goal:\n return [start]\n explored = set() \n queue = [ [start] ] \n while queue:\n path = queue.pop(0)\n s = path[-1]\n for state, action in bj_subway[s].items():\n if state not in explored:\n explored.add(state)\n path2 = path + [action, state]\n if state == goal:\n\t\t\t\t\t# print path2\n\t\t\t\t\t# for x in queue:\n\t\t\t\t\t# print x\n\t\t\t\t\treturn path2\n else:\n queue.append(path2)\n return []", "def paths(self, start):\n # This is probably a little slow\n tupadd = lambda p, v: (p[0] + v[0], p[1] + v[1])\n # First, we'll check adjacency moves.\n adj = [tupadd(start, v) for v in DIRECTIONS]\n yield from (p for p in adj if self.board(p) == 0)\n # Now we check repeated hops.\n # We do this by a breadth first search.\n\n #TODO: Consensus on legality of hopping back to start and "skipping"\n visited = set(adj)\n to_visit = [start]\n while len(to_visit):\n pt = to_visit.pop(0)\n if pt in visited:\n continue\n\n # We have to actually move a piece\n # But this stops us from considering "start" even if we can\n # make some hops and get back to start\n if pt is not start:\n yield pt\n \n visited.add(pt)\n # Compute the hop directions\n dirs = ((tupadd(pt, v), tupadd(pt, tupadd(v, v))) for v in DIRECTIONS)\n to_visit.extend(\n dest for over, dest in dirs\n if self.board(over) > 0\n and self.board(dest) == 0\n and dest not in visited\n and over != start\n )", "def construct_path(edge_dict, node_list):\n previous = None\n for item in node_list:\n if previous is None:\n previous = Path(item, 0)\n previous_item = item\n else:\n actions = edge_dict[previous_item]\n # assume that the actions are uniquely represented\n action_dict = {action.end: action for action in actions}\n correct_action = action_dict[item]\n previous_item = item\n previous = previous.extend_path(\n correct_action.end, correct_action.cost)\n return previous", "def path_to_edges(self):\n\n edges = [0 for i in range(self.graph.num_edges)]\n\n for row in range(self.graph.rows):\n for col in range(self.graph.cols):\n if self.path[row][col]:\n if row + col < self.graph.cols - 1:\n if col < self.graph.cols - 1 and self.path[row][col + 1]:\n #print "(%d,%d) (%d,%d)" % (row, col, row, col + 1)\n edge_number = self.graph.diags[row + col] + 2 * row\n edges[edge_number] = 1\n if row < self.graph.rows - 1 and self.path[row + 1][col]:\n #print "(%d,%d) (%d,%d)" % (row, col, row + 1, col)\n edge_number = self.graph.diags[row + col] + 1 + 2 * 
row\n edges[edge_number] = 1\n else:\n col_dist = self.graph.cols - col - 1\n if col < self.graph.cols - 1 and self.path[row][col + 1]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.graph.diags[row + col] + 2 * col_dist - 1\n edges[edge_number] = 1\n if row < self.graph.rows - 1 and self.path[row + 1][col]:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.graph.diags[row + col] + 2 * col_dist\n edges[edge_number] = 1\n \n\n return edges", "def get_spliced_paths(self, p_i, p_j):\n p_i_nodes = self.get_path_nodes(p_i)[1:-1]\n p_j_nodes = self.get_path_nodes(p_j)[1:-1]\n # we will splice at least common node\n print(\"Path we are attempting to splice\")\n print(p_i, p_j)\n print(p_i_nodes, p_j_nodes)\n if len(set(p_i_nodes).intersection(set(p_j_nodes))) == 0:\n print(\"no overlap\")\n common_node = min(set(p_i_nodes).intersection(set(p_j_nodes)))\n # find where p_i gets to this node\n index_i = 0\n index_j = 0\n for index, arc in enumerate(p_i):\n destin = self.arc_info[arc][\"destin\"]\n if destin == common_node:\n index_i = index + 1\n break\n for index, arc in enumerate(p_j):\n destin = self.arc_info[arc][\"destin\"]\n if destin == common_node:\n index_j = index + 1\n break\n p_i_prime = p_j[:index_j] + p_i[index_i:]\n p_j_prime = p_i[:index_i] + p_j[index_j:]\n return (p_i_prime, p_j_prime)", "def reversed_edges(path):\n \n # Reversed initialization\n reversed_edges = []\n \n # Loop\n for edge in path:\n reversed_edges.append(edge[::-1])\n \n return reversed_edges", "def GetAllStationsOfRoute(PathInfo):\r\n\tpath = []\r\n\tfor ConnectionInfo in PathInfo:\r\n\t\tpath.append(ConnectionInfo[ConnInfoInd['station_from']])\r\n\tif len(PathInfo) > 0:\r\n\t\tpath.append(PathInfo[-1][ConnInfoInd['station_to']])\r\n\treturn path", "def extractPathsFromExploded (cls, exploded_paths_dict, min_dist_pairs,\n id_connector_character):\n min_length_paths = defaultdict(lambda: defaultdict(lambda: None))\n for original_starting_node, d in min_dist_pairs.iteritems():\n for original_ending_node, tup in d.iteritems():\n exploded_path = exploded_paths_dict[tup[0]][tup[1]]\n # get only the exploded IDs, which come from node ID-s\n path_with_only_node_ids = filter(\n lambda lid, sep=id_connector_character: sep in lid, exploded_path)\n # transform them back to the original ID-s\n path_with_original_node_ids = map(\n lambda lid, sep=id_connector_character: lid.split(sep)[1],\n path_with_only_node_ids)\n # the startgin and ending node ID may not be in place\n if path_with_original_node_ids[0] != original_starting_node:\n path_with_original_node_ids.insert(0, original_starting_node)\n if path_with_original_node_ids[-1] != original_ending_node:\n path_with_original_node_ids.append(original_ending_node)\n\n # a transit infra appears twice in the path after each other, because\n # there was an inbound and an outbound port.\n path_with_original_node_ids_no_duplicates = [\n path_with_original_node_ids[0]]\n for n in path_with_original_node_ids:\n if n != path_with_original_node_ids_no_duplicates[-1]:\n path_with_original_node_ids_no_duplicates.append(n)\n path_with_original_node_ids_no_duplicates_str = map(\n lambda node_id: NFFGToolBox.try_to_convert(node_id),\n path_with_original_node_ids_no_duplicates)\n min_length_paths[NFFGToolBox.try_to_convert(original_starting_node)][\n NFFGToolBox.try_to_convert(original_ending_node)] = \\\n path_with_original_node_ids_no_duplicates_str\n\n # convert embedded default dicts\n for k in min_length_paths:\n 
min_length_paths[k] = dict(min_length_paths[k])\n return dict(min_length_paths)", "def shortest_path(self, other):\n shortest_paths = []\n lcs = self.lowest_common_subsumer(other)\n for subsumer in lcs:\n paths_to_lcs1 = self.shortest_path_to_hypernym(subsumer)\n paths_to_lcs2 = other.shortest_path_to_hypernym(subsumer)\n for path_to_lcs1 in paths_to_lcs1:\n for path_to_lcs2 in paths_to_lcs2:\n current_path = path_to_lcs1\n path_to_lcs2 = path_to_lcs2[::-1]\n for el in path_to_lcs2[1:]:\n current_path.append(el)\n shortest_paths.append(current_path)\n return shortest_paths", "def path_convert(self):\n pub_path = Exp_msg()\n for i in self.path:\n epoint = Cordi()\n (epoint.x, epoint.y) = i\n pub_path.bliss.append(epoint)\n return(pub_path)", "def compute_path(predecessor_matrix, start_node, end_node):\n\n i = start_node\n j = end_node\n path = []\n\n #Go through the predecessor matrix to save the data in a list\n while j != i:\n path.append(j)\n j = predecessor_matrix[j]\n path.append(i)\n\n #reverse it so that it goes from start node to end node instead\n path.reverse()\n return path", "def find_unique_paths(self, source: str, target: str, number: int = 3) -> List[Tuple[List[str], float]]:\n assert self.graph_handler\n counter = 0\n path_tuple_list = list()\n # # # Initialise iterator over shortest simple paths if it is either not set or source/target do not match\n if not self._use_old_iterator or \\\n self._unique_iterator_memory is None or \\\n self._unique_iterator_memory[0][0][0] != source or \\\n self._unique_iterator_memory[0][0][-1] != target:\n path_iterator = iter(nx.shortest_simple_paths(self.graph_handler.graph, source, target, weight=\"weight\"))\n # # # Find first path and its cost\n old_path = next(path_iterator)\n old_path_cost = nx.path_weight(self.graph_handler.graph, old_path, weight=\"weight\")\n else:\n path_iterator = self._unique_iterator_memory[1]\n old_path = self._unique_iterator_memory[0][0]\n old_path_cost = self._unique_iterator_memory[0][1]\n\n while counter < number:\n same_cost = True\n tmp_path_list: List[List[str]] = list()\n # # # Collect all paths with same cost\n # TODO: Parameter for max tmp paths\n n_max_collected_paths = 10\n while same_cost and len(tmp_path_list) < n_max_collected_paths:\n # # # Append old path to tmp_path list\n tmp_path_list.append(old_path)\n # # # Get next path and its cost\n new_path = next(path_iterator, None)\n # # # Break loop if no path is returned\n if new_path is None:\n break\n new_path_cost = nx.path_weight(self.graph_handler.graph, new_path, weight=\"weight\")\n # # # Check if new cost different to old cost\n if abs(old_path_cost - new_path_cost) > 1e-12:\n same_cost = False\n # # # Overwrite old path with new path\n old_path = new_path\n\n # # # Append path with most nodes to tuple list and its cost\n path_tuple_list.append((max(tmp_path_list, key=lambda x: len(x)), # pylint: disable=unnecessary-lambda\n old_path_cost))\n # # # Break counter loop if no more paths to target are found\n if new_path is None:\n break\n old_path_cost = new_path_cost\n counter += 1\n # # # Store iterator and path info (list of nodes and length)\n if new_path is not None:\n self._unique_iterator_memory = ((new_path, new_path_cost), path_iterator)\n return path_tuple_list", "def find_all_paths(parents_to_children, start, end, path=[]):\r\n path = path + [start]\r\n if start == end:\r\n return [path]\r\n if start not in parents_to_children.keys():\r\n return []\r\n paths = []\r\n for node in parents_to_children[start]:\r\n if node not in 
path:\r\n newpaths = find_all_paths(parents_to_children, node, end, path)\r\n for newpath in newpaths:\r\n paths.append(tuple(newpath))\r\n return paths", "def get_node_pairs_from_path(path):\n\n path = path[:-1]\n pairs = []\n for i in range(len(path)):\n center_node = path[i]\n for j in range(max(i - config.window_size, 0), min(i + config.window_size + 1, len(path))):\n if i == j:\n continue\n node = path[j]\n pairs.append([center_node, node])\n return pairs", "def get_path_patch(self):\n end1, end2 = self.get_end_vertices()\n\n verts = [] + end1 + end2 + end1[:1]\n\n return Path(verts)", "def getPath(self) -> List['StateNode']:\n rest = []\n if self.previous is not None:\n rest = self.previous.getPath()\n return rest + [self]", "def _single_tree_paths(self, tree, return_indices):\n skel = tree.consolidate()\n\n tree = defaultdict(list)\n\n for edge in skel.edges:\n svert = edge[0]\n evert = edge[1]\n tree[svert].append(evert)\n tree[evert].append(svert)\n\n def dfs(path, visited):\n paths = []\n stack = [ (path, visited) ]\n \n while stack:\n path, visited = stack.pop(0)\n\n vertex = path[-1]\n children = tree[vertex]\n \n visited[vertex] = True\n\n children = [ child for child in children if not visited[child] ]\n\n if len(children) == 0:\n paths.append(path)\n\n for child in children:\n stack.append( \n (path + [child], copy.deepcopy(visited))\n )\n\n return paths\n \n root = skel.edges[0,0]\n paths = dfs([root], defaultdict(bool))\n\n root = np.argmax([ len(_) for _ in paths ])\n root = paths[root][-1]\n \n paths = dfs([ root ], defaultdict(bool))\n\n if return_indices:\n return [ np.flip(path) for path in paths ]\n\n return [ np.flip(skel.vertices[path], axis=0) for path in paths ]", "def find_paths(self, source, destination, closed=None):\n if closed is None:\n closed = set()\n closed.add(source)\n links = {x.trusted for x in self._tau\n if x.truster == source and x.trusted not in closed}\n if len(links) == 0: # base\n return []\n if destination in links: # base\n return [[Trust(source, destination)]]\n # recurse\n retval = []\n for link in links:\n linkpaths = self.find_paths(link, destination, closed)\n for path in linkpaths:\n path.insert(0, Trust(source, link))\n retval += linkpaths\n\n for path in retval:\n if None in path:\n retval.remove(path)\n if len(retval) == 0:\n return []\n return retval", "def get_overlapping_path_pairs(self):\n path_pairs = []\n for i, path_1 in enumerate(self.paths):\n for j, path_2 in enumerate(self.paths):\n if j != i:\n if len(set(path_1).intersection(set(path_2))) != 0:\n path_pairs.append((i,j))\n path_pairs.sort()\n return path_pairs", "def paths(self, source, target):\n assert source in self.node_map\n assert target in self.node_map\n if has_path(self.G2, source, target):\n return nx.all_simple_paths(self.G2, source=source, target=target)\n return None", "def construct_path(node):\n path = []\n current = node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path", "def get_path(prev_vertices, vertices):\n cur_vertex = 0\n prev = prev_vertices[0, vertices]\n cur_vertices = vertices\n path = [0]\n while prev != 0:\n path.append(prev)\n cur_vertex = prev\n cur_vertices = frozenset(cur_vertices - set([cur_vertex]))\n prev = prev_vertices[cur_vertex, cur_vertices]\n return path[::-1]", "def reverse_path(self, came_from: dict, came_from_direction: dict, goal_location: tuple) -> list:\n current = goal_location\n parent = came_from.get(goal_location, None)\n path = []\n\n while parent is not None:\n 
path.append(came_from_direction[current])\n current, parent = parent, came_from.get(parent, None)\n\n return list(reversed(path))", "def reconstruct_path(cameFrom, current):\n total_path = np.array([[current.x],[current.y]])\n while current_in_cameFrom(current,cameFrom):\n current = current.father\n node_x = current.x\n node_y = current.y\n node_pos = np.array([[node_x],[node_y]])\n total_path = np.hstack((total_path,node_pos))\n\n l1 = total_path[0,:]\n l1 = l1[::-1]\n l2 = total_path[1,:]\n l2 = l2[::-1]\n total_path = np.vstack((l1,l2))\n return total_path", "def get_shortest_path(self, node_id_start: int, node_id_end: int) -> List[int]:\n\n _, tree_idx_start = self.node_id_to_idx(node_id_start)\n _, tree_idx_end = self.node_id_to_idx(node_id_end)\n\n assert tree_idx_start == tree_idx_end, 'Provided node ids need to be part of the same tree'\n\n graph = self.get_graph(tree_idx_start)\n shortest_path = nx.shortest_path(graph, node_id_start, node_id_end)\n\n return shortest_path", "def dfs_paths_dict_recur(\n graph: Mapping[Node, set[Node]],\n start: Node,\n goal: Node,\n path: Optional[list[Node]] = None\n) -> Iterable[list[Node]]:\n if path is None:\n path = [start]\n if start == goal:\n yield path\n else:\n for next_node in graph[start].difference(path):\n next_path = path + [next_node]\n yield from dfs_paths_dict_recur(graph, next_node, goal, next_path)" ]
[ "0.66736597", "0.65876526", "0.64044726", "0.6344763", "0.6287056", "0.6267099", "0.6243408", "0.6209103", "0.61667985", "0.6163704", "0.61539465", "0.6153504", "0.61441976", "0.61239386", "0.6118913", "0.611599", "0.6104713", "0.6080712", "0.60661316", "0.6046239", "0.60459167", "0.6019914", "0.60191035", "0.60152674", "0.6003547", "0.5991242", "0.59912205", "0.5976045", "0.59692216", "0.59678894", "0.59678894", "0.59471285", "0.5929589", "0.5917709", "0.5911215", "0.5909964", "0.5897278", "0.5896014", "0.5888625", "0.58878326", "0.5875323", "0.5872718", "0.58494365", "0.584768", "0.58403254", "0.58362126", "0.5823426", "0.5812879", "0.5804091", "0.5796819", "0.5794767", "0.5792312", "0.5781376", "0.5777919", "0.57546896", "0.57507086", "0.57279986", "0.5715738", "0.57044375", "0.5702033", "0.5698827", "0.5697752", "0.5687331", "0.5686688", "0.5679316", "0.56645286", "0.566028", "0.564965", "0.5645535", "0.56191", "0.5618616", "0.561274", "0.5606805", "0.5600852", "0.559939", "0.55963105", "0.5590434", "0.5589982", "0.55851203", "0.5577575", "0.55753034", "0.5573405", "0.55587786", "0.5558167", "0.5556476", "0.55413824", "0.5539101", "0.55321854", "0.55316204", "0.5527481", "0.5524275", "0.551308", "0.55030614", "0.5499063", "0.54922473", "0.54918855", "0.54854053", "0.54734224", "0.54721415", "0.5471844" ]
0.68498194
0
Download a project from Dash and create a GIT repo for it.
def import_project(name, apikey, repo): def validump_resource(jsonres, restype): get_schema_validator(restype).validate(jsonres) return json.dumps(jsonres) def split_templates(spider, spider_filename, files): templates = spider['templates'] spider['templates'] = [] spider['template_names'] = [] for template in templates: template['name'] = template['page_id'] spider['template_names'].append(template['name']) template_fname = os.path.join( spider_filename.rpartition('.')[0], str(template['name']) + '.json') files[template_fname] = validump_resource(template, 'template') archive = zipfile.ZipFile(StringIO(_download_project(name, apikey))) files = {} for filename in archive.namelist(): contents = archive.read(filename) if filename == 'items.json': resource = 'items' elif filename == 'extractors.json': resource = 'extractors' elif filename.startswith('spiders'): resource = 'spider' else: resource = None if resource in ['items', 'spider', 'extractors']: as_json = json.loads(contents) if resource == 'items': as_json = _fix_items(as_json) elif resource == 'spider': split_templates(as_json, filename, files) contents = validump_resource(as_json, resource) files[filename] = contents if 'extractors.json' not in files: files['extractors.json'] = '{}' if ('items.json' not in files or not files['items.json'] or files['items.json'] == '{}'): files['items.json'] = DEFAULT_DASH_ITEM repo.save_files(files, 'master', 'Publishing initial import.') # XXX: Tell dash that project has been opened in Portia deploy_project(name, apikey, changed_files=[])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)", "def download_dependency_github(name, repo, tag, temp_path, build_path, config):\n wp = os.getcwd()\n os.chdir(temp_path)\n # Clone into the repo, pull the specified tag\n clone_cmd = f\"git clone https://github.com/{repo}.git\"\n tag_cmd = f\"git checkout master && git fetch && git fetch --tags && git checkout {tag}\"\n os.system(clone_cmd)\n os.chdir(name)\n os.system(tag_cmd)\n os.chdir(wp)\n # Move the contents of GameData into the build directory\n shutil.copytree(os.path.join(temp_path, name, \"GameData\", name), os.path.join(build_path, \"GameData\", name))", "def createproject(project_name):\n app_clone_script = 'git clone https://github.com/jaarce/falcon-bp.git %s' % project_name\n subprocess.call(app_clone_script.split(' '))", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"git@github.com:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def fetch_repo(data):\n repo = Repository.objects.get(**data)\n\n # create a temporary directory\n tmp_dir = util.tmp_dir('github')\n\n # log\n log.info(\"Fetching repo %s to %s\", repo.full_name, tmp_dir)\n\n # clone the repository to the directory\n git.Repo.clone_from(repo.git_url, tmp_dir)\n\n # add the repo path to the database\n repo.local_path = tmp_dir\n repo.save()\n\n # tell workers the repo is available\n publish('github.repo_available', data)", "def cli(ctx, repo_home):\n # Create a repo object and remember it as as the context object.\n ctx.obj = Repo(os.path.abspath(repo_home))", "def main(repo):\n print(subprocess.call(['make', 'setup']))\n with Docker('doppins') as docker:\n print(docker.run('git clone {repo} cloned'.format(repo=repo)).out)", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def new_repo(req, source, psp_dir, url_helper=None):\n req.content_type = 'text/html'\n repo_dir = req.filename.rsplit('/', 1)[0]\n files = [f for f in os.listdir(repo_dir) if f[-3:] == '.h5']\n top_level = psp.PSP(req, filename=psp_dir+'new_repo.psp')\n top_level.run({'context': req.uri,\n 'files': files})", "def download_datalad_repo(self,git_ref=\"master\",ignore_dirty_data=False):\n\n if not self.params[\"data\"].get(\"url\"):\n raise ValueError(\n \"A value for url must be provided if the data \"\n \"type is datalad_repo \"\n )\n # Get directory name for repository\n dl_dset = datalad.Dataset(str(self.params['data']['location']))\n get_tests_data_dir(dl_dset,dset_url=self.params['data']['url'])", "def 
repository_create_hosted():\n pass", "def _download_project(name, apikey):\n payload = {'apikey': apikey, 'project': name, 'version': 'portia'}\n r = requests.get(DASH_API_URL + 'as/project-slybot.zip', params=payload)\n return r.content", "def download_fabric_factory():\n local('hg clone http://bitbucket.org/yml/fabric_factory/')", "def create_repo_cli(api_client, url, provider, path):\n content = ReposApi(api_client).create(url, provider, path)\n click.echo(pretty_format(content))", "def main():\n\n # get all repos a user has access to\n gh = Github(options.username, options.pat)\n user = gh.get_user()\n # filter for those under the user account\n userrepos = {\n repo.name : repo.git_url for repo in user.get_repos() \\\n if repo.git_url.startswith(\"git://github.com/\" + options.username)\n }\n # create a backup dir\n dirname = datetime.today().strftime(\"%Y%m%d-%H%M%S\")\n os.makedirs(\"./backup/\" + dirname)\n # clone all user repos\n for k, v in userrepos.items():\n url = \"https://\" + options.pat + \"@\" + v.removeprefix(\"git://\")\n subprocess.check_call([\n \"git\",\n \"clone\",\n url,\n \"./backup/\" + dirname + \"/\" + k\n ])", "def get_own_repo():\n own_repo = GitClass(name='self', url='https://github.com/meganhmoore/github-api-covid-data', owner='meganhmoore',\n repo='github-api-covid-data', branch='develop/new_data')\n return own_repo", "def download(parent, name=None):\n with cd(parent):\n if not name:\n run(\"drush dl\")\n else:\n run(\"drush dl --drupal-project-rename=%s\" % name)", "def fetch_repo(root, repo, url, destination_temp):\n\n print \"Fetching %s from %s\" % (repo, url)\n\n if root.exists('repos/%s' % repo):\n print \"Repo %s exists, issuing a git pull...\" % repo\n call('cd repos/%s; git pull' % repo, shell=True)\n else:\n print \"Repo %s does not exist, issuing a git clone...\" % repo\n\n # explicitely create dir as implicit creation fails on server\n root.makedir('%s/%s' % (destination_temp, repo))\n call('cd repos; git clone %s %s' % (url, repo), shell=True)\n # call('git clone %s %s/%s > /dev/null 2>&1' % (repo['url'], source, repo['id']), shell=True)", "def init(args: argparse.Namespace) -> None:\n\tbranch = args.branch\n\turl = args.url\n\n\trepo_path = os.path.join(os.path.abspath(\".\"), \".repo\")\n\tLOGGER.info(\"Creating repo directory at %s\", repo_path)\n\tos.makedirs(repo_path, exist_ok=True)\n\t_run_git([\"clone\", \"-b\", branch, url, MANIFEST_DIRECTORY], repo_path)\n\tLOGGER.info(\"Initialized repository at %s\", repo_path)", "def create_prod_git_repo(git_repo_name):\n with cd(git_dir):\n run(\"git init --bare %s.git && cd %s.git && git config http.receivepack true\" %\n (git_repo_name,git_repo_name))", "def init_repo(repo_clone_url, path, version):\n # Create path for repo\n local_repo = Path(path) / version\n local_repo = local_repo.expanduser()\n \n # Initialize repository\n repo = git.Repo.clone_from(repo_clone_url, local_repo)\n return repo, local_repo", "def clone_github_repo(self):\n repository_local_destination = os.path.join(MODULES_PATH, 'github', self.username, self.repository_name)\n if not os.path.exists(repository_local_destination):\n Repo.clone_from(self.repo_url, repository_local_destination, branch='master')\n init_filename = os.path.join(repository_local_destination, '__init__.py')\n open(init_filename, 'a').close()", "def clone_repo():\n with settings(warn_only=True):\n run('git clone %(repository_url)s %(repo_path)s' % env)", "def download_from_uri(uri: str, dst: utils.ReadWritePath) -> str:\n if 
uri.startswith('github://'):\n raise NotImplementedError('Github sources not supported yet')\n\n path = utils.as_path(uri)\n if not path.exists():\n raise ValueError(f'Unsuported source: {uri}')\n\n # Download the main file\n python_module = path / f'{path.name}.py'\n python_module.copy(dst / python_module.name)\n\n # TODO(tfds): Should also support download on the extra files (e.g. label.txt,\n # util module,...)\n\n # Add the `__init__` file\n (dst / '__init__.py').write_text('')\n return python_module.stem", "def newproject():\n log('Criando novo projeto', yellow)\n log('Cria a conta no bitbucket com o nome do projeto vázio que o script se encarregará do resto', red)\n\n conta = raw_input('Digite o nome do projeto: ')\n\n local('echo \"clonando projeto %s\"' % bitbucket_repository)\n local('git clone {0} {1}{2}'.format(bitbucket_repository, folder_project_local, conta))\n local('cd {0}{1}'.format(folder_project_local, conta))\n local('mkvirtualenv {0}'.format(conta))\n local('setvirtualenvproject')\n local('pip install -r requirements.txt')\n local('rm -rf {0}{1}/.git'.format(folder_project_local, conta))\n local('rm -rf README.md')\n local('git init')\n local('git remote add origin git@bitbucket.org:{0}/{1}.git'.format(bitbucket_user, conta))", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def hbtn_project(project_id):\n auth = flask.request.get_json()\n if AUTH_KEYS - auth.keys():\n flask.abort(400)\n auth['hbtn_email'] = auth['hbtn_email'].split('@')[0]\n if not auth['hbtn_email'].isnumeric():\n flask.abort(400)\n auth['hbtn_email'] = '@'.join([\n auth['hbtn_email'], 'holbertonschool.com'\n ])\n auth['hbtn_token'] = hbtn_api_auth_token(\n auth['hbtn_email'], auth['hbtn_password'], auth['hbtn_api_key']\n )\n user = hbtn_api_user(auth['hbtn_token'])\n proj = hbtn_api_project(project_id, auth['hbtn_token'])\n repo = proj['tasks'][0]['github_repo']\n with create_session(auth['hbtn_email'], auth['hbtn_password']) as session:\n git_project(get_soup(session, project_id),\n github_user=user['github_username'],\n github_pass=auth['github_password'],\n github_name=user['full_name'],\n github_repo=repo)\n return (os.path.join(repo, proj['name']), 200)", "def download(repo_url, sha, working_dir):\n print 'Downloading %s ...' % (sha)\n sf_zip = os.path.join(working_dir, 'sf.gz')\n with open(sf_zip, 'wb+') as f:\n f.write(requests.get(github_api(repo_url) + '/zipball/' + sha).content)\n zip_file = ZipFile(sf_zip)\n zip_file.extractall(working_dir)\n zip_file.close()\n\n for name in zip_file.namelist():\n if name.endswith('/src/'):\n src_dir = name\n break\n\n return os.path.join(working_dir, src_dir)", "def sync_git_repo():\n # get the current dir of this script\n current_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n repo_path = os.path.join(current_dir,REPO_NAME)\n logging.info(\"Repository path is: \"+repo_path)\n # check to see if a repo has been init already\n try: \n repo = git.Repo(repo_path)\n logging.info(\"Git repo has already been created.\")\n except (git.exc.InvalidGitRepositoryError,git.exc.NoSuchPathError):\n logging.info(\"No git repo has been initialized for this module. 
Cloning from github.com now.\")\n repo_url = \"https://\"+REPO_USERNAME+\":\"+REPO_PERSONAL_ACCESS_TOKEN+\"@github.com/\"+REPO_USERNAME+\"/\"+REPO_NAME+\".git\"\n git.Repo.clone_from(repo_url,repo_path)\n logging.info(\"Repo cloned successfully.\")\n repo = git.Repo(repo_path)\n # now we have a valid repo created \n # pull the latest data from the repo\n origin = repo.remotes.origin\n origin.pull()\n # create the csv output dir if it does not exist\n Path(paho_csv_reports_dir).mkdir(parents=False, exist_ok=True)\n # get all csv files in this dir\n all_paho_csv_files = glob.glob(paho_csv_reports_dir+os.path.sep+\"*.csv\")\n # add all files in this dir to the repo index\n repo.index.add(all_paho_csv_files)\n logging.info(\"Added all .csv files from \"+paho_csv_reports_dir+\" to repo index.\")\n # set the commit message\n repo.index.commit(\"Automatic commit by \"+os.path.basename(__file__))\n # git push \n origin.push()\n logging.info(\"All csv files pushed to github repo successfully.\")", "def _create_github_repo(self):\n\n repo_dir = join(self.temp_dir, 'repo')\n subprocess.check_output(['git', 'init', repo_dir])\n\n subprocess.check_output(\n ['git', 'config', 'user.email', os.environ['GIT_EMAIL']],\n cwd=repo_dir\n )\n subprocess.check_output(\n ['git', 'config', 'user.name', os.environ['GIT_NAME']],\n cwd=repo_dir\n )\n\n content = statiki.get_travis_files_content(TEST_REPO, 'BOGUS', {})\n\n for info in content:\n path = join(repo_dir, info['name'])\n with open(path, 'w') as f:\n f.write(info['content'])\n\n subprocess.check_output(['git', 'add', path], cwd=repo_dir)\n subprocess.check_output(\n ['git', 'commit', '-m', '%s' % info['message']], cwd=repo_dir\n )\n\n subprocess.check_output(\n shlex.split('git remote add origin ..'), cwd=repo_dir\n )\n\n return repo_dir", "def file(c, path=local.http_path):\r\n c = conn(c)\r\n print(\"make file repo on {}, path [{}]\".format(c.host, path))\r\n\r\n system.install(c, 'createrepo')\r\n c.run('createrepo {}'.format(path))", "def clone_from_git() -> co.Exec:\n git_url = \"https://github.com/conducto/demo.git\"\n image = co.Image(\n dockerfile=\"./docker/Dockerfile.git\", copy_url=git_url, copy_branch=\"main\",\n )\n return co.Exec(\"python cicd/code/test.py\", image=image, doc=co.util.magic_doc())", "def mkweb(project_name, mode):\n\n MAIN_FOLDER = data.get_base_path(data.WEB)\n\n if mode != 'MAIN':\n MAIN_FOLDER += f'{mode}/'\n \n webproject = folders.WebProject(project_name, MAIN_FOLDER)\n\n webproject.create_project()\n click.echo(f'Project created succesfull in {webproject.project_path}')\n cli_commands.start_git(webproject.project_path)\n cli_commands.show_dir_path(webproject.project_path)\n # cli_commands.start_vscode(webproject.project_path)\n\n click.echo('Project Path copied to clipboard...')", "def sync(args: argparse.Namespace) -> None:\n\tdel args\n\trepo_path = _find_repo()\n\tmanifest_file = os.path.join(repo_path, MANIFEST_DIRECTORY, storas.manifest.DEFAULT_MANIFEST_FILE)\n\tmanifest = storas.manifest.load(manifest_file)\n\tfor project in manifest.projects:\n\t\tfull_path = os.path.join(repo_path, \"..\", project.path)\n\t\tremote = project.remote\n\t\tfull_fetch_url = urllib.parse.urljoin(remote.fetch_host, project.name)\n\t\tif not os.path.exists(full_path):\n\t\t\tos.makedirs(full_path, exist_ok=True)\n\t\t\tLOGGER.debug(\"Created '%s'\", full_path)\n\t\t\t_run_git([\"clone\", \"-b\", project.revision, full_fetch_url], cwd=full_path)", "async def github(self, ctx):\n await 
ctx.send('https://github.com/nick411077/nickcan_bot')", "def pub_download(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n download_theme(args, base_url, api_key, prefix=project)", "def clone_into_project(git_repo_name):\n repo_dir = git_dir + \"/%s.git\" % git_repo_name\n with cd(remote_dir):\n run('rm -rf myproject')\n run(\"git clone %s %s\" % (repo_dir, project_name))\n run(\"echo 'MY_ENV=\\\"prod\\\"' > %s/%s/site_settings.py\" % (project_name,project_name))\n update_conf_file()", "def new(url):\n from grit import Repo\n return Repo.new(url=url, bare=True)", "def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()", "def clone():\n with cd(os.path.dirname(env.proj_root.rstrip('/'))):\n run('git clone --recursive %s' % (git_repo,))", "def github_download_and_extract(\n repository_ctx,\n repository,\n commit,\n mirrors,\n output = \"\",\n sha256 = \"0\" * 64,\n extra_strip_prefix = \"\",\n commit_pin = None):\n urls = _urls(\n repository = repository,\n commit = commit,\n mirrors = mirrors,\n )\n\n repository_ctx.download_and_extract(\n urls,\n output = output,\n sha256 = _sha256(sha256),\n type = \"tar.gz\",\n stripPrefix = _strip_prefix(repository, commit, extra_strip_prefix),\n )\n\n # Create a summary file for Drake maintainers.\n generate_repository_metadata(\n repository_ctx,\n repository_rule_type = \"github\",\n repository = repository,\n commit = commit,\n version_pin = commit_pin,\n sha256 = sha256,\n urls = urls,\n )", "def git_install(projects_yaml):\n if git_install_requested():\n git_pre_install()\n projects_yaml = git_default_repos(projects_yaml)\n git_clone_and_install(projects_yaml, core_project='keystone')\n git_post_install(projects_yaml)", "def cmd_create(self):\n self.repo.create()\n\n # Add .gitignore.\n self.repo.add_files({'.gitignore': '.swp\\n'}, FIRST_COMMIT_MSG)\n\n # Create the etc and timestamps branches.\n self.repo.checkout('etc', create=True)\n self.repo.checkout('timestamps', create=True)\n\n self.repo.checkout('master')\n self.repo.init()\n self.update_repository()\n print('Git repository created at %s' % self.repodir)", "def repository_create_hosted_maven(ctx: click.Context, **kwargs):\n _create_repository(ctx, 'hosted', **kwargs)", "def create_new_python_project():\n\t# Create the different variables\n\tfolder_name = str(sys.argv[1])\n\tdir_name = my_project_folder + folder_name\n\tpy_file = dir_name + '/' + folder_name + '.py'\n\treadme_file = dir_name + '/' + 'README.md'\n\ttodo_file = dir_name + '/' + 'TODO.txt'\n\n\t# Create directory if it does not exist yet\n\tif not os.path.exists(dir_name):\n\t\tos.mkdir(dir_name)\n\t\tprint(\"Directory \" , dir_name , \" Created \")\n\n\t\t# Create Python file\n\t\tdata = ''\n\t\twith open(template_py, 'r') as file:\n\t\t\tdata += file.read()\n\n\t\twith open(py_file, 'w') as f:\n\t\t\tf.write(data)\n\t\t\tprint(\"Python file created\")\n\n\t\t# Create README file\n\t\tdata = ''\n\t\twith open(template_readme, 'r') as file:\n\t\t\tdata += file.read()\n\n\t\twith open(readme_file, 'w') as f:\n\t\t\tf.write(data)\n\t\t\tprint(\"Readme file created\")\n\n\t\t# Create Todo file\n\t\twith open(todo_file, 'w') as f:\n\t\t\tprint(\"TODO file created\")\n\n\t\t# Create Github repo\n\t\twith open(\".env\", \"r\") as f:\n\t\t\tdata = f.read()\n\n\t\tindex_1 = 
data.find('TOKEN=\"') + len('TOKEN=\"')\n\t\ttoken = data[index_1:-1]\n\t\tg = Github(token)\n\t\tuser = g.get_user()\n\t\trepo = user.create_repo(folder_name)\n\t\tprint(\"Succesfully created repository {}\".format(folder_name))\n\n\n\telse: \n\t\tprint(\"Directory \" , dir_name , \" already exists\")", "def update_code_from_git():\n if not files.exists(REMOTE_REPO_DIR):\n with cd(HOME_DIR):\n run(\"git clone %s\" % MAIN_GITHUB_REP )\n with cd(REMOTE_REPO_DIR):\n run(\"git pull\")", "def create_clowder_repo(self, url, branch, depth=0):\n\n if self.existing_git_repository(self.repo_path):\n return\n self._init_repo()\n self._create_remote(self.remote, url, remove_dir=True)\n self._checkout_new_repo_branch(branch, depth)", "def fork(args):\n subprocess.check_call([\"git\", \"config\", \"--global\",\n \"--add\", \"safe.directory\", args.src])\n head = subprocess.check_output([\"git\", \"rev-parse\", args.rev], cwd=args.src).strip()\n obj_dir = subprocess.check_output([\"git\", \"rev-parse\", \"--git-path\", \"objects\"],\n cwd=args.src)\n obj_dir = os.path.join(args.src, obj_dir.decode())\n\n # Create an empty git repository. Native clone is too slow because the\n # typical gerrit source repo has a huge number of refs and git has to\n # inspect all of them. This approach lets us ignore all of that to only\n # use the rev we were asked to build.\n os.mkdir(\"/build/%s\" %(args.project))\n os.chdir(\"/build/%s\" %(args.project))\n subprocess.check_call([\"git\", \"init\", \"-q\"])\n\n # Setup alternates so we can see all the objects in the source repo\n with open(\".git/objects/info/alternates\", \"w\") as F:\n F.write(obj_dir)\n F.write(\"\\n\")\n\n # Create a branch using the only remote HEAD we care about\n subprocess.check_call([\"git\", \"checkout\", \"-q\", \"-b\", \"build\", \"--no-progress\", head])\n subprocess.check_call([\"git\", \"--no-pager\", \"log\", \"--oneline\", \"-n1\"])\n\n if args.project == \"kernel\":\n copy(\"%s/.config\" %(args.src), \"/build/%s\" %(args.project))\n\n args.src = \"/build/%s\" %(args.project)\n args.rev = head", "def git_clone(git_url, git_folder):\n run(\"git clone %s %s &> /dev/null\" % (git_url, git_folder))", "def create_from_git(self, token: Any, repo: str):\n params = [token, repo, ]\n method = \"ProjectAPI.CreateFromGit\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))", "def newrepo():\n form = AddRepoForm()\n if form.validate_on_submit():\n\n # make the directory for this package\n os.mkdir(DATA + form.name.data)\n\n flash('Repo created successfully')\n\n # redirect to the login page\n return redirect(url_for('home.dashboard'))\n\n # load registration template\n return render_template('home/add.html', form=form, title='Local Repo', target=\"add\")", "def create_repo_clone(self, path, https):\n _, _, login, remote_dir = path.split('/', 3) # 3 x '/' before real path\n remote_dir = os.path.dirname(remote_dir) # final segment from clone\n print remote_dir\n cmd = ['ssh', login, 'mkdir', '-p', remote_dir]\n print cmd\n check_output(cmd)\n cmd = ['ssh', login, 'cd', remote_dir, ';', 'hg', 'clone', https]\n #cmd = ['ssh', login, 'cd {} ; hg clone {}'.format(remote_dir, path.replace('ssh:', 'https:'))]\n print cmd\n check_output(cmd)", "def clone(ctx, path_base, repo_url, dir_target):\n if 'github' in repo_url:\n # Just to make sure ssh agent forwarding works well.\n ctx.run('ssh-keyscan -t rsa github.com >> ~/.ssh/known_hosts', warn=True)\n ctx.run('ssh -T git@github.com', warn=True)\n\n with ctx.cd(path_base):\n 
ctx.run(f'git clone -v {repo_url} {dir_target}')", "def connect_to_github():\n\n # Authentication\n from os.path import isfile\n if isfile(\"github-logins.json\"):\n with open(\"github-logins.json\", \"r\") as loginfile:\n logins = json.load(loginfile)\n gh = login(username=logins[\"username\"], password=logins[\"password\"])\n else:\n from getpass import getpass\n password = getpass()\n gh = login(username=\"yourusername\", password=password)\n\n # Connect to the repo\n repo = gh.repository(\"ghostofgoes\", \"botnet-example\")\n branch = repo.branch(\"master\")\n return gh, repo, branch", "def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))", "async def github(self, ctx: Message):\n\t\tawait self.send(\n\t\t f\"{ctx.author.mention} ㅤㅤ I'm open-source! You can look at my source code here!ㅤ https://github.com/asxlvm/DogeBoss :GitHub:\"\n\t\t)", "def git():\n pass", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def clone():\n require('PROJECT_NAME')\n require('PROJECT_REPO')\n require('MERCURIAL_BIN')\n\n # Create the \"apps\" directory if it does not exist.\n run('mkdir -p {}'.format(utils.home('apps')))\n\n if files.exists(utils.home('apps', env.PROJECT_NAME)):\n delete()\n\n with cd(utils.home('apps')):\n run('{0} clone {1} {2}'.format(env.MERCURIAL_BIN,\n env.PROJECT_REPO,\n env.PROJECT_NAME))", "def deploy_project(name, apikey, changed_files=None, repo=None,\n branch='master'):\n zbuff = StringIO()\n if changed_files is not None:\n changed_files = list(set(changed_files) | REQUIRED_FILES)\n _archive_project(name, zbuff, changed_files, repo, branch)\n zbuff.reset()\n payload = {'apikey': apikey, 'project': name}\n req = requests.post(\n DASH_API_URL + 'as/import.json?version=portia',\n files=[('archive', ('archive', zbuff, 'application/zip'))],\n params=payload\n )\n if req.status_code == 200:\n project_url = DASH_API_URL.rsplit('/', 2)[0] + '/p/' + name\n return {\n 'status': 'ok',\n 'schedule_url': project_url\n }\n else:\n raise DeployError('Deploy to Dash failed: %s' % req.text)", "def _fetch_script(info,\n script_path,\n domain=\"raw.githubusercontent.com\",\n urlpath=_GITHUB_URLPATH):\n if not os.path.exists(info.fs_path):\n with open_and_force_mkdir(info.fs_path, \"w\") as scr:\n remote = \"%s/%s/%s\" % (domain, urlpath, script_path)\n retrycount = 100\n while retrycount != 0:\n try:\n contents = urlopen(\"http://{0}\".format(remote)).read()\n scr.write(contents.decode())\n scr.truncate()\n retrycount = 0\n except URLError:\n retrycount -= 1", "def create_repository(cfg):\n if os.path.isdir(cfg[\"repo_dir\"]):\n shutil.rmtree(cfg[\"repo_dir\"], ignore_errors=True)\n return Repo.init(cfg[\"repo_dir\"])", "def __download(self):\n\n # Use the default repository if set to True\n if self.repository is True:\n self.repository = self.__default_repository\n\n if not self.repository and not self.url:\n tarball = 'ucx-{}.tar.gz'.format(self.__version)\n self.url = '{0}/v{1}/{2}'.format(self.__baseurl, self.__version,\n tarball)", "def api_repo_create():\n form = NewRepoForm()\n if form.validate_on_submit():\n # On the miniscule chance we generate a non-unique access key, loop and try again.\n success = False\n while not success:\n new_repo = Repo.create(\n pass_phrase = form.pass_phrase.data,\n title = form.title.data,\n description = form.description.data,\n is_private = form.is_private.data\n )\n db.session.add(new_repo)\n try:\n db.session.commit()\n success = True\n except:\n db.session.rollback()\n success = False\n 
session['working_repo'] = new_repo.access_key\n return jsonify(message='success', created=new_repo.access_key)\n else:\n return jsonify(message=\"failed\", errors=form.errors_to_json()), 400", "def deploy():\n remote_dir = os.path.abspath(os.path.join(REMOTE_BASE_DIR, REPO_NAME))\n \n with settings(warn_only=True):\n if run(\"test -d %s\" % (remote_dir)).failed:\n puts(red(\"[Repo %s does not exist on remote at: %s]\" % (REPO_NAME, remote_dir)))\n with cd(REMOTE_BASE_DIR):\n run(\"git clone %s %s\" % (REPO_URL, REPO_NAME))\n\n puts(yellow(\"[Write logs]\"))\n run(\"echo '-----------------------------' > %s\" % REMOTE_ERR_FILE)\n run(\"echo `date` >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' > %s\" % REMOTE_LOG_FILE)\n run(\"echo `date` >> %s\" % REMOTE_LOG_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_LOG_FILE)\n\n puts(yellow(\"[Update repo: %s]\" % REPO_NAME))\n with cd(remote_dir):\n run(\"git pull origin master >> %s 2>> %s\" %\n (REMOTE_LOG_FILE, REMOTE_ERR_FILE))\n\n # reminder new static files\n puts(yellow('Do not forget to run collect staticfiles on DJANGO server.'))", "def getProjectURL():", "def rupture(url, outpath=None, branch='master', dirname=None, release=None):\n try:\n file, filename = _download(\n url, outpath=outpath, \n dirname=dirname, branch=branch, \n release=release\n )\n base, cs = _unzip(filename)\n _delete(filename)\n if release or branch != 'master':\n return\n to_find = \"{}/{}-{}\".format(base, file, branch)\n _newname = dirname or file\n shutil.move(to_find, base+\"/\"+_newname)\n except Exception as e:\n six.print_(traceback.format_exc())\n six.print_(\"Cannot download the repo. Could you check the repo url ?\")", "def test_fetch_valid_github_repo(self):\n url = 'https://github.com/ivacf/archi'\n repo = GitHubRepoFetcher().fetch(url)\n self.assertEqual('archi', repo['name'])", "def update_code_from_git():\n if not files.exists(CODE_DIR):\n with cd(HOME_DIR):\n run(\"git clone %s\" % MAIN_GITHUB_REP )\n\n with cd(CODE_DIR):\n git_pull()", "def project():", "def project():", "def project():", "def git_clone(repo_path, path):\n r = envoy.run('git clone {repo} {path}'.format(repo=repo_path, path=path))\n if r.status_code != 0 and r.std_err != '':\n return False\n return True", "def addRepository(self, name, url):\n sslVerify = \"yes\" if url.startswith(\"https\") else \"no\"\n self.manager.addKickstartRepository(self.currentProject, baseurl=url,\n name=name,\n ssl_verify=sslVerify)\n self.manager.saveKickstartFile(self.currentProject)\n self.refresh()", "async def create_from_git(self, token: Any, repo: str) -> Definition:\n response = await self._invoke({\n \"jsonrpc\": \"2.0\",\n \"method\": \"ProjectAPI.CreateFromGit\",\n \"id\": self.__next_id(),\n \"params\": [token, repo, ]\n })\n assert response.status // 100 == 2, str(response.status) + \" \" + str(response.reason)\n payload = await response.json()\n if 'error' in payload:\n raise ProjectAPIError.from_json('create_from_git', payload['error'])\n return Definition.from_json(payload['result'])", "def download(self):\n cmd = mccli() + \" d f \" + self.localpath + \" -p \" + self.project.name\n \n set_cli_remote(self.project.remote)\n \n child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = child.communicate()\n return CLIResult(out, err, child.returncode)", "def create(ctx, template_name, website_name):\n try:\n # Check if the destination 
directory already exists\n path = os.path.join(ctx.obj['BASEDIR'], website_name)\n if os.path.exists(path):\n answer = input('Do you want to delete the existing directory? [Y] ')\n if answer.lower() == 'y' or answer == '':\n shutil.rmtree(path)\n\n # Generate github repo string\n github_name = template_name\n if '/' not in template_name:\n github_name = 'docker-hosting/%s-template' % template_name\n \n # Try to download repository\n link = 'https://github.com/%s/archive/master.zip' % github_name\n urlretrieve(link, 'master.zip')\n\n # Unzip downloaded file to destination directory\n zip_ref = zipfile.ZipFile('master.zip', 'r')\n zip_ref.extractall(path)\n zip_ref.close()\n\n # The destination folder contains another folder named [github-repo-name]-master.\n # We need to move all files within this directory and delete it afterwards.\n repo_name = github_name.split('/')[1]\n master_dir = os.path.join(path, repo_name + '-master')\n for file in os.listdir(master_dir):\n shutil.move(os.path.join(master_dir, file), path)\n os.rmdir(os.path.join(path, repo_name + '-master'))\n\n # Now remove the file master.zip\n os.remove('master.zip')\n except PermissionError as e:\n # TODO: handle and log exceptions\n print('%s\\n%s' % (e, 'Note: Try to running this program as Administrator.'))\n except Exception as e:\n # TODO: handle and log exceptions\n print(e)", "def test_clone_repository(koan, assert_cloned_repo_exists):\n koan.shell('')", "def _clone_gitrepo():\n # Puts git repo in ~/.ssh/config to avoid interaction due to missing known_hosts\n git_server = urllib.splituser(urllib.splittype(env.project['git_repo'])[0])[1]\n if not files.exists('~/.ssh/config') or not files.contains('~/.ssh/config', git_server):\n files.append('~/.ssh/config', ['host %s' % git_server, ' StrictHostKeyChecking no'])\n\n branch = env.project.get('git_branch', 'master')\n if files.exists(_interpolate(DJANGO_PROJECT_DIR)):\n print _interpolate('project %(project)s already exists, updating')\n remote('git pull origin %s' % branch)\n else:\n with cd(_interpolate(VIRTUALENV_DIR)):\n run(_interpolate('git clone %(git_repo)s %(project)s'))\n if branch != 'master':\n remote('git fetch origin %s:%s' % (branch, branch))\n remote('git checkout %s' % branch)", "def pushrepo(projectjson, repourl):\n try:\n components = projectjson['components']\n name = projectjson['name']\n reponame = name + '_sc'\n logger.debug(f\"repourl is : {repourl}\")\n bb_split = repourl.split(\"//\")\n bb_split[1] = f\"{username}:{escape_password}@\"+bb_split[1]\n newrepourl = \"//\".join(bb_split)\n local_code_setup(reponame, newrepourl)\n dst_makefile_path = f\"/tmp/{reponame}/Makefile\"\n if not os.path.exists(dst_makefile_path):\n src_makefile_path = f\"/tmp/skeleton-build/Makefile\"\n copy2(src_makefile_path, dst_makefile_path)\n print(\"Makefile added\")\n createcomponents(components, reponame, newrepourl, name)\n bitbucket.push_repo_to_bitbucket(f\"/tmp/{reponame}\")\n rmtree('/tmp/skeleton-build')\n rmtree(f'/tmp/{reponame}')\n return True\n except Exception as e:\n print(\"caught exception.: \", e)\n return False", "def export_to_git(course_id, repo, user='', rdir=None):\r\n # pylint: disable=R0915\r\n\r\n if not GIT_REPO_EXPORT_DIR:\r\n raise GitExportError(GitExportError.NO_EXPORT_DIR)\r\n\r\n if not os.path.isdir(GIT_REPO_EXPORT_DIR):\r\n raise GitExportError(GitExportError.NO_EXPORT_DIR)\r\n\r\n # Check for valid writable git url\r\n if not (repo.endswith('.git') or\r\n repo.startswith(('http:', 'https:', 'file:'))):\r\n raise 
GitExportError(GitExportError.URL_BAD)\r\n\r\n # Check for username and password if using http[s]\r\n if repo.startswith('http:') or repo.startswith('https:'):\r\n parsed = urlparse(repo)\r\n if parsed.username is None or parsed.password is None:\r\n raise GitExportError(GitExportError.URL_NO_AUTH)\r\n if rdir:\r\n rdir = os.path.basename(rdir)\r\n else:\r\n rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0]\r\n\r\n log.debug(\"rdir = %s\", rdir)\r\n\r\n # Pull or clone repo before exporting to xml\r\n # and update url in case origin changed.\r\n rdirp = '{0}/{1}'.format(GIT_REPO_EXPORT_DIR, rdir)\r\n branch = None\r\n if os.path.exists(rdirp):\r\n log.info(_('Directory already exists, doing a git reset and pull '\r\n 'instead of git clone.'))\r\n cwd = rdirp\r\n # Get current branch\r\n cmd = ['git', 'symbolic-ref', '--short', 'HEAD']\r\n try:\r\n branch = cmd_log(cmd, cwd).strip('\\n')\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Failed to get branch: %r', ex.output)\r\n raise GitExportError(GitExportError.DETACHED_HEAD)\r\n\r\n cmds = [\r\n ['git', 'remote', 'set-url', 'origin', repo],\r\n ['git', 'fetch', 'origin'],\r\n ['git', 'reset', '--hard', 'origin/{0}'.format(branch)],\r\n ['git', 'pull'],\r\n ]\r\n else:\r\n cmds = [['git', 'clone', repo]]\r\n cwd = GIT_REPO_EXPORT_DIR\r\n\r\n cwd = os.path.abspath(cwd)\r\n for cmd in cmds:\r\n try:\r\n cmd_log(cmd, cwd)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Failed to pull git repository: %r', ex.output)\r\n raise GitExportError(GitExportError.CANNOT_PULL)\r\n\r\n # export course as xml before commiting and pushing\r\n root_dir = os.path.dirname(rdirp)\r\n course_dir = os.path.splitext(os.path.basename(rdirp))[0]\r\n try:\r\n export_to_xml(modulestore('direct'), contentstore(), course_id,\r\n root_dir, course_dir, modulestore())\r\n except (EnvironmentError, AttributeError):\r\n log.exception('Failed export to xml')\r\n raise GitExportError(GitExportError.XML_EXPORT_FAIL)\r\n\r\n # Get current branch if not already set\r\n if not branch:\r\n cmd = ['git', 'symbolic-ref', '--short', 'HEAD']\r\n try:\r\n branch = cmd_log(cmd, os.path.abspath(rdirp)).strip('\\n')\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Failed to get branch from freshly cloned repo: %r',\r\n ex.output)\r\n raise GitExportError(GitExportError.MISSING_BRANCH)\r\n\r\n # Now that we have fresh xml exported, set identity, add\r\n # everything to git, commit, and push to the right branch.\r\n ident = {}\r\n try:\r\n user = User.objects.get(username=user)\r\n ident['name'] = user.username\r\n ident['email'] = user.email\r\n except User.DoesNotExist:\r\n # That's ok, just use default ident\r\n ident = GIT_EXPORT_DEFAULT_IDENT\r\n time_stamp = timezone.now()\r\n cwd = os.path.abspath(rdirp)\r\n commit_msg = 'Export from Studio at {1}'.format(user, time_stamp)\r\n try:\r\n cmd_log(['git', 'config', 'user.email', ident['email']], cwd)\r\n cmd_log(['git', 'config', 'user.name', ident['name']], cwd)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Error running git configure commands: %r', ex.output)\r\n raise GitExportError(GitExportError.CONFIG_ERROR)\r\n try:\r\n cmd_log(['git', 'add', '.'], cwd)\r\n cmd_log(['git', 'commit', '-a', '-m', commit_msg], cwd)\r\n except subprocess.CalledProcessError as ex:\r\n log.exception('Unable to commit changes: %r', ex.output)\r\n raise GitExportError(GitExportError.CANNOT_COMMIT)\r\n try:\r\n cmd_log(['git', 'push', '-q', 'origin', branch], cwd)\r\n except 
subprocess.CalledProcessError as ex:\r\n log.exception('Error running git push command: %r', ex.output)\r\n raise GitExportError(GitExportError.CANNOT_PUSH)", "def test_create_repository(koan, assert_repo_exists):\n koan.shell('')", "def _make_github_repo(github_login, entity, reponame, existing,\n access_protocol, private, dryrun):\n repo = None\n access_url = None\n try:\n repo = entity.get_repo(reponame)\n access_url = get_repo_url(repo, access_protocol, github_login)\n except gh.GithubException as e:\n if e.status != 404:\n # this is not a not found message, raise\n raise e\n lgr.debug(\n 'To be created repository \"%s\" does not yet exist on Github',\n reponame)\n\n if repo is not None:\n res = dict(\n url=access_url,\n preexisted=True,\n )\n if existing in ('skip', 'reconfigure'):\n return dict(\n res,\n status='notneeded',\n preexisted=existing == 'skip',\n )\n elif existing == 'error':\n return dict(\n res,\n status='error',\n message=('repository \"%s\" already exists on Github', reponame),\n )\n elif existing == 'replace':\n _msg = ('repository \"%s\" already exists on GitHub.', reponame)\n # Since we are running in the loop trying different tokens,\n # this message might appear twice. TODO: avoid\n if ui.is_interactive:\n remove = ui.yesno(\n \"Do you really want to remove it?\",\n title=_msg[0] % _msg[1],\n default=False\n )\n else:\n return dict(\n res,\n status='impossible',\n message=(\n _msg[0] + \" Remove it manually first on GitHub or \"\n \"rerun datalad in an interactive shell to confirm \"\n \"this action.\",\n _msg[1]),\n )\n if not remove:\n return dict(\n res,\n status='impossible',\n message=_msg,\n )\n repo.delete()\n repo = None\n else:\n RuntimeError('must not happen')\n\n if repo is None and not dryrun:\n try:\n repo = entity.create_repo(\n reponame,\n # TODO description='',\n # TODO homepage='',\n private=private,\n has_issues=False,\n has_wiki=False,\n has_downloads=False,\n auto_init=False)\n except gh.GithubException as e:\n if e.status == 404:\n # can happen if credentials are not good enough!\n raise\n msg = \"Github {} ({})\".format(\n e.data.get('message', str(e) or 'unknown'),\n e.data.get('documentation_url', 'no url')\n )\n if e.data.get('errors'):\n msg += ': {}'.format(\n ', '.join(\n [\n err.get('message')\n for err in e.data.get('errors', [])\n if 'message' in err\n ]))\n return dict(\n res,\n status='error',\n message=msg,\n )\n\n if repo is None and not dryrun:\n raise RuntimeError(\n 'something went wrong, we got no Github repository')\n\n # get definitive URL:\n # - use previously determined one\n # - or query a newly created project\n # - or craft one in dryrun mode\n access_url = access_url or '{}github.com{}{}/{}.git'.format(\n 'https://' if access_protocol == 'https' else 'git@',\n '/' if access_protocol == 'https' else ':',\n # this will be the org, in case the repo will go under an org\n entity.login,\n reponame,\n ) if dryrun else get_repo_url(repo, access_protocol, github_login)\n\n return dict(\n status='ok',\n url=access_url,\n preexisted=False,\n )", "def create_project(opts):\n if opts['django']:\n structure.create_django_proj(opts)\n if opts['cookiecutter_template']:\n structure.create_cookiecutter(opts)\n proj_struct = structure.make_structure(opts)\n structure.create_structure(proj_struct,\n update=opts['update'] or opts['force'])\n if not opts['update'] and not repo.is_git_repo(opts['project']):\n repo.init_commit_repo(opts['project'], proj_struct)", "def download():\n \"\"\"\n \"The book p.79 have error.\n 
\"https://github.com/login/oauth/authorize?client_id=7e0a3cd836d3e544dbd9&redirect_uri=https%3A%2F%2Fgist.github.com%2Fauth%2Fgithub%2Fcallback%3Freturn_to%3Dhttps%253A%252F%252Fgist.github.com%252Fyoungsoul%252Ffc69665c5d08e189c57c0db0e93017a6&response_type=code&state=9b385430ee7cd1a75ca91c1d1cb6c565111f6b81e54a71f42ae9b22035241b9b\n \"\"\"\n subprocess.call([\n 'wget',\n 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat', \n '-P',\n 'origin_data/'\n ])\n logger.info('Download success!')", "def command_new_repo(self):\n repoinit.new_repo(*self.args())", "def gh_repo(token, repo_full_name):\n (owner, repo,) = repo_full_name.split('/')\n\n gh = github3.login(token=token)\n return gh.repository(owner, repo)", "def install():\n return InstallGit()", "def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())", "def main(github_token, branch_name, repository, sha):\n create_branch(github_token, branch_name, repository, sha)\n click.echo(f\"Successfully created branch {branch_name}\")", "def test_vcs_url_scheme_to_object(tmpdir):\n git_repo = create_repo_from_pip_url(\n **{\n 'pip_url': 'git+git://git.myproject.org/MyProject.git@da39a3ee5e6b4b',\n 'repo_dir': str(tmpdir.join('myproject1')),\n }\n )\n\n # TODO cwd and name if duplicated should give an error\n\n assert isinstance(git_repo, GitRepo)\n assert isinstance(git_repo, BaseRepo)\n\n hg_repo = create_repo_from_pip_url(\n **{\n 'pip_url': 'hg+https://hg.myproject.org/MyProject#egg=MyProject',\n 'repo_dir': str(tmpdir.join('myproject2')),\n }\n )\n\n assert isinstance(hg_repo, MercurialRepo)\n assert isinstance(hg_repo, BaseRepo)\n\n svn_repo = create_repo_from_pip_url(\n **{\n 'pip_url': 'svn+svn://svn.myproject.org/svn/MyProject#egg=MyProject',\n 'repo_dir': str(tmpdir.join('myproject3')),\n }\n )\n\n assert isinstance(svn_repo, SubversionRepo)\n assert isinstance(svn_repo, BaseRepo)", "def fetch(git=''):\n cloned = False\n if 'dotfiles' in env:\n git = env.dotfiles\n\n # Check project is already cloned\n if git != '' and not is_dir('/home/%(user)s/dotfiles' % env):\n cmd = 'cd ; git clone %(git)s' % locals()\n run(cmd)\n cloned = True\n\n # update locally dotfiles\n dotfiles = '/home/%(user)s/dotfiles' % env\n if not cloned and not is_dir(dotfiles):\n abort(red(\"Please execute dotfiles.fetch\"))\n\n cmd = 'cd %(dotfiles)s ; git pull' % locals()\n run(cmd)", "def git_clone(git_url=QMK_GIT_URL, git_branch=QMK_GIT_BRANCH):\n repo = repo_name(git_url)\n zipfile_name = repo + '.zip'\n command = ['git', 'clone', '--single-branch', '-b', git_branch, git_url, repo]\n\n try:\n check_output(command, stderr=STDOUT, universal_newlines=True)\n os.chdir(repo)\n hash = check_output(['git', 'rev-parse', 'HEAD'])\n open('version.txt', 'w').write(hash.decode('cp437') + '\\n')\n repo_cloned = True\n\n except CalledProcessError as build_error:\n repo_cloned = False\n logging.error(\"Could not clone %s: %s (returncode: %s)\" % (repo, build_error.output, build_error.returncode))\n logging.exception(build_error)\n\n os.chdir('..')\n\n if repo_cloned:\n store_source(zipfile_name, repo, 'cache')\n\n return True", "def clone(connection, url, rid, vsid='6IT', start_dir='src/', vcs_token=None, error_exists=True,\n role='SOURCE', typ='GITHUB'):\n\n config = {}\n\n if start_dir:\n config['VCS_TARGET_DIR'] = start_dir\n\n if vcs_token:\n config['CLIENT_VCS_AUTH_TOKEN'] = vcs_token\n\n repo = Repository(connection, rid)\n\n try:\n 
repo.create(url, vsid, config=config, role=role, typ=typ)\n except GCTSRepoAlreadyExistsError as ex:\n if error_exists:\n raise ex\n\n _mod_log().debug(ex)\n _mod_log().info(str(ex))\n\n repo.wipe_data()\n\n if not repo.is_cloned:\n repo.clone()\n else:\n _mod_log().info('Not cloning the repository \"%s\": already performed')\n\n return repo", "def clone_repo():\n\n with cd(env.root):\n sudo('git clone %(repo)s %(code_root)s' % env, user=env.deploy_user)", "def clone_repo(start=0,end=100000):\n repo_list=repo_url['URLs']\n count=0\n\n for url in repo_list[start:end]:\n url=str(url)\n name=url.rsplit('/', 2) #get the repo name (last 2 part) of the repository url\n last=name[-2]+'-'+name[-1]\n try:\n if not os.path.exists(last):\n os.mkdir(last) #Make folder for a repo if it does not exist\n repo=str(url) + '.git'\n folder= r'repos'\n Repo.clone_from(repo,last)\n count+=1\n print('cloned ' , repo)\n except:\n continue\n return count", "def fetch_and_validate_project(\n launch_project: LaunchProject, api: Api\n) -> LaunchProject:\n if launch_project.source == LaunchSource.DOCKER:\n return launch_project\n if launch_project.source == LaunchSource.LOCAL:\n if not launch_project._entry_points:\n wandb.termlog(\n f\"{LOG_PREFIX}Entry point for repo not specified, defaulting to `python main.py`\"\n )\n launch_project.add_entry_point([\"python\", \"main.py\"])\n elif launch_project.source == LaunchSource.JOB:\n launch_project._fetch_job()\n else:\n launch_project._fetch_project_local(internal_api=api)\n\n assert launch_project.project_dir is not None\n # this prioritizes pip, and we don't support any cases where both are present\n # conda projects when uploaded to wandb become pip projects via requirements.frozen.txt, wandb doesn't preserve conda envs\n if os.path.exists(\n os.path.join(launch_project.project_dir, \"requirements.txt\")\n ) or os.path.exists(\n os.path.join(launch_project.project_dir, \"requirements.frozen.txt\")\n ):\n launch_project.deps_type = \"pip\"\n elif os.path.exists(os.path.join(launch_project.project_dir, \"environment.yml\")):\n launch_project.deps_type = \"conda\"\n\n return launch_project", "def make_repos_public(tools_to_build, credentials):\n with open(credentials, 'r') as f:\n token = f.read().strip()\n for tool in tools_to_build:\n print 'Making tool: {} a publically visible repository.'.format(tool)\n url = 'https://quay.io/api/v1/repository/ucsc_cgl/{}/changevisibility'.format(tool)\n payload = {'visibility': 'public'}\n headers = {'Authorization': 'Bearer {}'.format(token), 'content-type': 'application/json'}\n response = requests.post(url, data=json.dumps(payload), headers=headers)\n assert response.status_code == 200, 'POST call failed. Code: {}. 403 = bad token'.format(response.status_code)", "def test_get_repo_pulled(self):\n repo = Repository(\"https://github.com/qcoumes/gitload_test.git\")\n self.assertTrue(repo.get_repo())\n self.assertTrue(os.path.exists(MEDIA_ROOT+\"/gitload_test\"))\n if (os.path.exists(MEDIA_ROOT+\"/gitload_test\")):\n shutil.rmtree(MEDIA_ROOT+\"/gitload_test\")", "async def git(ctx):\n if can_answer(ctx):\n msg = \"https://github.com/Eerolz/launchbot\"\n await ctx.send(msg)" ]
[ "0.6979374", "0.6772234", "0.65194136", "0.6498187", "0.6447341", "0.6415485", "0.63567287", "0.62652856", "0.62006974", "0.6198321", "0.6183075", "0.61510575", "0.61366624", "0.6131653", "0.6120334", "0.6097073", "0.60869515", "0.6049519", "0.6030718", "0.60099995", "0.59977555", "0.59819007", "0.59721935", "0.5970539", "0.5956272", "0.5951225", "0.5925638", "0.5917762", "0.5913114", "0.59099126", "0.59027153", "0.5899848", "0.58829314", "0.5870758", "0.5870615", "0.58653235", "0.5848792", "0.58386344", "0.58373034", "0.5834087", "0.5833705", "0.58090436", "0.5796617", "0.5784453", "0.5784099", "0.57338506", "0.5733535", "0.5721413", "0.5711788", "0.5704231", "0.5700258", "0.56947786", "0.5684462", "0.56832767", "0.56740814", "0.56723124", "0.56683046", "0.5663257", "0.5659311", "0.5653245", "0.5652487", "0.5649389", "0.5644096", "0.56402314", "0.5635186", "0.5634206", "0.563352", "0.56317466", "0.56296647", "0.5629494", "0.5629494", "0.5629494", "0.56205267", "0.5609127", "0.55970764", "0.55955005", "0.55904573", "0.55874896", "0.558519", "0.55833", "0.5583205", "0.55778784", "0.55767983", "0.5574728", "0.55690104", "0.5553826", "0.553978", "0.5530045", "0.55113035", "0.5493751", "0.5484916", "0.5482422", "0.54816747", "0.5479542", "0.5478894", "0.54649043", "0.545092", "0.54503715", "0.5437888", "0.54367524" ]
0.55959404
75
Archive a GIT project and upload it to Dash.
def deploy_project(name, apikey, changed_files=None, repo=None, branch='master'):
    zbuff = StringIO()
    if changed_files is not None:
        changed_files = list(set(changed_files) | REQUIRED_FILES)
    _archive_project(name, zbuff, changed_files, repo, branch)
    zbuff.reset()
    payload = {'apikey': apikey, 'project': name}
    req = requests.post(
        DASH_API_URL + 'as/import.json?version=portia',
        files=[('archive', ('archive', zbuff, 'application/zip'))],
        params=payload
    )
    if req.status_code == 200:
        project_url = DASH_API_URL.rsplit('/', 2)[0] + '/p/' + name
        return {
            'status': 'ok',
            'schedule_url': project_url
        }
    else:
        raise DeployError('Deploy to Dash failed: %s' % req.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def upload_tar_from_git():\n require(\"release\", provided_by=[deploy])\n tree = prompt(\"Please enter a branch or SHA1 to deploy\", default=\"master\")\n local(\"git archive --format=tar %s | gzip > %s.tar.gz\" % (tree, env['release']))\n sudo(\"mkdir %(path)s/releases/%(release)s\" % env)\n put(\"%(release)s.tar.gz\" % env, \"%(path)s/packages/\" % env, use_sudo=True)\n sudo(\"cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz\" % env)\n local(\"rm %(release)s.tar.gz\" % env)", "def archive_projectbuild(projectbuild, archive):\n transport = get_transport_for_projectbuild(projectbuild, archive)\n transport.archive()", "def _archive_repository(\n owner: str, project_name: str, secret_token: str\n) -> Tuple[bool, str]:\n project_settings = {\"archived\": \"true\"}\n\n headers = {\n \"Authorization\": f\"token {secret_token}\",\n }\n\n url = f\"https://{REST_HOST}/repos/{owner}/{project_name}\"\n\n response = patch(url, json=project_settings, headers=headers, verify=VERIFY_CERT)\n return response.ok, (\n f\"Status: {response.status_code}. \" f'Error: \"{response.text}\".'\n )", "def archive(project, filename, pack_envs=False):\n return archiver._archive_project(project, filename, pack_envs)", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def deploy():\n build()\n collect()\n commit()\n push()", "def sync(args: argparse.Namespace) -> None:\n\tdel args\n\trepo_path = _find_repo()\n\tmanifest_file = os.path.join(repo_path, MANIFEST_DIRECTORY, storas.manifest.DEFAULT_MANIFEST_FILE)\n\tmanifest = storas.manifest.load(manifest_file)\n\tfor project in manifest.projects:\n\t\tfull_path = os.path.join(repo_path, \"..\", project.path)\n\t\tremote = project.remote\n\t\tfull_fetch_url = urllib.parse.urljoin(remote.fetch_host, project.name)\n\t\tif not os.path.exists(full_path):\n\t\t\tos.makedirs(full_path, exist_ok=True)\n\t\t\tLOGGER.debug(\"Created '%s'\", full_path)\n\t\t\t_run_git([\"clone\", \"-b\", project.revision, full_fetch_url], cwd=full_path)", "def upload_tar_from_git(path):\n require('release', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('branch', provided_by=[prod])\n local('git checkout %s' % (env.branch))\n local('git archive --format=tar %s | gzip > %s.tar.gz' % (env.branch, env.release))\n sudo('mkdir -p %s' % (path))\n put('%s.tar.gz' % (env.release), '/tmp/', mode=0755)\n sudo('mv /tmp/%s.tar.gz %s/packages/' % (env.release, env.code_root))\n sudo('cd %s && tar zxf ../../../packages/%s.tar.gz' % (env.whole_path, env.release))\n local('rm %s.tar.gz' % (env.release))\n sudo('rm %s/packages/%s.tar.gz' % (env.code_root, env.release))", "def deploy():\n build()\n rsync_project(\n local_dir=os.path.abspath(env.config['destination']) + \"/\",\n remote_dir=env.remote_dir,\n delete=True,\n extra_opts='--exclude=\".DS_Store\"',\n )", "def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), 
check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def git_archive_all(path, archive_file_name):\n import os\n import tarfile\n\n def ls_files(prefix=''):\n \"\"\"\n Does a `git ls-files` on every git repository (eg: submodules)\n found in the working git repository and returns a list with all the\n filenames returned by each `git ls-files`\n\n --full-name Forces paths to be output relative to the project top\n directory\n --exclude-standard adds standard git exclusions\n (.git/info/exclude, .gitignore, ...)\n \"\"\"\n cmd = 'git ls-files --full-name --exclude-standard'\n raw_files = local(cmd, capture=True)\n files = []\n\n for filename in raw_files.split('\\n'):\n if (os.path.isdir(filename) and\n os.path.exists(os.path.join(filename, '.git'))):\n os.chdir(filename)\n files.extend(ls_files(prefix=filename))\n else:\n files.append(os.path.join(prefix, filename))\n\n return files\n\n cwd = os.getcwd()\n os.chdir(path)\n files = ls_files()\n os.chdir(path)\n project_tar = tarfile.open(archive_file_name, 'w:gz')\n\n for filename in files:\n project_tar.add(filename)\n\n project_tar.close()\n os.chdir(cwd)\n\n print(green('Archive created at %s/%s' % (path, archive_file_name)))", "def pack(**kwargs):\n require('repository')\n #if env.repository.startswith('svn://'):\n if env.repository.type == 'svn':\n execute(svn.pack, **kwargs)\n if env.repository.type == 'git':\n execute(git.pack, **kwargs)\n else:\n abort('Unsupported repository type %s' % env.repository)", "def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)", "def deploy_django_project(self):\n\n if self.no_files:\n return\n\n local_dir = \"{0}\".format(self.app_dir)\n app_dir = \"{0}\".format(self.app_remote_dir)\n\n if not exists(app_dir):\n mkdir(app_dir)\n\n zip_name = make_zip(local_dir, self.app_name)\n put(zip_name, self.app_remote_dir)\n\n with cd(self.app_remote_dir):\n run(\"unzip -o {0}\".format(zip_name))\n\n os.remove(zip_name)", "def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def push(self) -> None:\n\n with 
ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def deploy():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n with settings(warn_only=True):\n maintenance_up()\n \n checkout_latest()\n gzip_assets()\n deploy_to_s3()\n maintenance_down()", "def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())", "def archive(\n self,\n ostream: Union[TextIO, BinaryIO],\n treeish: Optional[str] = None,\n prefix: Optional[str] = None,\n **kwargs: Any,\n ) -> Repo:\n if treeish is None:\n treeish = self.head.commit\n if prefix and \"prefix\" not in kwargs:\n kwargs[\"prefix\"] = prefix\n kwargs[\"output_stream\"] = ostream\n path = kwargs.pop(\"path\", [])\n path = cast(Union[PathLike, List[PathLike], Tuple[PathLike, ...]], path)\n if not isinstance(path, (tuple, list)):\n path = [path]\n # end assure paths is list\n self.git.archive(\"--\", treeish, *path, **kwargs)\n return self", "def deploy():\n build()\n copy()\n install()", "def push_sources():\n ensure_src_dir()\n push_rev = getattr(env, 'push_rev', None)\n if push_rev is None:\n 
push_rev = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n local(\"git tag -a {0} -m \\\"Tagged for release\\\"\".format(push_rev))\n local(\"git push origin master --tags\")\n\n with cd(SRC_DIR):\n run(\"git pull origin master\")\n run(\"git fetch -t\")\n run(\"git checkout {0}\".format(push_rev))", "def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def push(self):\n out, err, code = self.command( [\"git\", \"push\"], self.directory )", "def upload(self, request, pk=None):\n app = self.get_object()\n deployment = Revision()\n deployment.compressed_archive = request.FILES['file']\n deployment.app = app\n deployment.save()\n app.deploy()\n response = {}\n return Response(response)", "def push ():\n\n tagname = get_tag (comp_versions, 'ACE')\n\n if opts.push:\n if opts.take_action:\n vprint (\"Pushing ACE_TAO\", opts.ace_tao_branch, \"to origin\")\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin \" + opts.ace_tao_branch)\n\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin tag \" + tagname)\n\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n ex (\"cd $DOC_ROOT/MPC && git push origin tag \" + tagname)\n\n # Push release branches\n latest_branch_helper (push_latest_branch, opts.release_type)\n else:\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n print (\"Pushing tags:\\n\")\n print (\"Pushing tag \" + tagname + \"\\n\")", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )", "def push(ref='origin/master'):\n from fabric.api import local, run, cd\n from fabric.contrib.project import rsync_project\n local('pelican -s %s -d' % env.config_file)\n rsync_project(\n remote_dir=env.host_site_path,\n local_dir='output/',\n delete=True\n )\n if env.host_type != 'production':\n run(\"chown -R %(user)s:%(host_webserver_user)s %(host_site_path)s \"\n \"&& chmod -R 02750 %(host_site_path)s\" % env)", "def upload(repo_name, root_dir, incoming_dir, hashalgo, github_token=None):\n\n if github_token:\n github_release._github_token_cli_arg = github_token\n\n hashcmd = get_hashcmd(hashalgo)\n if not hashcmd:\n raise ValueError('hashalgo \"' + hashalgo + '\" not 
found')\n\n if not os.path.isdir(incoming_dir):\n raise ValueError(\"Missing \" + incoming_dir + \" directory\")\n\n hashalgo_dir = os.path.join(root_dir, hashalgo)\n if not os.path.isdir(hashalgo_dir):\n os.mkdir(hashalgo_dir)\n\n # Download information about current release\n\n # Get current fileindex\n try:\n hashalgo_csv = download_fileindex_csv(\n repo_name, hashalgo_dir, hashalgo, github_token\n )\n fileindex = read_fileindex_csv(hashalgo_csv)\n except ValueError:\n # New release\n hashalgo_csv = os.path.join(hashalgo_dir, hashalgo + \".csv\")\n fileindex = []\n\n # Get list of successfully uploaded assets (to avoid uploading them again)\n # and delete partially uploaded ones.\n uploaded_assets = (\n github_release.get_assets(repo_name, hashalgo) if fileindex else []\n )\n uploaded_hashes = []\n for asset in uploaded_assets:\n if asset[\"state\"] == \"uploaded\":\n uploaded_hashes.append(asset[\"name\"])\n else:\n # Remove asset partially uploaded\n github_release.gh_asset_delete(repo_name, hashalgo, asset[\"name\"])\n\n # Update release information with incoming data\n\n # Add incoming files to fileindex and hashalgo_dir\n filenames = [\n f\n for f in os.listdir(incoming_dir)\n if os.path.isfile(os.path.join(incoming_dir, f)) and not f.startswith(\".\")\n ]\n for filename in filenames:\n filepath = os.path.join(incoming_dir, filename)\n checksum = hashcmd(filepath)\n filedate = date_to_utc_string(get_filedate(filepath))\n\n existingItems = [fileindex_item for fileindex_item in fileindex\n if fileindex_item[COLUMN_CHECKSUM] == checksum and fileindex_item[COLUMN_FILENAME] == filename]\n if not existingItems:\n # new item\n fileindex.append([checksum, filename, filedate])\n # Make sure the hash-named file is present\n hashfilepath = os.path.join(hashalgo_dir, checksum)\n if not os.path.isfile(hashfilepath):\n copyfile(filepath, hashfilepath)\n\n # Create new hashalgo.csv from existing and incoming files\n fileindex.sort(key=lambda a: (a[COLUMN_FILENAME].casefold(), a[COLUMN_FILEDATE]))\n write_fileindex_csv(hashalgo_csv, fileindex)\n hashalgo_md = os.path.join(root_dir, hashalgo_dir, hashalgo + \".md\")\n write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo)\n\n # Upload updated releaes info and new data files\n\n # Create hashalgo release (in case it does not exist)\n github_release.gh_release_create(repo_name, hashalgo, publish=True)\n\n # Delete old hashalgo.csv and hashalgo.md\n github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + \".csv\")\n github_release.gh_asset_delete(repo_name, hashalgo, hashalgo + \".md\")\n\n # Upload new hashalgo.csv and hashalgo.md\n github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_csv)\n github_release.gh_asset_upload(repo_name, hashalgo, hashalgo_md)\n\n # Upload new data files\n for fileindex_item in fileindex:\n checksum = fileindex_item[COLUMN_CHECKSUM]\n filename = fileindex_item[COLUMN_FILENAME]\n if checksum in uploaded_hashes:\n # already uploaded\n continue\n filepath = os.path.join(hashalgo_dir, checksum)\n github_release.gh_asset_upload(repo_name, hashalgo, filepath)\n\n # Copy md file content into release notes\n with open(hashalgo_md, \"r\") as file:\n release_notes = file.read()\n\n if len(release_notes) > 125000:\n note = \"Since the release description is > 125000 characters, the corresponding markdown file is instead pushed into the repository.\"\n release_notes = f\"See [{hashalgo}.md](https://github.com/{repo_name}/blob/main/{hashalgo}/{hashalgo}.md)\\n\\n_{note}_\"\n logging.warning(f\"{hashalgo}: 
{note}\")\n\n github_release.gh_release_edit(repo_name, hashalgo, body=release_notes)", "def deploy():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')", "def __gitPush(self):\n self.vcs.gitPush(self.project.getProjectPath())", "def sync_git_repo():\n # get the current dir of this script\n current_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n repo_path = os.path.join(current_dir,REPO_NAME)\n logging.info(\"Repository path is: \"+repo_path)\n # check to see if a repo has been init already\n try: \n repo = git.Repo(repo_path)\n logging.info(\"Git repo has already been created.\")\n except (git.exc.InvalidGitRepositoryError,git.exc.NoSuchPathError):\n logging.info(\"No git repo has been initialized for this module. Cloning from github.com now.\")\n repo_url = \"https://\"+REPO_USERNAME+\":\"+REPO_PERSONAL_ACCESS_TOKEN+\"@github.com/\"+REPO_USERNAME+\"/\"+REPO_NAME+\".git\"\n git.Repo.clone_from(repo_url,repo_path)\n logging.info(\"Repo cloned successfully.\")\n repo = git.Repo(repo_path)\n # now we have a valid repo created \n # pull the latest data from the repo\n origin = repo.remotes.origin\n origin.pull()\n # create the csv output dir if it does not exist\n Path(paho_csv_reports_dir).mkdir(parents=False, exist_ok=True)\n # get all csv files in this dir\n all_paho_csv_files = glob.glob(paho_csv_reports_dir+os.path.sep+\"*.csv\")\n # add all files in this dir to the repo index\n repo.index.add(all_paho_csv_files)\n logging.info(\"Added all .csv files from \"+paho_csv_reports_dir+\" to repo index.\")\n # set the commit message\n repo.index.commit(\"Automatic commit by \"+os.path.basename(__file__))\n # git push \n origin.push()\n logging.info(\"All csv files pushed to github repo successfully.\")", "def deploy_to_s3():\n env.gzip_path = '%(path)s/repository/gzip/assets/' % env\n run(('s3cmd -P --add-header=Content-encoding:gzip --guess-mime-type --rexclude-from=%(path)s/repository/s3exclude sync %(gzip_path)s s3://%(s3_bucket)s/%(project_name)s/') % env)", "def import_project(name, apikey, repo):\n\n def validump_resource(jsonres, restype):\n get_schema_validator(restype).validate(jsonres)\n return json.dumps(jsonres)\n\n def split_templates(spider, spider_filename, files):\n templates = spider['templates']\n spider['templates'] = []\n spider['template_names'] = []\n for template in templates:\n template['name'] = template['page_id']\n spider['template_names'].append(template['name'])\n template_fname = os.path.join(\n spider_filename.rpartition('.')[0],\n str(template['name']) + '.json')\n files[template_fname] = validump_resource(template, 'template')\n\n archive = zipfile.ZipFile(StringIO(_download_project(name, apikey)))\n files = {}\n for filename in archive.namelist():\n contents = archive.read(filename)\n if filename == 'items.json':\n resource = 'items'\n elif filename == 'extractors.json':\n resource = 'extractors'\n elif filename.startswith('spiders'):\n resource = 'spider'\n else:\n resource = None\n if resource in ['items', 'spider', 'extractors']:\n as_json = json.loads(contents)\n if resource == 'items':\n as_json = _fix_items(as_json)\n elif resource == 'spider':\n split_templates(as_json, filename, files)\n contents = validump_resource(as_json, resource)\n files[filename] = contents\n if 'extractors.json' not in files:\n files['extractors.json'] = '{}'\n if ('items.json' not in files or not files['items.json'] 
or\n files['items.json'] == '{}'):\n files['items.json'] = DEFAULT_DASH_ITEM\n repo.save_files(files, 'master', 'Publishing initial import.')\n # XXX: Tell dash that project has been opened in Portia\n deploy_project(name, apikey, changed_files=[])", "def _archive_project(name, buff, files=None, repo=None, branch='master',\n ignore_deleted=False):\n if repo is None:\n repo = Repoman.open_repo(name)\n now = datetime.now().timetuple()[:6]\n archive = zipfile.ZipFile(buff, \"w\", zipfile.ZIP_DEFLATED)\n files_list = files if files is not None else \\\n repo.list_files_for_branch(branch)\n all_files = files_list if files is None else \\\n repo.list_files_for_branch(branch)\n\n template_paths = defaultdict(list)\n for file_path in all_files:\n split_file_path = file_path.split('/')\n if len(split_file_path) > 2:\n template_paths[split_file_path[1]].append(file_path)\n extractors = json.loads(repo.file_contents_for_branch('extractors.json',\n branch) or '{}')\n\n seen_files = set()\n spiders = set()\n for file_path in files_list:\n if file_path.startswith('spiders'):\n try:\n parts = file_path.split(\"/\")\n if len(parts) >= 2:\n spider_name = parts[1]\n if spider_name.endswith('.json'):\n spider_name = spider_name[:-5]\n if spider_name not in spiders:\n # Load spider if necessary\n if len(parts) > 2:\n file_path = 'spiders/' + spider_name + '.json'\n file_contents = repo.file_contents_for_branch(\n file_path, branch)\n as_json = json.loads(file_contents)\n templates = []\n # Load all spider templates\n spider_templates = template_paths.get(spider_name, [])\n for template_path in spider_templates:\n seen_files.add(template_path)\n existing = {}\n # Ignore deleted templates\n try:\n templ_contents = repo.file_contents_for_branch(\n template_path, branch)\n except (TypeError, ValueError):\n continue\n json_template = json.loads(templ_contents)\n # Validate extractors\n template_extractors = json_template.get(\n 'extractors', {})\n for field, eids in template_extractors.items():\n existing[field] = [eid for eid in eids\n if eid in extractors]\n json_template['extractors'] = existing\n spider_name = parts[1]\n templates.append(json_template)\n spiders.add(spider_name)\n as_json.pop('template_names', None)\n as_json['templates'] = templates\n _add_to_archive(archive, file_path,\n json.dumps(as_json), now)\n except TypeError:\n if ignore_deleted:\n continue\n # Handle Deleted Spiders\n file_contents = repo.file_contents_for_branch(file_path,\n 'master')\n file_info = {'deleted': True}\n if file_contents:\n as_json = json.loads(file_contents)\n _add_to_archive(archive, file_path, json.dumps(file_info), now)\n else:\n file_contents = repo.file_contents_for_branch(file_path, branch)\n _add_to_archive(archive, file_path, file_contents, now)\n seen_files.add(file_path)\n\n # Add empty placeholders for missing files required by dash\n for file_path in {'extractors.json', 'items.json'} - seen_files:\n _add_to_archive(archive, file_path, '{}', now)\n archive.close()", "def git_install(projects_yaml):\n if git_install_requested():\n git_pre_install()\n projects_yaml = git_default_repos(projects_yaml)\n git_clone_and_install(projects_yaml, core_project='keystone')\n git_post_install(projects_yaml)", "def upload_project(self, workflow_snapshot_id: Text, project_path: Text) -> Text:\n with tempfile.TemporaryDirectory() as temp_dir:\n zip_file_name = 'workflow_{}_project.zip'.format(workflow_snapshot_id)\n temp_dir_path = Path(temp_dir)\n zip_file_path = temp_dir_path / zip_file_name\n 
make_dir_zipfile(project_path, zip_file_path)\n with open(zip_file_path, 'rb') as f:\n self.s3_client.upload_fileobj(f, self.bucket_name, zip_file_name)\n return zip_file_name", "def do_deploy(archive_path):\n try:\n test = put(archive_path, \"/tmp/\")\n lista = archive_path.split('/')\n folder = lista[-1][:lista[-1].find(\".\")]\n dest = \"/data/web_static/releases/\" + folder\n run(\"mkdir -p {}\".format(dest))\n run(\"tar -xzf {} -C {}\".format(test[0], dest))\n run(\"rm {}\".format(test[0]))\n run(\"mv /data/web_static/releases/{}/web_static/* \\\n /data/web_static/releases/{}/\".format(folder, folder))\n run(\"rm -rf /data/web_static/releases/{}/web_static\".format(folder))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s /data/web_static/releases/{} /data/web_static/current\".\n format(folder))\n return True\n except Exception:\n return False", "def get_archive_async(\n hostname, project, treeish, dir_path=None, **fetch_kwargs):\n _validate_args(hostname, project, treeish, dir_path)\n dir_path = (dir_path or '').strip('/')\n if dir_path:\n dir_path = '/%s' % dir_path\n return gerrit.fetch_async(\n hostname,\n '%s/+archive/%s%s.tar.gz' % _quote_all(project, treeish, dir_path),\n **fetch_kwargs)", "def main():\n with open('config.json') as config_file:\n configs = json.load(config_file)\n\n jar_list = utilities.upload_jars(configs)\n utilities.sign_jars(configs)\n\n artifact_folder = utilities.prepare_artifacts(configs, jar_list)\n\n repo_id = utilities.create_staging_repo(configs)\n utilities.deploy_to_staging_repo(configs, artifact_folder, repo_id)\n utilities.close_staging_repo(configs, repo_id)", "def deploy():\n archive_path = do_pack()\n\n if not archive_path:\n return False\n\n return do_deploy(archive_path)", "def git_push(c):\n c.run(\"git submodule foreach git push \")", "def pushrepo(projectjson, repourl):\n try:\n components = projectjson['components']\n name = projectjson['name']\n reponame = name + '_sc'\n logger.debug(f\"repourl is : {repourl}\")\n bb_split = repourl.split(\"//\")\n bb_split[1] = f\"{username}:{escape_password}@\"+bb_split[1]\n newrepourl = \"//\".join(bb_split)\n local_code_setup(reponame, newrepourl)\n dst_makefile_path = f\"/tmp/{reponame}/Makefile\"\n if not os.path.exists(dst_makefile_path):\n src_makefile_path = f\"/tmp/skeleton-build/Makefile\"\n copy2(src_makefile_path, dst_makefile_path)\n print(\"Makefile added\")\n createcomponents(components, reponame, newrepourl, name)\n bitbucket.push_repo_to_bitbucket(f\"/tmp/{reponame}\")\n rmtree('/tmp/skeleton-build')\n rmtree(f'/tmp/{reponame}')\n return True\n except Exception as e:\n print(\"caught exception.: \", e)\n return False", "def deploy():\n remote_dir = os.path.abspath(os.path.join(REMOTE_BASE_DIR, REPO_NAME))\n \n with settings(warn_only=True):\n if run(\"test -d %s\" % (remote_dir)).failed:\n puts(red(\"[Repo %s does not exist on remote at: %s]\" % (REPO_NAME, remote_dir)))\n with cd(REMOTE_BASE_DIR):\n run(\"git clone %s %s\" % (REPO_URL, REPO_NAME))\n\n puts(yellow(\"[Write logs]\"))\n run(\"echo '-----------------------------' > %s\" % REMOTE_ERR_FILE)\n run(\"echo `date` >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_ERR_FILE)\n run(\"echo '-----------------------------' > %s\" % REMOTE_LOG_FILE)\n run(\"echo `date` >> %s\" % REMOTE_LOG_FILE)\n run(\"echo '-----------------------------' >> %s\" % REMOTE_LOG_FILE)\n\n puts(yellow(\"[Update repo: %s]\" % REPO_NAME))\n with cd(remote_dir):\n run(\"git pull origin master >> %s 2>> %s\" %\n 
(REMOTE_LOG_FILE, REMOTE_ERR_FILE))\n\n # reminder new static files\n puts(yellow('Do not forget to run collect staticfiles on DJANGO server.'))", "def deploy():\n git_pull()\n# build_virtualenv()\n# collectstatic()\n migrate()\n# reload_gunicorn()\n# restart_celery()\n puts(green(\"Deployment done!\"))", "def deploy():\n archive_path = do_pack()\n if archive_path is False:\n return false\n\n deploy_return = do_deploy(archive_path)\n return deploy_return", "def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result", "def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")", "def do_deploy(archive_path):\n if path.exists(archive_path):\n\n # File name without .tgz\n file_ext = archive_path.split('/')[1]\n file_alone = file_ext.split(\".\")[0]\n curr_release = \"/data/web_static/releases/\" + file_alone + '/'\n\n result = True\n\n # Deploy compressed file to the server /tmp/ directory\n upload = put(archive_path, \"/tmp/\")\n if upload.failed:\n result = False\n\n # Make dir to store the release\n dir_release = run(\"sudo mkdir -p \" + curr_release)\n if dir_release.failed:\n result = False\n\n # Uncompress file inside the folder created\n uncompress = run(\"sudo tar -xzf \" + \"/tmp/\\\n\" + file_ext + \" -C \" + curr_release)\n if uncompress.failed:\n result = False\n\n # Move all files from web_static to folder release\n move_info = run(\"sudo mv \" + curr_release + \"\\\nweb_static/* \" + curr_release)\n if move_info.failed:\n result = False\n\n # Remove empty web_static directory\n rm_empty = run(\"sudo rm -rf \" + curr_release + \"\\\nweb_static/\")\n if rm_empty.failed:\n result = False\n\n # Remove symbolic link current\n rm_link = run(\"sudo rm -rf /data/\\\nweb_static/current\")\n if rm_link.failed:\n result = False\n\n # Make new symbolic link\n new_link = run(\"sudo ln -s \" + curr_release + \" /data/\\\nweb_static/current\")\n if new_link.failed:\n result = False\n\n return result\n else:\n return False", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def deploy():\n setup()\n builddir = get_build_dir()\n if sys.platform == 'win32':\n # Support cygwin rsync on windows:\n build_path = cygpath(slashed(builddir))\n else:\n build_path = slashed(builddir)\n rsync_project(env.admin_webroot, build_path, exclude=\".*\", delete=True)\n sudo(\"chmod -R 755 %(admin_webroot)s\" % env)", "def unpackToProject(self,archive,project,progress=None):\n progress = progress or bolt.Progress()\n files = self.sortFiles([x[0] for x in self.fileSizeCrcs])\n if not files: return 0\n #--Clear Project\n destDir = dirs['installers'].join(project)\n if destDir.exists(): destDir.rmtree(safety='Installers')\n #--Extract\n progress(0,project.s+_(\"\\nExtracting files...\"))\n self.unpackToTemp(archive,files,SubProgress(progress,0,0.9))\n #--Move\n progress(0.9,project.s+_(\"\\nMoving files...\"))\n count = 0\n tempDir = self.tempDir\n for file in files:\n srcFull = tempDir.join(file)\n destFull = destDir.join(file)\n if srcFull.exists():\n srcFull.moveTo(destFull)\n count += 
1\n self.clearTemp()\n return count", "def deploy():\n\n archive_path = do_pack()\n\n if archive_path is None:\n return False\n\n return do_deploy(archive_path)", "def publish(self, path, recipient, *args):\n if not os.environ.get(\"OS_PROJECT_ID\", None):\n logging.log(\n logging.ERROR,\n \"Openstack RC file hasn't been sourced in the working %s%s\",\n \"environment. Please source an Openstack RC file to enable\",\n \" the use of Openstack tools.\",\n )\n sys.exit(-1)\n\n container = \"shared-upload-\" + recipient + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\")\n\n subprocess.call([\"swift\", \"upload\", container, path]) # nosec\n\n self.share(container, recipient, *args)", "def upload(project, private=None, site=None, username=None, token=None, suffix='.tar.bz2', log_level=None):\n failed = _check_problems(project)\n if failed is not None:\n return failed\n\n # delete=True breaks on windows if you use tmp_tarfile.name to re-open the file,\n # so don't use delete=True.\n tmp_tarfile = NamedTemporaryFile(delete=False, prefix=\"anaconda_upload_\", suffix=suffix)\n tmp_tarfile.close() # immediately un-use it to avoid file-in-use errors on Windows\n try:\n status = archive(project, tmp_tarfile.name)\n if not status:\n return status\n status = client._upload(project,\n tmp_tarfile.name,\n uploaded_basename=(project.name + suffix),\n private=private,\n site=site,\n username=username,\n token=token,\n log_level=log_level)\n return status\n finally:\n os.remove(tmp_tarfile.name)", "def call_git_push():\n print(\"This will commit and push the git repo\")\n today = datetime.datetime.today()\n call([\"git\", \"add\", \".\"])\n call([\"git\", \"commit\", \"-m\", \"Updated notes. {:%Y-%m-%d %H:%M:%S}\".format(today)])\n call([\"git\", \"push\", \"origin\", \"master\"])", "def archive_repository(self, repository_id, new_id):\n if not repository_id:\n _exit_if_errors(['--id is required'])\n if not new_id:\n _exit_if_errors(['--new-id is required'])\n\n _, errors = self.rest.archive_backup_repository(repository_id, new_id)\n _exit_if_errors(errors)\n _success('Archived repository')", "def push(ctx):\n dufl_root = ctx.obj['dufl_root']\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n git.run('push', 'origin', git.working_branch())", "def do_deploy(archive_path):\n\n if not os.path.exists(archive_path):\n return(False)\n try:\n put(archive_path, \"/tmp/\")\n folder_path = \"/data/web_static/releases/\" + archive_path[9:-4]\n name_file = archive_path[9:]\n name_folder = archive_path[9:-4]\n date = archive_path[21:-4]\n releases = \"/data/web_static/releases/\"\n\n run(\"mkdir -p {}\".format(folder_path))\n run(\"tar -xzf /tmp/{} -C {}\".format(name_file, folder_path))\n run(\"rm /tmp/{}\".format(name_file))\n run(\"mv {}{}/web_static/* {}{}/\"\n .format(releases, name_folder, releases, name_folder))\n run(\"rm -rf {}{}/web_static\".format(releases, name_folder))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s {} /data/web_static/current\".format(folder_path))\n print(\"New version deployed!\")\n\n return(True)\n except BaseException:\n return (False)", "def do_deploy(archive_path):\n\n if not os.path.exists(archive_path):\n return False\n\n ret = True\n\n tmpfolder = put(archive_path, '/tmp/')\n\n if tmpfolder.failed:\n ret = False\n\n dirc = archive_path.replace(\".tgz\", \"\").replace(\"versions/\", \"\")\n dest = run('mkdir -p /data/web_static/releases/' + dirc + '/')\n\n if dest.failed:\n ret = False\n\n unpack = run('tar -xzf /tmp/' + dirc + '.tgz' +\n ' -C /data/web_static/releases/' + 
dirc + '/')\n\n if unpack.failed:\n ret = False\n\n clean = run('rm /tmp/' + dirc + '.tgz')\n\n if clean.failed:\n ret = False\n\n move = run('mv /data/web_static/releases/' + dirc +\n '/web_static/* /data/web_static/releases/' + dirc + '/')\n\n if move.failed:\n ret = False\n\n cleanfolder = run('rm -rf /data/web_static/releases/' + dirc +\n '/web_static')\n\n if cleanfolder.failed:\n ret = False\n\n rmold = run('rm -rf /data/web_static/current')\n\n if rmold.failed:\n ret = False\n\n new = run('ln -sf /data/web_static/releases/' + dirc +\n '/' + ' /data/web_static/current')\n\n if new.failed:\n ret = False\n\n if ret:\n print(\"New version deployed!\")\n\n return ret", "def upload():\n\n # Our credentials are only available from within the main repository and not forks.\n # We need to prevent uploads from all BUT the branches in the main repository.\n # Pull requests and master-branches of forks are not allowed to upload.\n is_pull_request = (\n (\"TRAVIS_PULL_REQUEST\" in os.environ and os.environ[\"TRAVIS_PULL_REQUEST\"] != \"false\") or\n \"APPVEYOR_PULL_REQUEST_NUMBER\" in os.environ\n )\n if is_pull_request:\n click.echo(\"Refusing to upload artifacts from a pull request!\")\n return\n\n if \"AWS_ACCESS_KEY_ID\" in os.environ:\n subprocess.check_call([\n \"aws\", \"s3\", \"cp\",\n \"--acl\", \"public-read\",\n DIST_DIR + \"/\",\n \"s3://snapshots.mitmproxy.org/{}/\".format(UPLOAD_DIR),\n \"--recursive\",\n ])\n\n upload_pypi = (\n TAG and\n \"WHEEL\" in os.environ and\n \"TWINE_USERNAME\" in os.environ and\n \"TWINE_PASSWORD\" in os.environ\n )\n if upload_pypi:\n whl = glob.glob(join(DIST_DIR, 'mitmproxy-*-py3-none-any.whl'))[0]\n click.echo(\"Uploading {} to PyPi...\".format(whl))\n subprocess.check_call([\n \"twine\",\n \"upload\",\n whl\n ])\n\n upload_docker = (\n (TAG or BRANCH == \"master\") and\n \"DOCKER\" in os.environ and\n \"DOCKER_USERNAME\" in os.environ and\n \"DOCKER_PASSWORD\" in os.environ\n )\n if upload_docker:\n docker_tag = \"dev\" if BRANCH == \"master\" else VERSION\n\n click.echo(\"Uploading Docker image to tag={}...\".format(docker_tag))\n subprocess.check_call([\n \"docker\",\n \"login\",\n \"-u\", os.environ[\"DOCKER_USERNAME\"],\n \"-p\", os.environ[\"DOCKER_PASSWORD\"],\n ])\n subprocess.check_call([\n \"docker\",\n \"push\",\n \"mitmproxy/mitmproxy:{}\".format(docker_tag),\n ])", "def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')", "def archive(self, user: User, snapshot: str, path: str, **callback) -> Job:\n # Get the upload policy\n policy = snapshots_storage().generate_post_policy(path)\n url = policy.get(\"url\") if policy else None\n secrets = policy.get(\"fields\") if policy else None\n\n return Job.objects.create(\n project=self,\n creator=user,\n method=JobMethod.archive.name,\n params=dict(project=self.id, snapshot=snapshot, path=path, url=url,),\n secrets=secrets,\n description=f\"Archive project '{self.name}'\",\n **callback,\n )", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def do_deploy(archive_path):\n if not os.path.isfile(archive_path):\n return False\n\n try:\n file_name = archive_path[9:]\n file_n_short = file_name[:-4]\n curr_path = os.getcwd()\n full_path = curr_path + \"/\" + archive_path\n put(full_path, \"/tmp/\")\n run(\"mkdir -p /data/web_static/releases/\" + file_n_short)\n run(\"tar -xzf /tmp/\" + file_name 
+ \" -C /data/web_static/releases/\" +\n file_n_short + \"/\")\n run(\"rm /tmp/\" + file_name)\n run(\"mv /data/web_static/releases/\" + file_n_short +\n \"/web_static/* /data/web_static/releases/\" + file_n_short + \"/\")\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s /data/web_static/releases/\" +\n file_n_short + \" /data/web_static/current\")\n return True\n except:\n print(\"error\")\n return False", "def do_deploy(archive_path):\n if not os.path.isfile(archive_path):\n return False\n with api.cd(\"/tmp\"):\n basename = os.path.basename(archive_path)\n root, ext = os.path.splitext(basename)\n opath = \"/data/web_static/releases/{}\".format(root)\n try:\n ppath = api.put(archive_path)\n if files.exists(opath):\n api.run(\"rm -rdf {}\".format(opath))\n api.run(\"mkdir -p {}\".format(opath))\n api.run(\"tar -xzf {} -C {}\".format(ppath[0], opath))\n api.run(\"rm -f {}\".format(ppath[0]))\n api.run(\"mv -u {}/web_static/* {}\".format(opath, opath))\n api.run(\"rm -rf {}/web_static\".format(opath))\n api.run(\"rm -rf /data/web_static/current\")\n api.run(\"ln -s {} /data/web_static/current\".format(opath))\n print(\"New version deployed!\")\n except:\n return False\n else:\n return True", "def zip_repo(src_path, dest_path):\n tar = tarfile.open(dest_path, \"w:gz\")\n for file_name in glob.glob(os.path.join(src_path, \"*\")):\n tar.add(file_name, os.path.basename(file_name))\n\n tar.close()", "def do_deploy(archive_path):\n if not os.path.exists(archive_path):\n return False\n\n file_ext = archive_path[archive_path.find('/') + 1:]\n file_name = archive_path[archive_path.find('/') + 1: -4]\n\n result = put(archive_path, '/tmp/' + file_ext)\n if result.failed:\n return False\n\n result = run('mkdir -p /data/web_static/releases/' + file_name + '/')\n if result.failed:\n return False\n\n result = run('tar -xzf /tmp/' + file_ext +\n ' -C /data/web_static/releases/' + file_name + '/')\n if result.failed:\n return False\n\n result = run('rm /tmp/' + file_ext)\n if result.failed:\n return False\n\n result = run('mv /data/web_static/releases/' + file_name +\n '/web_static/* /data/web_static/releases/' + file_name + '/')\n if result.failed:\n return False\n\n result = run('rm -rf /data/web_static/releases/' + file_name +\n '/web_static')\n if result.failed:\n return False\n\n result = run('rm -rf /data/web_static/current')\n if result.failed:\n return False\n\n result = run('ln -s /data/web_static/releases/' +\n file_name + '/ /data/web_static/current')\n if result.failed:\n return False\n\n print('New version deployed!')\n return True", "def do_deploy(archive_path):\n if os.path.isfile('{}'.format(archive_path)) is False:\n return False\n tgz_file = archive_path.split('/')[-1]\n storage_location = '/tmp/'\n new_location = '/data/web_static/releases/'\n upload = put('{}'.format(archive_path), storage_location)\n if upload is False:\n return False\n create_dir = run('mkdir -p {}'.format(new_location +\n tgz_file.replace('.tgz', '')))\n if create_dir is False:\n return False\n uncompress = run('tar -xzvf {} -C {}'.format(storage_location +\n tgz_file, new_location +\n tgz_file.replace('.tgz', '')))\n if uncompress is False:\n return False\n delete_file = run('rm -f {}'.format(storage_location + tgz_file))\n if delete_file is False:\n return False\n new_route = \"{}\".format(new_location + tgz_file.replace('.tgz', ''))\n move_files = run('mv {}/web_static/* {}'.format(new_route, new_route))\n if move_files is False:\n return False\n delete_folder = run('rm -rf 
{}/web_static'.format(new_route))\n if delete_folder is False:\n return False\n sym_link_name = '/data/web_static/current'\n delete_sym = run('rm -f {}'.format(sym_link_name))\n if delete_sym is False:\n return False\n create_sym = run('ln -sT {} {}'.format(new_location +\n tgz_file.replace('.tgz', ''),\n sym_link_name))\n if create_sym is False:\n return False\n return True", "def do_deploy(archive_path):\n if not exists(archive_path):\n return False\n fileNameExt = archive_path.split('/')[-1]\n fileName = fileNameExt.split(\".\")[0]\n result = put(archive_path, '/tmp/{}'.format(fileNameExt))\n if result.failed:\n return False\n result = run(\"rm -rf /data/web_static/releases/{}/\".format(fileName))\n if result.failed:\n return False\n result = run(\"mkdir -p /data/web_static/releases/{}/\".format(fileName))\n if result.failed:\n return False\n result = run(\"tar -xzf /tmp/{} -C /data/web_static/releases/{}/\"\n .format(fileNameExt, fileName))\n if result.failed:\n return False\n result = run(\"rm /tmp/{}\".format(fileNameExt))\n if result.failed:\n return False\n input = \"mv /data/web_static/releases/{}/web_static/*\\\n /data/web_static/releases/{}/\".format(fileName, fileName)\n result = run(input)\n if result.failed:\n return False\n result = run(\"rm -rf /data/web_static/releases/{}/web_static\"\n .format(fileName))\n if result.failed:\n return False\n result = run(\"rm -rf /data/web_static/current\")\n if result.failed:\n return False\n result = run(\"ln -s /data/web_static/releases/{}/ /data/web_static/current\"\n .format(fileName))\n if result.failed:\n return False\n print(\"New version deployed!\")\n return True", "def makeArchiveGitSubcommand(self, prefix, revision=u\"HEAD\", outputFilePath=None):\n command = \"archive --prefix=%s/ %s \"\n command = command % (prefix, revision)\n if outputFilePath is not None:\n command += \" -o %s\" % outputFilePath\n return command", "def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None", "def do_deploy(archive_path):\n if not os.path.isfile(archive_path):\n return False\n with api.cd('/tmp'):\n basename = os.path.basename(archive_path)\n root, ext = os.path.splitext(basename)\n outpath = '/data/web_static/releases/{}'.format(root)\n try:\n putpath = api.put(archive_path)\n if files.exists(outpath):\n api.run('rm -rdf {}'.format(outpath))\n api.run('mkdir -p {}'.format(outpath))\n api.run('tar -xzf {} -C {}'.format(putpath[0], outpath))\n api.run('rm -f {}'.format(putpath[0]))\n api.run('mv -u {}/web_static/* {}'.format(outpath, outpath))\n api.run('rm -rf {}/web_static'.format(outpath))\n api.run('rm -rf /data/web_static/current')\n api.run('ln -s {} /data/web_static/current'.format(outpath))\n print('New version deployed!')\n except:\n return False\n else:\n return True", "def push():\n local('hg push jvacx')", "def deploy(git_branch=None, restart_url='http://rnacentral.org', 
quick=False):\n if env.deployment == 'remote':\n deploy_remotely(git_branch, restart_url, quick)\n elif env.deployment == 'local':\n deploy_locally(git_branch, restart_url, quick)\n else:\n print('Check usage')", "def do_deploy(archive_path):\n if not os.path.isfile(archive_path):\n return False\n with api.cd(\"/tmp\"):\n basename = os.path.basename(archive_path)\n root, ext = os.path.splitext(basename)\n opath = \"/data/web_static/releases/{}\".format(root)\n try:\n ppath = api.put(archive_path)\n if files.exists(opath):\n api.run(\"rm -rdf {}\".format(opath))\n api.run(\"mkdir -p {}\".format(opath))\n api.run(\"tar -xzf {} -C {}\".format(ppath[0], opath))\n api.run(\"rm -f {}\".format(ppath[0]))\n api.run(\"mv -u {}/web_static/* {}\".format(opath, opath))\n api.run(\"rm -rf {}/web_static\".format(opath))\n api.run(\"rm -rf /data/web_static/current\")\n api.run(\"ln -sf {} /data/web_static/current\".format(opath))\n print(\"New version deployed!\")\n except:\n return False\n else:\n return True", "def deploy():", "def do_pack():\n date = datetime.datetime.now()\n archive = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(date.year,\n date.month,\n date.day,\n date.hour,\n date.minute,\n date.second)\n local('mkdir -p versions')\n check = local('tar -cvzf {} web_static'.format(archive))\n if check.failed:\n return None\n else:\n return archive", "def do_pack():\n\n now = datetime.now()\n time_now = now.strftime(\"%Y%m%d%H%M%S\")\n archive_name = \"versions/web_static_\" + time_now + \".tgz\"\n local('mkdir -p versions')\n archive_command = local(\"tar -zcvf \" + archive_name + \" web_static\")\n\n if archive_command.succeeded:\n return archive_name\n\n return None", "def createproject(project_name):\n app_clone_script = 'git clone https://github.com/jaarce/falcon-bp.git %s' % project_name\n subprocess.call(app_clone_script.split(' '))", "def do_deploy(archive_path):\n if not path.exists(archive_path):\n return False\n splitted = archive_path.split(\"/\")\n noexten = path.splitext(splitted[1])[0]\n\n try:\n put(archive_path, \"/tmp/\")\n run(\"mkdir -p /data/web_static/releases/{}\".format(noexten))\n run(\"tar -xzf /tmp/{} -C /data/web_static/releases/{}/\"\n .format(splitted[1], noexten))\n run(\"rm /tmp/{}\".format(splitted[1]))\n run(\"mv /data/web_static/releases/{}/web_static/* \\\n /data/web_static/releases/{}/\".format(noexten, noexten))\n run(\"rm -rf /data/web_static/releases/{}/web_static\".format(noexten))\n run(\"rm -rf /data/web_static/current\")\n run(\"ln -s /data/web_static/releases/{}/\\\n /data/web_static/current\".format(noexten))\n except Exception:\n return False", "def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))", "def upload(self, connection):\n if not self.already_deployed(connection):\n if self.config.project_type == \"java\":\n print(blue('Pushing jar to nexus server'))\n connection.local('mvn deploy')\n self._already_deployed = True\n else:\n raise Exception(f\"Unsupported project type: {self.config.project_type}\")", "def deploy_code(ref=None):\n ref = ref or env.default_deploy_ref\n puts(\"Deploying %s\" % ref)\n if not files.exists(env.code_dir):\n sudo('git clone %s %s' % (env.git_url, env.code_dir))\n with cd(env.code_dir):\n sudo('git fetch && git reset --hard %s' % ref)", "def do_deploy(archive_path):\n if not archive_path:\n return False\n if not os.path.exists(archive_path):\n return False\n\n filename = archive_path.split(\"/\")[-1]\n put(archive_path, \"/tmp/{}\".format(filename))\n\n run(\"sudo mkdir 
-p /data/web_static/releases/{}\".format(filename))\n run(\"sudo tar -xzf /tmp/{} -C /data/web_static/releases/{}\"\n .format(filename, filename))\n run(\"sudo rm /tmp/{}\".format(filename))\n run(\"sudo mv /data/web_static/releases/{}/web_static/*\"\n \" /data/web_static/releases/{}\"\n .format(filename, filename))\n run(\"sudo rm -rf /data/web_static/releases/{}/web_static\"\n .format(filename))\n run(\"sudo rm -rf /data/web_static/current\")\n run(\"sudo ln -s /data/web_static/releases/{}/ /data/web_static/current\"\n .format(filename))\n print(\"New version successfully deployed!\")", "def submit(self, root=None, force=False, repo=None):\n import ambry.util as du\n \n if repo:\n self.repo_name = repo\n self.set_api()\n \n import os\n from os.path import basename\n \n ckb = self.remote.update_or_new_bundle_extract(self.bundle)\n \n sent = set()\n \n self.remote.put_package(ckb)\n \n for doc in self.bundle.config.group('about').get('documents',[]):\n self.store_document(ckb, doc)\n\n zip_inputs = {}\n\n for extract_data in self.generate_extracts(root=root):\n\n zip = extract_data.get('zip', False)\n will_zip = False\n \n if zip == 'dir':\n zip_inputs[os.path.dirname(extract_data['path'])] = extract_data\n will_zip = True\n elif zip == 'file':\n zip_inputs[extract_data['path']] = extract_data\n will_zip = True\n\n file_ = self._do_extract(extract_data, force=force)\n \n if will_zip:\n self.bundle.log(\"{} will get submitted as a zip\".format(file_))\n elif file_ not in sent:\n r = self._send(ckb, extract_data,file_)\n sent.add(file_)\n url = r['ckan_url']\n self.bundle.log(\"Submitted {} to {}\".format(basename(file_), url))\n else:\n self.bundle.log(\"Already processed {}, not sending.\".format(basename(file_)))\n \n \n zip_outputs = self.zip(zip_inputs.keys() )\n \n \n print zip_outputs\n \n for in_zf, out_zf in zip_outputs.items():\n extract_data = zip_inputs[in_zf]\n extract_data['name'] = extract_data['zipname'] if 'zipname' in extract_data else extract_data['name']\n r = self._send(ckb, extract_data,out_zf)\n \n url = r['ckan_url']\n self.bundle.log(\"Submitted {} to {}\".format(basename(out_zf), url))\n \n \n return True", "def ingest_project(project_id, version=None):\n container_path = project_id\n if version:\n container_path = '{}v{}'.format(container_path, str(version))\n fedora_post(container_path)\n project_meta = format_metadata_for_fedora(project_id, version=version)\n res = fedora_update(container_path, project_meta)\n upload_manifest_other(project_id, version=version)\n return res", "def prepare_deploy():\n from fabdeploy.django import test as django_test\n django_test()\n git.add_commit_pull()\n git.push()", "def deploy(branch=None, to='master', keep=False, heroku_app=HEROKU_APP):\n if branch is None:\n proc = subprocess.run(['git', 'branch'], stdout=subprocess.PIPE)\n lines = [\n line[2:]\n for line in proc.stdout.decode('utf8').splitlines()\n if line.startswith('* ')\n ]\n branch = lines[0]\n\n assert branch != to\n\n subprocess.run(['git', 'checkout', to])\n subprocess.run(['git', 'merge', branch])\n if not keep:\n subprocess.run(['git', 'branch', '--delete', branch])\n subprocess.run(['git', 'push'])\n\n migrate(heroku_app)", "def git_sync(commit_ish, force, last_tag, reset, url, directory):\n git_sync_(url, directory, commit_ish, force=force, last_tag=last_tag, reset=reset)", "async def main():\n \n # workflow status\n global status\n\n # Mode says which objects must be archived: DB dump, source files or both.\n try:\n mode=sys.argv[1]\n except IndexError:\n mode = 
'all'\n\n # queue of files to be archived\n files_to_upload = deque()\n \n logger.trace(\"Archiving ...\")\n # Tasks to archive files and database dump\n list_of_threads = get_list_of_threads(mode=mode)\n\n tar_names = await asyncio.gather(*list_of_threads)\n\n # Clear names list, removing None elements if exist\n tar_names = [name for name in tar_names if name]\n\n files_to_upload.extend(tar_names)\n logger.trace(\"Ok.\")\n\n logger.trace(\"Uploading ...\")\n\n # Connect to the ftp-server and upload the archived files.\n await upload_to_ftp_server(host=FTP.SERVER.value,\n port=FTP.PORT.value,\n login=FTP.LOGIN.value,\n password=FTP.PASSWORD.value,\n files=files_to_upload)\n\n # Remove archived and dump files on the server site.\n clear_garbage(mode=mode, files=tar_names)\n\n # Check the workflow status. If it's not empty, send an error email.\n if len(status) > 0 and ERROR_NOTIFICATION_BY_EMAIL:\n backup_email()", "def _push(project: Project):\n repo = project.repo\n remote_name = project.config.get('github.remote')\n remote = repo.remote(remote_name)\n result = _call_remote_push(remote)\n failures = lfilter(complement(did_git_push_succeed), result)\n if failures:\n for push_info in failures:\n logger.error(\n 'Failed to push ref {from_ref} to {to_ref}'\n .format(from_ref=push_info.local_ref.name,\n to_ref=push_info.remote_ref.name))\n raise BalletError('Push failed')", "def do_pack():\n\n local(\"mkdir -p versions\")\n current = dt.now()\n current = current.now()\n tgz = \"web_static_{}.tgz\".format(current.strftime(\"%Y%m%d%H%M%S\"))\n working = local(\"tar -cavf versions/{} web_static\".format(tgz))\n\n if working.failed:\n return None\n else:\n return \"versions/{}\".format(tgz)", "def push(api_client, folder, verbose):\n local_folder, remote_folder = _get_local_and_remote_folders(folder)\n workspace = WorkspaceApi(api_client)\n\n def work():\n workspace.import_workspace_dir(local_folder, remote_folder,\n True, False, verbose=verbose)\n if not verbose:\n with loadingbar(msg=\"Pushing to {}\".format(remote_folder), width=10,\n fill_char=\"o\", interval=.25):\n work()\n else:\n work()", "def create_from_git(self, token: Any, repo: str):\n params = [token, repo, ]\n method = \"ProjectAPI.CreateFromGit\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))" ]
[ "0.75925666", "0.6769014", "0.6449582", "0.6398534", "0.6263004", "0.6191235", "0.6138252", "0.61108285", "0.60195297", "0.60129213", "0.5945884", "0.5934544", "0.59011316", "0.58871365", "0.5878439", "0.586114", "0.58004034", "0.57955575", "0.5776379", "0.5756278", "0.5727968", "0.57231957", "0.5685913", "0.5675735", "0.56729144", "0.56666195", "0.5657752", "0.5652568", "0.56432843", "0.56374943", "0.56348205", "0.56194335", "0.5604499", "0.5584393", "0.55796677", "0.5556988", "0.55504614", "0.55457765", "0.5519987", "0.55139756", "0.55119437", "0.5510205", "0.5509971", "0.5490029", "0.5488811", "0.5488229", "0.54876524", "0.5486082", "0.5466931", "0.54387605", "0.54291666", "0.54244506", "0.5423888", "0.5419647", "0.5415554", "0.5400006", "0.53957707", "0.5394312", "0.539382", "0.53866136", "0.53840333", "0.53769344", "0.5374031", "0.53734404", "0.5371691", "0.5369572", "0.5354824", "0.5354824", "0.5354824", "0.534213", "0.53406835", "0.53385866", "0.5334302", "0.5334022", "0.53323936", "0.532982", "0.5327669", "0.53270805", "0.53253096", "0.5322829", "0.5322127", "0.5321929", "0.53167355", "0.53148586", "0.5311343", "0.530484", "0.5304707", "0.5297166", "0.52957004", "0.52921575", "0.52921134", "0.52888006", "0.52862597", "0.52785796", "0.5278272", "0.5271172", "0.52709776", "0.5266298", "0.52605194", "0.52601236" ]
0.72552186
1
Search existing spider names in a project
def search_spider_names(project, apikey, name=''): payload = {'project': project, 'apikey': apikey, 'spider': name} req = requests.get(DASH_API_URL + 'spiders/list.json', params=payload) if req.status_code == 200: return [s.get('id') for s in req.json().get('spiders', [])] return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dorkScanner():\n pysearch.PySearch()\n openfile = open(\"sites.txt\", 'r')\n urls = openfile.read()\n openfile.close()\n return urls", "def _search(self, log, progressbar):\n self._urls = []\n for filename in os.listdir(self._path):\n url = 'file:////' + filename\n self._urls.append(url)\n self._urls.sort()", "def addCrawler(name):\n global allCrawlerNames\n if name == 'scihub':\n allCrawlers.append(ScihubCrawler())\n allCrawlerNames = [ c.name for c in allCrawlers ]", "def search(name):\n try:print(f'Searching for {name}...');os.system(f'python -m pip search {name}')\n except Exception as e:print(\"something went wrong\\n{e}\")", "def go_search(self, driver, pid):\n return [self.search_url(website, pid)]", "def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')", "def query_project(self):\n\n # Find stylesheets.\n found = False\n for filename in self.project.namelist():\n if os.path.basename(filename) == 'styles.xml':\n found = True\n print(filename)\n if not found:\n print(\"not found!\")", "def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)", "def search(self, name: str) -> \"Navaids\":\n return self.__class__(\n self.data.query(\n \"description == @name.upper() or name == @name.upper()\"\n )\n )", "def search():\n pass", "def main():\n domain_list = []\n base_url = \"http://localhost:9200/latest-tor/_search?pretty&size=9000&_source=title,domain\"\n keywords_list = ['preteen', 'loli', 'lolita', 'jailbait', 'pthc', 'best cp',\n '\"child porn\"', '\"kid porn\"', '\"child sex\"', '\"cp video\"',\n '\"nude children\"', '\"cp porn\"', '\"free child porn\"', 'kinderporn',\n '\"child rape\"', '\"toddler porn\"', '\"kids videos\"', '\"cp videos\"',\n 'lolilust', '\"pedo porno\"', '\"pedo content\"', 'underage', '\"cp pack\"',\n 'loliporn', 'pedofamily', '\"cp database\"', '\"pedo webcams\"', 'lolitacity']\n '\"xxx child\"', '\"xxx underage\"', '\"young forbidden\"']\n search_terms = []\n for index, term in enumerate(keywords_list):\n search_terms.append(term)\n if len(search_terms) >= 10 or index + 1 == len(keywords_list):\n url = base_url + \"&q=(\" + \" OR \".join(search_terms).replace(\" \", \"%20\") + \")\"\n search(url, domain_list)\n search_terms = []", "def parse_search_page(self, response):\n ###############################################################\n search_name_url_xpath = '//*[@id=\"dnn_dnnLEFTMENU_RadPanel1\"]/ul/li/div/ul/li[2]/a/@href'\n ###############################################################\n search_name_url = response.xpath(search_name_url_xpath).extract_first()\n yield scrapy.Request(response.urljoin(search_name_url), callback = self.parse_search_name_page)", "def spiders(args):\n _projects = lib.get_projects(\n args.target, args.project, username=args.username, password=args.password\n )\n for project in _projects:\n project_spiders = lib.get_spiders(\n args.target, project, username=args.username, password=args.password\n )\n if not args.verbose:\n print(f\"{project}:\")\n if project_spiders:\n print(indent(\"\\n\".join(project_spiders), INDENT_PREFIX))\n else:\n print(INDENT_PREFIX + \"No spiders.\")\n elif project_spiders:\n print(\"\\n\".join(f\"{project} {x}\" for x in project_spiders))", "def __call__(self, *paths):\n\n for item in self.site.items:\n if item.is_page() and item.match(*paths):\n yield item", "def 
search_entries(search):\n _, filenames = default_storage.listdir(\"entries\")\n result = []\n for filename in filenames: \n if filename.endswith(\".md\"):\n nameonly = re.sub(r\"\\.md$\", \"\", filename)\n \n if nameonly.lower() == search.lower():\n #print(\"name only :\", nameonly)\n #print(\"search :\", search)\n return (nameonly)\n elif search.lower() in nameonly.lower():\n result.append(nameonly)\n return(result)", "def test_search_in_name(self, es_with_collector, name_term, matched_company_name):\n CompanyFactory(\n name='whiskers and tabby',\n trading_names=['Maine Coon', 'Egyptian Mau'],\n )\n CompanyFactory(\n name='1a',\n trading_names=['3a', '4a'],\n )\n es_with_collector.flush_and_refresh()\n\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(\n url,\n data={\n 'term': name_term,\n 'entity': 'company',\n },\n )\n\n match = Company.objects.filter(name=matched_company_name).first()\n if match:\n assert response.data['count'] == 1\n assert len(response.data['results']) == 1\n assert response.data['results'][0]['id'] == str(match.id)\n assert [{'count': 1, 'entity': 'company'}] == response.data['aggregations']\n else:\n assert response.data['count'] == 0\n assert len(response.data['results']) == 0", "def search(self, title):\n close_matches = self.get_close_matches_by_title(title)\n count = 0\n for item in self.item_list.values():\n if item.title in close_matches:\n print(item)\n count += 1\n if count == 0:\n print(\"No result found.\")", "def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break", "def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects", "def run_search(self, links):\n for s in links:\n self._run_command(\" s \\\"{}\\\" \\n\".format(s))", "def find(self, egg):", "def name_search(self, search):\n if isinstance(search, str):\n name_re = re.compile(search)\n else:\n name_re = search\n matches = [\n entry\n for entry in self\n if entry is not None and name_re.search(entry.name)\n ]\n return matches", "def runSpider(spider, searchterm = None, fullink = None, spiderbotid = -1):\n sclogic.runSpider(spider, searchterm, fullink, spiderbotid)", "def Collection_search_name(C:list, name:str) -> list:\r\n restaurants = []\r\n for r in C:\r\n for dish in r.menu:\r\n if name in dish.name:\r\n restaurants.append(r)\r\n return restaurants", "def get_items_to_find(self):\n self.items_to_find = ['sole', 'farina', 'innaffiatoio']", "def find_with_deps(self, package_names):", "def search(self, term):", "def search(self, *args, **kwargs):", "def __init__(self, name=\"\"):\n super().__init__(\"search\", name)", "def search_service(self, name_filter):\n rs=search_service(name_filter)\n for el in rs:\n print(el)", "def test_several_folders(self):\n spider_path = 'tests/sample_spiders/'\n test_data = [\n ('valid_metadata', 1),\n ('no_metadata', 0),\n ('incomplete_metadata', 0),\n ('two_spiders_one_file', 0),\n ('no_basespider_inheritance', 0)\n ]\n\n m = SpiderManager()\n for spidername, valid_spiders in test_data:\n path = spider_path + spidername\n os.environ['SPIDER_PATH'] = path\n\n m.load(path)\n spiders = m.get_spiders()\n\n self.assertEqual(type(spiders), list)\n self.assertEqual(len(spiders), valid_spiders)", "def search_main() -> None:\n\n 
logger.info(\"Starting search\")\n links = run_search(grab_search_info())\n if links:\n logger.info(\"Printing links\")\n for key in links:\n print(f\"{key.upper()}: {links[key]}\")", "def _search_old_applications(self,path,filename=None):\n\t\t# iterate through the applications and their top \n\t\t# level directories and search for the given 'path'\n\t\tresult_objs = []\n\t\tfor app,dirs in self.OLD_APPS.iteritems():\n\t\t\tfor top_lvl in dirs:\n\t\t\t\tpath_folder = \"%s/%s\" % (top_lvl,path)\n\t\t\t\tif filename:\n\t\t\t\t\tobj = Tina.tina_find(\n\t\t\t\t\t\tapplication=app,\n\t\t\t\t\t\tpath_folder=path_folder,\n\t\t\t\t\t\tpattern=filename,\n\t\t\t\t\t\tlist_all=True,\n\t\t\t\t\t\tskip_filter=self.skip_filter)\n\t\t\t\telse:\n\t\t\t\t\t#print \"Searching for:\",app,path_folder\n\t\t\t\t\tobj = Tina.tina_find(\n\t\t\t\t\t\tapplication=app,\n\t\t\t\t\t\tpath_folder=path_folder,\n\t\t\t\t\t\tlist_all=True,\n\t\t\t\t\t\tskip_filter=self.skip_filter)\n\t\t\t\tif obj:\n\t\t\t\t\tresult_objs.append(obj)\n\t\treturn result_objs", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)", "def search(name_search: str, limit: int, no_cache: bool, log_level: bool, as_json: bool, threads: int) -> None:\n\n if no_cache:\n cache.clear()\n\n logging.basicConfig(level=log_level)\n\n try:\n found_packages = find_packages(name_search, click.progressbar, threads)\n except AssertionError as e:\n print(e)\n return\n\n sorted_packages = list(reversed(sorted(found_packages, key=attrgetter('score'))))\n if limit:\n sorted_packages = sorted_packages[:limit]\n\n print_func = _print_as_json if as_json else _print_as_text\n print_func(sorted_packages)", "async def team_search(self, ctx: commands.Context, username: str):\n all_usernames = {team_id: team.username for team_id, team in self.teams.items()\n if team is not None}\n suggestions = []\n log.info(repr(fuzzywuzzy.process.extract(\n username, all_usernames, limit=5)))\n for fuzz_username, rating, fuzz_id in fuzzywuzzy.process.extract(\n username, all_usernames, limit=5):\n if rating < 50:\n break\n fuzz_team = self.teams[fuzz_id]\n suggestions.append(\n f'(ID: **{fuzz_team.team_id}**) **{fuzz_team.display_name[:40]}**'\n f' -- {len(fuzz_team.users)} registered members')\n if suggestions:\n await ctx.send('\\n'.join(suggestions))\n else:\n await ctx.send(f\"Couldn't find any teams whose usernames resembled `{username}`\")", "def searchItems(name, allPages = False):\n return Gw2Spidy._paginatedRequest(allPages, 'item-search', name)", "def search_past_items(request):\n query = request.GET.get('q')\n\n if query:\n results = PastSold.objects.filter(Q(name__icontains=query) | Q(description__icontains=query))\n else:\n results = PastSold.objects.all()\n pages = pagination(request, results, num=4)\n\n context = {\n 'items': pages[0],\n 'page_range': pages[1]\n }\n\n return render(request, \"index.html\", context)", "def assets_search(ctx, text, pretty):\n ocean = ctx.obj['ocean']\n response = ocean.search(text, pretty)\n echo(response)", "async def search(self, *args, **kwargs):\n pass", "def __ui_search_discipline_by_name(self, search):\n try:\n result = 
self.__discipline_controller.search_by_name(search)\n for discipline in result:\n print(str(discipline))\n\n except RepositoryException as re:\n print(re)\n return", "def _search(self, log, progressbar):\n # FIXME: This should be part of the initialization.\n # The urls() method must return a list.\n self._urls = {}\n\n # Can't allow an empty search string; the repository is too big and\n # will return too many files. An empty search string is legal because\n # local repositories are small, and an empty string asks for all the\n # files.\n if len(self._search_params) == 0:\n return\n\n esgf_node = self._repo_parameters['search_node']\n conn = SearchConnection(esgf_node, distrib=True)\n ctx = conn.new_context(**self._search_params)\n hit_count = ctx.hit_count\n\n # Each search clears the files from before. The pyesgf library allows\n # for searches to be refined. Consider utilizing that capability here.\n if hit_count > 0:\n progressbar.start(hit_count)\n# self._variable = self._search_params['variable']\n datasets = ctx.search()\n i = 1\n for dsresult in datasets:\n if 'variable' in self._search_params:\n remotefiles = dsresult.file_context().search(\n variable=self._search_params['variable'])\n else:\n remotefiles = dsresult.file_context().search()\n msg = \"Searching %s of %s. %s files.\" % (i, hit_count, len(remotefiles))\n log.debug(msg)\n \n for remotefile in remotefiles:\n try:\n urlobj = urlparse(remotefile.opendap_url)\n filename = urlobj.path.split('/')[-1]\n self._urls[filename] = remotefile.opendap_url\n except AttributeError:\n print \"Missing OPeNDAP URL found.\"\n i += 1\n progressbar.update(msg)", "def scrape_BI(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text)\n companies = soup.find_all('h3', class_='slide-title')\n #names = []\n driver = init_driver()\n for company in companies[:]:\n name = company.getText().strip()\n # if \" \" in name:\n # name.replace(' ','+')\n html_code = load_google(driver, name)\n #name, address = scrape_google(html_code)\n url = scrape_google(html_code)\n print(name,url)\n #names.append(name)\n driver.quit()\n #print(names)", "def scan(self):\n\n # Check for whether session is still alive\n if not self.__projects:\n return\n\n Console.info(\"Scanning projects...\")\n Console.indent()\n\n for project in self.__projects:\n project.scan()\n\n for postscan in self.__postscans:\n postscan()\n\n Console.outdent()", "def iter_spider_classes(module):\n ...", "def search(self, name, provider='thetvdb'):\n which = [self.providers[provider]]\n results = yield from self._invoke_providers('search', name, which=which, bubble=True)\n return list(itertools.chain.from_iterable(l for p, l in results))", "def test_filter_name():\n repo_list = filter_repos(fixtures.config_dict_expanded, name=\".vim\")\n\n assert len(repo_list) == 1\n for r in repo_list:\n assert r['name'] == '.vim'", "def search(self,path,key_words):\t#key_words must be tuple\n\t\ttry:\n\t\t\tall=os.walk(path,False)\t#os.walk() is a generator , the return is a tuple which is (dirpath,dirnames,filenames)\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tfor item in all:\n\t\t\t\tfilepath=item[0]\n\t\t\t\tfor filename in item[2]:\n\t\t\t\t\tfor key_word in key_words:\t#find all key_word\n\t\t\t\t\t\tif key_word in filename.lower():\t#ignore case of word , and only search filename\n\t\t\t\t\t\t\tself.result.append(os.path.join(filepath,filename))", "def contains(name):", "def scrap_keywords():\n ParScr = ParallelScraper()\n ParScr.create_and_run_threads()\n return", "def 
find_all(client):\n return list(map(lambda s: Site(s), client.get_api_resource(\"self/sites\")))", "def search(request):\n if 'find_project' in request.GET and request.GET['find_project']:\n project_name=request.GET.get('find_project')\n \n searched_project=Project.search_project(project_name)\n \n return render(request,'search_results.html',{'searched_project':searched_project})", "def search(self, word):", "def search():\n url = create_search_url()\n links = make_selenium_search(url)\n\n return links", "def find_names(s):\n \"*** YOUR CODE HERE ***\"", "def searchClientProject(self, name):\n for client in self.getClients():\n try:\n for project in self.getClientProjects(client['id']):\n if project['name'] == name:\n return project\n except Exception:\n continue\n\n print('Could not find client by the name')\n return None", "def find(self, name):\n return Search(self.request).find(name)", "def advanced_search(self, pattern):\n pass", "def find_all_by_name(folder, name):\n # return all entities by running the generator to it's end\n return list(find_by(folder, lambda e: e.name == name))", "def _find(self, key, items, places, human_name, join, multi):\r\n if key in self:\r\n return self[key]\r\n\r\n human_name = human_name or key\r\n\r\n # expand env variables in `places` and split on colons\r\n places = itertools.chain.from_iterable(os.path.expandvars(p).split(os.pathsep) for p in places)\r\n places = map(os.path.expanduser, places)\r\n\r\n glob_places = itertools.chain.from_iterable(glob(p) for p in places)\r\n \r\n print 'Searching for', human_name, '...',\r\n results = []\r\n for p in glob_places:\r\n for i in items:\r\n path = os.path.join(p, i)\r\n if os.path.exists(path):\r\n result = path if join else p\r\n if not multi:\r\n print colorize(result, 'green')\r\n self[key] = result\r\n return result\r\n results.append(result)\r\n\r\n if results:\r\n if len(results) > 1:\r\n formatted_results = ''.join(['\\n - ' + x for x in results])\r\n print colorize('found multiple: %s' % formatted_results, 'green')\r\n else:\r\n print colorize(results[0], 'green')\r\n\r\n self[key] = results\r\n return results\r\n\r\n print colorize('FAILED', 'red')\r\n raise Abort(\"%s not found. 
Searched in following places: %s\" %\r\n (human_name, ''.join(['\\n - ' + p for p in places])))", "def search(self, values: dict):\n self.results.clear()\n self.matches, self.records = 0, 0\n # Extensions to be ignored.\n if values[\"-EXT-\"].endswith(\";\"):\n values[\"-EXT-\"] = values[\"-EXT-\"][:-1]\n if values[\"-DIR-\"].endswith(\";\"):\n values[\"-DIR-\"] = values[\"-DIR-\"][:-1]\n ignore_extensions = tuple(values[\"-EXT-\"].split(\";\")) \\\n if values[\"-EXT-\"] else ()\n # Folders to be ignored.\n ignore_folders = tuple(\"/\" + folder + \"/\"\n for folder in values[\"-DIR-\"].split(\";\")\n if values[\"-DIR-\"])\n \n # Check whether to ignore or search dot files/folders\n if values[\"-DOT-\"]:\n ignore_folders = (\"/.\",) + ignore_folders\n \n if values[\"CONTAINS\"]:\n function = self.contains\n elif values[\"STARTSWITH\"]:\n function = self.startswith\n else:\n function = self.endswith\n \n search_term = values[\"TERM\"].lower()\n for path, files in self.file_index:\n if any(ignored_folder in path + \"/\"\n for ignored_folder in ignore_folders):\n continue\n for file in files:\n if file.endswith(ignore_extensions) or \\\n values[\"-DOT-\"] and file.startswith(\".\"):\n continue\n self.records += 1\n if function(file.lower(), search_term):\n result = os.path.join(path, file)\n self.results.append(result)\n self.matches += 1\n \n with open(\"search_results.txt\", \"w\") as f:\n f.writelines(self.results)", "def stract_scans(self, projects):\n pass", "def search_title(self):\n\t\tnew_name = self.removez_all(self.init_str)\n\t\tresult = self.search_ext(new_name)\n\t\tresult = self.search_encoder(result)\n\t\tresult = self.search_quality(result)\n\t\tresult = self.search_codec(result)\n\t\tresult = self.search_lang(result)\n\t\tresult = self.search_version(result)\n\t\tresult = self.search_source(result)\n\t\tresult = self.search_audio(result)\n\t\tresult = self.search_year(result)\n\t\tresult = result.replace('...', '.')\n\t\tresult = result.replace('..', '.')\n\t\tself.title = self.remove_lasts_dots(result)", "def search_runs(*args, **kwargs):\n return fluent.search_runs(*args, **kwargs)", "def _search(dork): \n retVal = [] \n paths = [] \n\n if not dork: \n return None \n\n headers = {} \n\n headers[HTTP_HEADER.USER_AGENT] = dict(conf.httpHeaders).get(HTTP_HEADER.USER_AGENT, DUMMY_SEARCH_USER_AGENT) \n headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE \n\n gpage = conf.googlePage if conf.googlePage > 1 else 1 \n\n#polluted by xi4okv QQ:48011203 \n\n for gpage in xrange(1,10): \n logger.info(\"using search result page #%d\" % gpage) \n\n url = \"https://m.baidu.com/s?\" \n url += \"word=%s&\" % urlencode(dork, convall=True) \n url += \"&pn=%d\" % ((gpage - 1) * 10) \n\n try: \n req = urllib2.Request(url, headers=headers) \n conn = urllib2.urlopen(req) \n\n requestMsg = \"HTTP request:\\nGET %s\" % url \n requestMsg += \" %s\" % httplib.HTTPConnection._http_vsn_str \n logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg) \n\n page = conn.read() \n code = conn.code \n status = conn.msg \n\n responseHeaders = conn.info() \n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\")) \n #print page \n\n responseMsg = \"HTTP response (%s - %d):\\n\" % (status, code) \n\n if conf.verbose <= 4: \n responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING) \n elif conf.verbose > 4: \n responseMsg += \"%s\\n%s\\n\" % (responseHeaders, page) \n\n logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg) \n except 
urllib2.HTTPError, e: \n pass \n\n urls = [urllib.unquote(match.group(0) or match.group(1)) for match in re.finditer(GOOGLE_REGEX, page, re.I)] \n #retVal = re.findall(GOOGLE_REGEX, page, re.I) \n\n import urlparse \n\n for url in urls: \n urls_pat = re.compile(r\"http://(.*)[^']\") \n aurl = re.findall(urls_pat, url) \n if \"?\" in url and \"baidu\" not in url: \n xpath = urlparse.urlparse(url).path \n if xpath not in paths: \n paths.append(xpath) \n retVal.append(aurl[0]) \n\n #print retVal \n\n return retVal", "def gen_find(filepat, top):\n for path, dir_list, file_list in os.walk(top):\n for name in fnmatch.filter(file_list, filepat):\n yield os.path.join(path, name)", "def find_all(self):", "def name_search(self, name, args=None, operator='ilike', limit=1000):\n args = self.compute_domain_args(args)\n recs = self.search([('name', operator, name)] + args, limit=limit)\n return recs.name_get()", "def search_brands_by_name(mystr):\n brands = Brand.query.filter(Brand.name.like('%'+mystr+'%')).all()\n return brands", "def search(request, is_my_list=\"False\"):\n\n search_type = request.GET.get(\"submit\")\n if search_type:\n\n # get query field\n query = ''\n if request.GET.get(search_type):\n query = request.GET.get(search_type)\n\n proj_ids = []\n cod_ids = []\n\n valid_searches = [constants.STRING_TITLE, constants.STRING_DESCRIPTION, constants.STRING_PROTOCOL,\n constants.STRING_CODER, constants.STRING_AREA, constants.STRING_WORKINGGROUP]\n\n search_in_all = True\n for v in valid_searches:\n if v in request.GET:\n search_in_all = False\n break\n\n if search_in_all or request.GET.get(constants.STRING_TITLE):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.title.lower():\n cod_ids.append(cod.id)\n\n if search_in_all or request.GET.get(constants.STRING_DESCRIPTION):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.additional_information.lower():\n cod_ids.append(cod.id)\n\n if request.GET.get(constants.STRING_PROTOCOL):\n proj_ids += ProjectContainer.objects.filter(protocol__icontains=query).values_list('id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_CODER):\n for pr in ProjectContainer.objects.all():\n for cd in pr.codings.all():\n user = Person.objects.using('datatracker').get(id=cd.coder)\n if query.lower() in user.name.lower():\n proj_ids.append(pr.id)\n break\n\n if search_in_all or request.GET.get(constants.STRING_AREA):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list(\n 'document__group__parent__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n # ids += ProjectContainer.objects.filter(docs__document__group__parent__name__icontains=query).values_list(\n # 'id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_WORKINGGROUP):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(\n DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('document__group__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n \n if 
cod_ids:\n cod_ids = list(set(cod_ids))\n proj_ids += ProjectContainer.objects.filter(codings__id__in=cod_ids).values_list('id', flat=True)\n project_containers = ProjectContainer.objects.filter(id__in=list(set(proj_ids)))\n \n request.session[constants.ALL_CODINGS] = cod_ids\n request.session[constants.ALL_PROJECTS] = project_containers\n\n request.session[constants.MAINTAIN_STATE] = True\n\n return HttpResponseRedirect(\n settings.CODESTAND_PREFIX + '/codestand/matches/show_list/' + \n is_my_list + '/{0}/'.format(constants.ATT_CREATION_DATE) + 'True')\n\n else:\n return render_page(request, constants.TEMPLATE_MATCHES_SEARCH, {\n \"form\": SearchForm()\n })", "def main():\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument(\"name\", nargs=\"+\",\n help=\"DNS name(s) to look up\")\n argument_parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n program_args = argument_parser.parse_args()\n fuckall = []\n for a_domain_name in program_args.name:\n if a_domain_name not in fuckall:\n print_results(collect_results(a_domain_name))\n fuckall.append(a_domain_name)", "def test_name_test_queries(self):\n url = self.get_url(self.study.pk)\n for query in self.TEST_NAME_QUERIES.keys():\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_NAME_QUERIES[query]\n self.assertEqual(len(returned_pks), len(expected_matches),\n msg='Did not find correct number of matches for query {}'.format(query))\n # Make sure the matches found are those that are expected.\n for expected_name in expected_matches:\n name_queryset = models.SourceDataset.objects.filter(dataset_name__regex=r'^{}$'.format(expected_name))\n self.assertEqual(name_queryset.count(), 1)\n expected_pk = name_queryset.first().pk\n self.assertIn(expected_pk, returned_pks,\n msg='Could not find expected dataset name {} with query {}'.format(expected_name, query))", "def test_name_test_queries(self):\n url = self.get_url(self.study.pk)\n for query in self.TEST_NAME_QUERIES.keys():\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_NAME_QUERIES[query]\n self.assertEqual(len(returned_pks), len(expected_matches),\n msg='Did not find correct number of matches for query {}'.format(query))\n # Make sure the matches found are those that are expected.\n for expected_name in expected_matches:\n name_queryset = models.SourceDataset.objects.filter(dataset_name__regex=r'^{}$'.format(expected_name))\n self.assertEqual(name_queryset.count(), 1)\n expected_pk = name_queryset.first().pk\n self.assertIn(expected_pk, returned_pks,\n msg='Could not find expected dataset name {} with query {}'.format(expected_name, query))", "def test_spires_syntax_detected_naked_title(self):\n converter = search_engine_query_parser.SpiresToInvenioSyntaxConverter()\n spi_search = converter.is_applicable(\"t muon\")\n self.assertEqual(spi_search, True)", "def search(query_string):", "def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)", "def find_project(hw, r):\n\n # Search in the current directory for a CMakeLists.txt file that\n # contains something like the given project.\n cmd = \"find . 
-name CMakeLists.txt -exec grep -Hi {0} {{}} \\; | grep -i project\".format(hw)\n p = Popen(cmd, shell=True, stdout=PIPE)\n out = p.stdout.read()\n p.stdout.close()\n p.wait()\n\n # Transform the output into something readable.\n for i in out:\n found = i.split(':')\n \n # Scrub the path name\n path = os.path.dirname(found[0])[2:]\n if not path:\n path = \"top-level directory\"\n else:\n path = \"directory '{0}'\".format(path)\n r.note(\" possible candidate in the {0}\".format(path))", "def find_projects_by_name(self, name):\n projects = []\n for i in storage_utils.get_proj_ids(self._storage_location):\n project = self.find_project_by_id(i)\n if name.upper() in project.name.upper():\n projects.append(project)\n return projects", "def search_multiple_words(words):\n # YOUR CODE HERE #\n pass # delete this when you write your code", "def find(self, words):\n for result in self.index.simple_search(self.name, words, field='content'):\n title = result['title']\n score = int(result.score)\n yield score, title", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def dep_searcher(sents):\n \n result = []\n for s in sents:\n lks = []\n deps = get_deps(s, dep_type)\n tokens = s.tokens\n for opt, pat in search.items():\n pat = filtermaker(pat)\n if opt == 'g':\n for l in deps.links:\n if re.match(pat, l.governor.text):\n lks.append(s.get_token_by_id(l.dependent.idx))\n elif opt == 'd':\n for l in deps.links:\n if re.match(pat, l.dependent.text):\n lks.append(s.get_token_by_id(l.governor.idx))\n elif opt == 'f':\n for l in deps.links:\n if re.match(pat, l.type):\n lks.append(s.get_token_by_id(l.dependent.idx))\n elif opt == 'p':\n for tok in tokens:\n if re.match(pat, tok.pos):\n lks.append(tok)\n elif opt == 'l':\n for tok in tokens:\n if re.match(pat, tok.lemma):\n lks.append(tok)\n elif opt == 'w':\n for tok in tokens:\n if re.match(pat, tok.word):\n lks.append(tok)\n elif opt == 'i':\n for tok in tokens:\n if re.match(pat, str(tok.id)):\n lks.append(tok)\n\n # only return results if all conditions are met\n if searchmode == 'all':\n counted = Counter(lks)\n lks = [k for k, v in counted.items() if v >= len(search.keys())]\n\n lks = list(set([x for x in lks if re.search(regex_nonword_filter, x.word)]))\n\n if exclude is not False:\n to_remove = []\n for op, pat in exclude.items():\n pat = filtermaker(pat)\n for tok in lks:\n if op == 'g':\n for l in deps.links:\n if re.match(pat, l.governor.text):\n to_remove.append(s.get_token_by_id(l.governor.idx))\n elif op == 'd':\n for l in deps.links:\n if re.match(pat, l.dependent.text):\n to_remove.append(s.get_token_by_id(l.dependent.idx))\n elif op == 'f':\n for l in deps.links:\n if re.match(pat, l.type):\n to_remove.append(s.get_token_by_id(l.dependent.idx))\n elif op == 'p':\n for tok in tokens:\n if re.match(pat, tok.pos):\n to_remove.append(tok)\n elif op == 'l':\n for tok in tokens:\n if re.match(pat, tok.lemma):\n to_remove.append(tok)\n elif op == 'w':\n for tok in tokens:\n if re.match(pat, tok.word):\n to_remove.append(tok)\n elif op == 'i':\n for tok in tokens:\n if re.match(pat, str(tok.id)):\n to_remove.append(tok)\n\n if excludemode == 'all':\n counted = Counter(to_remove)\n to_remove = [k for k, v in counted.items() if v >= len(exclude.keys())]\n for i in to_remove:\n try:\n lks.remove(i)\n except ValueError:\n pass\n\n if only_count:\n result.append(len(lks))\n continue\n\n # figure out what to show\n for lk in lks:\n single_result = {}\n node = deps.get_node_by_idx(lk.id)\n\n if 'w' in show:\n single_result['w'] = 
'none'\n if lemmatise:\n single_result['w'] = lk.lemma\n else:\n single_result['w'] = lk.word\n\n if 'l' in show:\n single_result['l'] = lk.lemma\n\n if 'p' in show:\n single_result['p'] = 'none'\n postag = lk.pos\n if lemmatise:\n if postag.lower() in taglemma.keys():\n single_result['p'] = taglemma[postag.lower()]\n else:\n single_result['p'] = postag.lower()\n else:\n single_result['p'] = postag\n if not single_result['p']:\n single_result['p'] == 'none'\n\n if 'f' in show:\n single_result['f'] = 'none'\n for i in deps.links:\n if i.dependent.idx == lk.id:\n single_result['f'] = i.type\n break\n if single_result['f'] == '':\n single_result['f'] = 'root'\n\n if 'g' in show:\n single_result['g'] = 'none'\n for i in deps.links:\n if i.dependent.idx == lk.id:\n if s.get_token_by_id(i.governor.idx):\n if lemmatise: \n single_result['g'] = s.get_token_by_id(i.governor.idx).lemma\n else:\n single_result['g'] = i.governor.text\n else:\n single_result['g'] = 'root'\n break\n\n if 'd' in show:\n single_result['d'] = 'none'\n for i in deps.links:\n if i.governor.idx == lk.id:\n if s.get_token_by_id(i.dependent.idx): \n if lemmatise:\n single_result['d'] = s.get_token_by_id(i.dependent.idx).lemma\n else:\n single_result['d'] = i.dependent.text\n break\n\n if 'r' in show:\n all_lks = [l for l in deps.links]\n distance = distancer(all_lks, lk)\n if distance:\n single_result['r'] = str(distance)\n else:\n single_result['r'] = '-1'\n\n if 'i' in show:\n single_result['i'] = str(lk.id)\n\n if not only_count:\n \n # add them in order\n out = []\n for i in show:\n out.append(single_result[i])\n\n result.append('/'.join(out))\n \n if 'c' in show:\n result = sum(result)\n\n return result", "def run_spider_on_zyte(spider_name):\n print(f'Running spider {spider_name}...')\n data = dict(project=project_id, spider=spider_name)\n response = requests.post('https://app.scrapinghub.com/api/run.json', data=data, auth=(api_key, ''))\n return response.json()['jobid']", "def _mw_search(self, baseurl, searchquery):\n params = urllib.parse.urlencode({\n 'action': 'opensearch',\n 'search': searchquery,\n 'format': 'json',\n })\n api_data = self._mw_api_call(baseurl, params)\n\n search_result_titles = api_data[1]\n if not search_result_titles:\n raise callbacks.Error(f\"No search results for {searchquery!r}\")\n return search_result_titles", "def _get_results(tree, year):\n projects = []\n \n for t in tree.findAll('li', {'class': 'mdl-list__item mdl-list__item--one-line'}): \n org = _clean(t.findChildren('a')[0].text)\n a = t.findChildren('a')[0]['href']\n\n org_url = 'https://www.google-melange.com' + a\n org_tree = _get_tree(org_url)\n\n for t1 in org_tree.findAll('span', {'class': 'mdl-list__item-primary-content'}):\n a1 = t1.findChildren('a')\n projs = [a['href'] for a in a1]\n\n for p in projs:\n proj_url = 'https://www.google-melange.com' + p\n proj_tree = _get_tree(proj_url)\n \n title = _clean(proj_tree.findAll('h3')[0].text)\n p = proj_tree.findAll('p')\n bio = _clean(p[0].text)\n \n student = bio.split('by')[-1].split('for')[0]\n description = _clean(p[1].text)\n projects.append((title, org, student, description))\n\n _save_results(projects, year)", "def genspider(ctx, name, domain):\n spider_filename = _gen_spider(name, domain)\n print('Created {0}'.format(spider_filename))\n\n test_filename = _gen_tests(name, domain)\n print('Created {0}'.format(test_filename))", "def start_crawlers(spider_name: str, rules: List[Rule]) -> None:\n runner = CrawlerRunner(settings)\n crawlers = runner.spider_loader.list()\n 
crawlers = [c for c in crawlers if c.__contains__(spider_name)]\n if crawlers:\n for rule in rules:\n runner.crawl(crawlers[0], rule=rule)\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n reactor.run()\n launch_logger.debug('all finished.')\n else:\n launch_logger.warning('provide the right spider name.')", "def scan_fixtures(path):\n results = list()\n for root, dirs, files in os.walk(path):\n relative_path = root.replace(path + \"/\", \"\")\n if relative_path.startswith(\"static\") or relative_path.startswith(\"theme\"):\n continue\n\n for f in files:\n if not f.endswith(\".json\"):\n continue\n\n app_name = os.path.basename(os.path.dirname(relative_path))\n\n results.append((app_name, f, relative_path))\n\n return results", "def filter_projects(project_services):\n return [project for project, services in project_services.items() if \"Travis\" in services or \"GitHub\" in services]", "def test_spires_keyword_distribution_with_parens(self):\n spi_search = \"find cn d0 and (a abachi or abbott or abazov)\"\n inv_search = \"collaboration:d0 and (author:abachi or author:abbott or author:abazov)\"\n self._compare_searches(inv_search, spi_search)", "def subtitle_search_engines_links(search, deep=0, debug=0, links=[]):\n s = Subseek()\n for subtitle_search_engine in SUBTITLE_SEARCH_ENGINES:\n if debug == 1:\n print \"Searching '%s' in '%s'\" % (search,\n subtitle_search_engine['name'])\n links_aux = s.get_links(subtitle_search_engine, search, deep)\n if not links_aux or len(links_aux) == 0:\n if debug == 1:\n print \"No match found in '%s'\" % subtitle_search_engine['name']\n else:\n if debug == 1:\n print \"%s matches found in '%s'\" % (len(links_aux),\n subtitle_search_engine['name'])\n\n links = links_aux + links\n\n return links", "def find_repo_name(repo_name, all_repos):\n for repo in all_repos:\n all_names = repo.find_elements_by_xpath(\"./td[2]\")\n for name in all_names:\n if name.text == repo_name:\n name.click()\n return True\n return False", "def test_name_test_queries(self):\n url = self.get_url()\n for query in self.TEST_NAME_QUERIES.keys():\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_NAME_QUERIES[query]\n self.assertEqual(len(returned_pks), len(expected_matches),\n msg='Did not find correct number of matches for query {}'.format(query))\n # Make sure the matches found are those that are expected.\n for expected_name in expected_matches:\n name_queryset = models.SourceDataset.objects.filter(dataset_name__regex=r'^{}$'.format(expected_name))\n self.assertEqual(name_queryset.count(), 1)\n expected_pk = name_queryset.first().pk\n self.assertIn(expected_pk, returned_pks,\n msg='Could not find expected dataset name {} with query {}'.format(expected_name, query))", "def test_name_test_queries(self):\n url = self.get_url()\n for query in self.TEST_NAME_QUERIES.keys():\n response = self.client.get(url, {'q': query})\n returned_pks = get_autocomplete_view_ids(response)\n expected_matches = self.TEST_NAME_QUERIES[query]\n self.assertEqual(len(returned_pks), len(expected_matches),\n msg='Did not find correct number of matches for query {}'.format(query))\n # Make sure the matches found are those that are expected.\n for expected_name in expected_matches:\n name_queryset = models.SourceDataset.objects.filter(dataset_name__regex=r'^{}$'.format(expected_name))\n self.assertEqual(name_queryset.count(), 1)\n expected_pk = name_queryset.first().pk\n self.assertIn(expected_pk, returned_pks,\n 
msg='Could not find expected dataset name {} with query {}'.format(expected_name, query))", "def external_search_engines_links(search, deep=0, debug=0, links=[]):\n s = Subseek()\n for search_engine in SEARCH_ENGINES:\n for subtitle_search_engine in SUBTITLE_SEARCH_ENGINES:\n if debug == 1:\n print \"Searching '%s' in '%s'\" % (search,\n search_engine['name'])\n links_aux = s.get_links(search_engine, search, deep,\n subtitle_search_engine[\"name\"])\n if not links_aux or len(links_aux) == 0:\n if debug == 1:\n print \"No match found in '%s'\" % search_engine['name']\n else:\n if debug == 1:\n print \"%s matches found in '%s'\" % (len(links_aux),\n search_engine['name'])\n links = links_aux + links\n\n return links", "def search(self, search):\n raise NotImplementedError", "def pl_search(term, page=0, splash=True, is_user=False):\n if not term or len(term) < 2:\n g.message = c.r + \"Not enough input\" + c.w\n g.content = generate_songlist_display()\n return\n\n if isinstance(term, dict):\n is_user = term[\"is_user\"]\n term = term[\"term\"]\n\n if splash:\n g.content = logo(c.g)\n prog = \"user: \" + term if is_user else term\n g.message = \"Searching playlists for %s\" % c.y + prog + c.w\n screen_update()\n\n if is_user:\n ret = channelfromname(term)\n if not ret: # Error\n return\n user, channel_id = ret\n\n else:\n # playlist search is done with the above url and param type=playlist\n logging.info(\"playlist search for %s\", prog)\n max_results = min(getxy().max_results, 50) # Limit for playlists command\n qs = generate_search_qs(term, page, result_count=max_results)\n qs['type'] = 'playlist'\n if 'videoCategoryId' in qs:\n del qs['videoCategoryId'] # Incompatable with type=playlist\n\n pldata = call_gdata('search', qs)\n id_list = [i.get('id', {}).get('playlistId')\n for i in pldata.get('items', ())]\n # page info\n get_page_info_from_json(pldata, len(id_list))\n\n qs = {'part': 'contentDetails,snippet',\n 'maxResults': 50}\n\n if is_user:\n if page:\n qs['pageToken'] = token(page)\n qs['channelId'] = channel_id\n else:\n qs['id'] = ','.join(id_list)\n\n pldata = call_gdata('playlists', qs)\n playlists = get_pl_from_json(pldata)\n\n if playlists:\n g.last_search_query = {\"playlists\": {\"term\": term, \"is_user\": is_user}}\n g.browse_mode = \"ytpl\"\n g.current_page = page\n g.ytpls = playlists\n g.message = \"Playlist results for %s\" % c.y + prog + c.w\n g.content = generate_playlist_display()\n\n else:\n g.message = \"No playlists found for: %s\" % c.y + prog + c.w\n g.current_page = 0\n g.content = generate_songlist_display(zeromsg=g.message)", "def search_plans(self, term, planlove=False):\n get = {'mysearch': term,\n 'planlove': int(bool(planlove))}\n response = self._get_page('search.php', get=get)\n soup = bs4.BeautifulSoup(response.text, 'html5lib')\n results = soup.find('ul', {'id': 'search_results'})\n if results is None:\n return [] # no results\n # results are grouped by the plan\n # on which the result was found\n user_groups = results.findAll(\n 'div', {'class': 'result_user_group'})\n resultlist = []\n for group in user_groups:\n user = group.find('a', {'class': 'planlove'}).contents[0]\n count = group.find('span').contents[0]\n # now extract snippets\n snippetlist = group.findAll('li')\n snippets = []\n for li in snippetlist:\n tag = li.find('span')\n tag.hidden = True # prevents BS from wrapping contents in\n # <span> upon conversion to unicode string\n snip = tag.decode(formatter=self._html_esc) # soup to unicode\n snip = self._canonicalize_plantext(snip)\n 
snippets.append(snip)\n resultlist.append((str(user), int(count), snippets))\n return resultlist", "def getItems(self):\n fname = 'getItems'\n actionId = self._db.addAction('WebCrawler')\n actionId_ex = self._db.addAction('extractor')\n\n if not os.path.exists(self._haystackPath):\n self._haystackPath = os.path.expanduser(self._haystackPath)\n\n if not os.path.exists(self._haystackPath):\n self._haystackPath = os.path.abspath(self._haystackPath)\n\n print('\\t{0} [{1}]'.format(fname, self._haystackPath))\n\n for (pathStr, dirs, files) in os.walk(self._haystackPath):\n head, tail = os.path.split(pathStr)\n for fileStr in files:\n fileDTCheck = ''\n filePath = os.path.join(pathStr,fileStr)\n\n # get the file date...\n fileDT = datetime.datetime.fromtimestamp(os.path.getmtime(filePath)).replace(microsecond=0)\n fileSize = os.path.getsize(filePath)\n fileName, fileExt = os.path.splitext(filePath)\n\n # save the item to the database\n itemId = self._db.addItem(self._engine_id, \"file://%s\" % filePath, fileDT)\n \n # now check the data for this item...\n itemList = self._db.getItemDataAll(itemId) \n isMatch = False\n for item in itemList:\n if item[0] == 'FileDate':\n # we have a date string...\n fileDTCheck = datetime.datetime.strptime(item[1], \"%Y-%m-%d %H:%M:%S\")\n if fileDTCheck == fileDT:\n # the same time, no changes needed\n isMatch = True\n \n if isMatch:\n # get next item as this is already exists\n continue\n \n # print(the details)\n print(fileDTCheck, fileDT)\n print('>>\\t%s\\t%s\\t%s' % (fname, head, tail))\n \n # set the datetime and other details\n self._db.addItemData(itemId, 'Haystack', tail, 0)\n self._db.addItemData(itemId, 'FileName', fileName, 0)\n self._db.addItemData(itemId, 'FileExt', fileExt, 0)\n self._db.addItemData(itemId, 'FileDate', fileDT, 0)\n self._db.addItemData(itemId, 'FileSize', fileSize, 0)\n\n # now to process the file...\n # this will extract out metadata and add to the itemData table the value pairs.\n pattern = re.compile(r'^.*[.](?P<ext>htm|html)$')\n pattPNG = re.compile(r'^.*[.](?P<ext>mp.|mpeg|avi|swf|jpg|jpeg|png)$')\n pattTAR = re.compile(r'^.*[.](?P<ext>tar\\.gz|tar\\.bz2|\\.zip|\\.tar|\\.7z)$')\n\n m = pattern.match(filePath)\n if not m:\n m = pattPNG.match(filePath)\n\n if not m:\n m = pattTAR.match(filePath)\n\n if not m:\n self.getContents(itemId, filePath, tail)\n self._db.updateItem(self._engine_id, itemId, actionId_ex, datetime.datetime.now())\n\n else:\n # we have a file extension...\n if m.group('ext').startswith('.htm'):\n # add this as an event to be processed by the html link reader...\n self._db.addItemEvent(self._engine_id, actionId, itemId)\n\n if self._db:\n self._db.commit_db()", "def projects_matching_classifier(classifier):\n client = xmlrpc.client.ServerProxy('http://pypi.python.org/pypi')\n try:\n logging.info('Fetching project list for {!r}'.format(classifier))\n return (result[0].lower() for result in client.browse([classifier]))\n finally:\n client('close')()" ]
[ "0.56716776", "0.54362226", "0.54334825", "0.53485537", "0.53356576", "0.53022844", "0.52963454", "0.5276801", "0.5274018", "0.5273827", "0.5265692", "0.5207114", "0.51662695", "0.5152858", "0.514631", "0.512449", "0.5115938", "0.5112054", "0.51114047", "0.51075816", "0.5092334", "0.50902075", "0.5086198", "0.50772893", "0.5060954", "0.50554353", "0.5053591", "0.50406694", "0.5028122", "0.50260806", "0.50166214", "0.50151944", "0.5004649", "0.5003686", "0.49917364", "0.49890283", "0.49840227", "0.4982258", "0.49770185", "0.49692962", "0.4968346", "0.49674636", "0.49558264", "0.49506125", "0.4947986", "0.49406236", "0.49377382", "0.49375167", "0.49350366", "0.49349672", "0.49298796", "0.49294618", "0.49238294", "0.4922938", "0.49196994", "0.4916964", "0.4894946", "0.48923287", "0.4890687", "0.48889738", "0.48858404", "0.48830798", "0.4882083", "0.48781073", "0.4862579", "0.48577404", "0.48551053", "0.4852197", "0.48380604", "0.4833627", "0.4819965", "0.48178217", "0.48178217", "0.4815515", "0.48132938", "0.48126587", "0.4808151", "0.4806218", "0.4805423", "0.48041403", "0.47992504", "0.47960183", "0.47951716", "0.47937167", "0.47934848", "0.4790124", "0.47889832", "0.47859123", "0.47854692", "0.4782717", "0.47767764", "0.476761", "0.47650424", "0.47650424", "0.47589135", "0.47550288", "0.47545448", "0.47508806", "0.4748192", "0.47470933" ]
0.7393245
0
Fixes issues with the imported items.
def _fix_items(items): for _, item in items.iteritems(): if 'url' in item['fields']: del item['fields']['url'] return items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def OldItems(self) -> _n_1_t_7:", "def _update_items(self):\n\n self._item_list = []\n for f in os.listdir(self._folder):\n # Skip text files\n # -> It is important that we don't delete the list file if the user puts it here!\n ext = os.path.splitext(f)[1]\n if ext not in ['.csv', 'txt']:\n self._item_list.append(f)", "def imported(self, session, task):\n drop_feat = self.config['drop'].get(bool)\n\n for item in task.imported_items():\n self.ft_in_title(item, drop_feat)\n item.store()", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # Now let's do some db sanity checks.\r\n self._delicious_xml_data_test()", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def removeOldItems(self):\n pass", "def fixup(self):\n raise Exception(\"Fixup not implemented yet!\")", "def clean_imports(import_list):\r\n\r\n mod_lst = []\r\n\r\n while import_list:\r\n if ',' in import_list[0]:\r\n if import_list[0][0:6] == 'import':\r\n temp_ = import_list[0][6:].split(',')\r\n for i in temp_:\r\n import_list.append('import ' + i)\r\n\r\n import_list[0] = 'Fixed'\r\n\r\n if import_list[0][0:4] == 'from':\r\n temp_ = import_list[0].split('import')\r\n part_1 = temp_[0]\r\n part_2 = temp_[1].lstrip().rstrip().split(',')\r\n for i in part_2:\r\n import_list.append(str(part_1) + ' import ' + str(i))\r\n\r\n import_list[0] = 'Fixed'\r\n\r\n temp = import_list[0].split()\r\n\r\n if len(temp) == 2 and temp[0] == 'import':\r\n mod_lst.append(temp[1])\r\n import_list[0] = 'Fixed'\r\n\r\n elif len(temp) == 4 and temp[0] == 'import' and temp[2] == 'as':\r\n mod_lst.append(temp[1])\r\n import_list[0] = 'Fixed'\r\n\r\n elif len(temp) == 4 and temp[0] == 'from':\r\n if temp[3] != '*':\r\n mod_lst.append(str(temp[1]) + '.' 
+ str(temp[3]))\r\n else:\r\n mod_lst.append(str(temp[1]))\r\n import_list[0] = 'Fixed'\r\n\r\n del import_list[0]\r\n\r\n return mod_lst", "def parse_items(self):", "def preset_items(self):\r\n\r\n raise NotImplementedError", "def test_import_selected_items(self):\n item_id = self.response.context['items'][0].id\n self.client.post('/importer/', {'items': [item_id, ]})\n self.assertEquals(ModeratedObject.objects.all().count(), 1)", "def resolve_all(self, import_items):\n for import_item in import_items:\n try:\n yield self.resolve_import(import_item)\n except ImportException as err:\n logging.info('unknown module %s', err.module_name)", "def tearDown(self):\n builtins.__import__ = self.original_imports", "def _should_replace_eitems(self, eitem):\n existing_provider = self._get_record_import_provider(eitem)\n if not existing_provider:\n return False\n\n existing_priority = current_app.config[\"CDS_ILS_IMPORTER_PROVIDERS\"][\n existing_provider\n ][\"priority\"]\n\n # existing_priority = 0, self.priority = 1, returns False\n return existing_priority > self.current_provider_priority", "def loaditems(self, fh):\n pass", "def _link_items(self):\n pass", "def rebuild_items(self, identifiers):\n raise NotImplementedError", "def updateImportList(self):\n\t\tids_new = self.ids\n\t\tids_list = []\n\n\t\tif not os.path.exists(ids_file):\n\t\t\twith open(ids_file, 'a') as ids:\n\t\t\t\tpass\n\n\t\twith open(ids_file, \"rb\") as ids:\n\t\t\tids_reader = csv.reader(ids)\n\t\t\tfor row in ids_reader:\n\t\t\t\tids_list.append(row[0])\n\n\t\tids_new = set(ids_new) - set(ids_list)\n\n\t\twith open(ids_file, \"a\") as ids:\n\t\t\tids_writer = csv.writer(ids)\n\t\t\tfor idx in ids_new:\n\t\t\t\tids_writer.writerow( str(idx) )\n\n\t\tself.ids = ids_new", "def importItem(file_path):\n\n #Ouverture du fichier\n rb = open_workbook(file_path)\n r_sheet = rb.sheet_by_index(0)\n\n for row_index in range (1, r_sheet.nrows):\n #Hydratation or get Supplier Model\n item_supplier= r_sheet.cell(row_index, 4).value\n item_supplier, created = Supplier.objects.get_or_create(name=item_supplier)\n\n #Hydratation or get Category Model\n current_category = r_sheet.cell(row_index, 0).value\n item_category, created = Category.objects.get_or_create(name=current_category)\n\n #Hydratation Item\n item_name = r_sheet.cell(row_index, 1).value\n item_ref = current_supplier= r_sheet.cell(row_index, 3).value\n item_quantity = r_sheet.cell(row_index, 2).value\n item, created = Item.objects.get_or_create(ref=item_ref, name=item_name, category=item_category, supplier=item_supplier, quantity=item_quantity)", "def init_items(self):\r\n raise NotImplementedError()", "def load_references(self, collections, item):", "def test_legacy_items_at_day_1(manager):\n manager.update()\n compare_results_attrs(manager.items, fixtures.FIXTURES[1])", "def clean_up_map(self):\n self.items = [i for i in self.items if i.quantity != 0]", "def _clean_up(self):", "def items(self) -> 'ItemsView[str, str]':\n return _EntityFixupItems(self)", "def drop_invalid(self, item, line_reference, reason=''):\n logger.warning(\n f'Dropping invalid {line_reference} from import job \"{self.job}\" run {self.timestamp}: {reason}'\n )\n self.invalid_items.append({\n 'id': getattr(item, 'identifier', line_reference),\n 'timestamp': datetimestamp(digits_only=False),\n 'title': getattr(item, 'title', ''),\n 'uri': getattr(item, 'uri', ''),\n 'reason': reason\n })", "def repair(self):\n self._fix_varnames()\n self._fix_array_meta()\n self._fix_array_item_vals()\n 
self.repair_text_edits()\n self.restore_item_texts()\n self._clean_datafile_set()\n self._prevent_one_cat_set()\n self._add_secure_variables()\n return None", "async def assign_styled_items(all_styles: Iterable[Style], item: Item) -> None:\n # To do inheritance, we simply copy the data to ensure all items\n # have data defined for every used style.\n all_ver: list[Version] = list(item.versions.values())\n\n # Move default version to the beginning, so it's read first.\n # that ensures it's got all styles set if we need to fallback.\n all_ver.remove(item.def_ver)\n all_ver.insert(0, item.def_ver)\n\n for vers in all_ver:\n # We need to repeatedly loop to handle the chains of\n # dependencies. This is a list of (style_id, UnParsed).\n to_change: list[tuple[str, UnParsedItemVariant]] = []\n # We temporarily set values like this during parsing, by the end of this loop\n # it'll all be ItemVariant.\n styles: dict[str, UnParsedItemVariant | ItemVariant | None] = vers.styles # type: ignore\n for sty_id, conf in styles.items():\n assert isinstance(conf, UnParsedItemVariant)\n to_change.append((sty_id, conf))\n # Not done yet\n styles[sty_id] = None\n\n # If we have multiple versions, mention them.\n vers_desc = f' with version {vers.id}' if len(all_ver) > 1 else ''\n\n # Evaluate style lookups and modifications\n while to_change:\n # Needs to be done next loop.\n deferred: list[tuple[str, UnParsedItemVariant]] = []\n # UnParsedItemVariant options:\n # filesys: FileSystem # The original filesystem.\n # folder: str # If set, use the given folder from our package.\n # style: str # Inherit from a specific style (implies folder is None)\n # config: Property # Config for editing\n start_data: UnParsedItemVariant | ItemVariant | None\n for sty_id, conf in to_change:\n if conf.style:\n try:\n if ':' in conf.style:\n ver_id, base_style_id = conf.style.split(':', 1)\n start_data = item.versions[ver_id].styles[base_style_id]\n else:\n start_data = styles[conf.style]\n except KeyError:\n raise ValueError(\n f'Item {item.id}\\'s {sty_id} style{vers_desc} '\n f'referenced invalid style \"{conf.style}\"'\n )\n if start_data is None:\n # Not done yet!\n deferred.append((sty_id, conf))\n continue\n # Can't have both!\n if conf.folder:\n raise ValueError(\n f'Item {item.id}\\'s {sty_id} style has '\n f'both folder and style{vers_desc}!'\n )\n elif conf.folder:\n # Just a folder ref, we can do it immediately.\n # We know this dict should be set.\n try:\n start_data = item.folders[conf.filesys, conf.folder]\n except KeyError:\n LOGGER.info('Folders: {}', item.folders.keys())\n raise\n else:\n # No source for our data!\n raise ValueError(\n f\"Item {item.id}'s {sty_id} style has no data \"\n f\"source{vers_desc}!\"\n )\n\n if conf.config is None:\n styles[sty_id] = start_data.copy()\n else:\n styles[sty_id] = await start_data.modify(\n conf.pak_id,\n conf.config,\n f'<{item.id}:{vers.id}.{sty_id}>',\n )\n\n # If we defer all the styles, there must be a loop somewhere.\n # We can't resolve that!\n if len(deferred) == len(to_change):\n unresolved = '\\n'.join(\n f'{conf.style} -> {sty_id}'\n for sty_id, conf in deferred\n )\n raise ValueError(\n f'Loop in style references for item {item.id}'\n f'{vers_desc}!\\nNot resolved:\\n{unresolved}'\n )\n to_change = deferred\n\n # Ensure we've converted all these over.\n assert all(isinstance(variant, ItemVariant) for variant in styles.values()), styles\n\n # Fix this reference to point to the actual value.\n vers.def_style = vers.styles[cast(str, vers.def_style)]\n\n if 
DEV_MODE.get():\n # Check each editoritem definition for some known issues.\n for sty_id, variant in styles.items():\n assert isinstance(variant, ItemVariant), f'{item.id}:{sty_id} = {variant!r}!!'\n with logger.context(f'{item.id}:{sty_id}'):\n variant.editor.validate()\n for extra in variant.editor_extra:\n with logger.context(f'{item.id}:{sty_id} -> {extra.id}'):\n extra.validate()\n\n for style in all_styles:\n if style.id in styles:\n continue # We already have a definition\n for base_style in style.bases:\n if base_style.id in styles:\n # Copy the values for the parent to the child style\n styles[style.id] = styles[base_style.id]\n vers.inherit_kind[style.id] = InheritKind.INHERIT\n # If requested, log this.\n if not item.unstyled and config.APP.get_cur_conf(config.gen_opts.GenOptions).log_item_fallbacks:\n LOGGER.warning(\n 'Item \"{}\" using parent \"{}\" for \"{}\"!',\n item.id, base_style.id, style.id,\n )\n break\n else:\n # No parent matches!\n if not item.unstyled and config.APP.get_cur_conf(config.gen_opts.GenOptions).log_missing_styles:\n LOGGER.warning(\n 'Item \"{}\"{} using inappropriate style for \"{}\"!',\n item.id, vers_desc, style.id,\n )\n # Unstyled elements allow inheriting anyway.\n vers.inherit_kind[style.id] = InheritKind.INHERIT if item.unstyled else InheritKind.UNSTYLED\n # If 'isolate versions' is set on the item,\n # we never consult other versions for matching styles.\n # There we just use our first style (Clean usually).\n # The default version is always isolated.\n # If not isolated, we get the version from the default\n # version. Note the default one is computed first,\n # so it's guaranteed to have a value.\n styles[style.id] = (\n vers.def_style if\n item.isolate_versions or vers.isolate\n else item.def_ver.styles[style.id]\n )", "def import_and_clean():\n \n with open(\"inventory.csv\", newline=\"\") as csvfile:\n inventory = csv.DictReader(csvfile)\n rows = list(inventory)\n\n for row in rows:\n row[\"product_price\"] = row[\"product_price\"].replace(\"$\", \"\")\n row[\"product_price\"] = row[\"product_price\"].replace(\".\", \"\")\n row[\"product_price\"] = int(float(row[\"product_price\"]))\n row[\"date_updated\"] = datetime.datetime.strptime(row[\"date_updated\"], \"%m/%d/%Y\")\n row[\"product_quantity\"]= int(row[\"product_quantity\"])\n \n return rows", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def _apply_template(self, original_item):\n\t\t# TODO: Performance optimization. Don't recursively call _apply_template on hosts we have already\n\t\t# applied templates to. This needs more work.\n\t\tif not original_item.has_key('use'):\n\t\t\treturn original_item\n\t\tobject_type = original_item['meta']['object_type']\n\t\t# Performance tweak, if item has been parsed. 
Lets not do it again\n\t\tif original_item.has_key('name') and self.item_apply_cache[object_type].has_key( original_item['name'] ):\n\t\t\treturn self.item_apply_cache[object_type][ original_item['name'] ]\n\t\t# End of performance tweak\n\t\tparent_names = original_item['use'].split(',')\n\t\tparent_items = []\n\t\tfor parent_name in parent_names:\n\t\t\tparent_item = self._get_item( parent_name, object_type )\n\t\t\tif parent_item == None: \n\t\t\t\terror_string = \"error in %s\\n\" % (original_item['meta']['filename'])\n\t\t\t\terror_string = error_string + \"Can not find any %s named %s\\n\" % (object_type,parent_name)\n\t\t\t\terror_string = error_string + self.print_conf(original_item)\n\t\t\t\tself.errors.append( ParserError(error_string,item=original_item) )\n\t\t\t\tcontinue\n\t\t\t# Parent item probably has use flags on its own. So lets apply to parent first\n\t\t\tparent_item = self._apply_template( parent_item )\n\t\t\tparent_items.append( parent_item )\n\t\tfor parent_item in parent_items:\n\t\t\tfor k,v in parent_item.iteritems():\n\t\t\t\tif k == 'use':\n\t\t\t\t\tcontinue\n\t\t\t\tif k == 'register':\n\t\t\t\t\tcontinue\n\t\t\t\tif k == 'meta':\n\t\t\t\t\tcontinue\n\t\t\t\tif k == 'name':\n\t\t\t\t\tcontinue\n\t\t\t\tif not original_item['meta']['inherited_attributes'].has_key(k):\n\t\t\t\t\toriginal_item['meta']['inherited_attributes'][k] = v\n\t\t\t\tif not original_item.has_key(k):\n\t\t\t\t\toriginal_item[k] = v\n\t\t\t\t\toriginal_item['meta']['template_fields'].append(k)\n\t\tif original_item.has_key('name'):\n\t\t\tself.item_apply_cache[object_type][ original_item['name'] ] = original_item\n\t\treturn original_item", "def drop_failed(self, item, line_reference, reason=''):\n logger.warning(\n f'Dropping failed {line_reference} from import job \"{self.job}\" run {self.timestamp}: {reason}'\n )\n self.failed_items.append({\n 'id': getattr(item, 'identifier', line_reference),\n 'timestamp': datetimestamp(digits_only=False),\n 'title': getattr(item, 'title', ''),\n 'uri': getattr(item, 'uri', ''),\n 'reason': reason\n })", "def test_importer_management(enaml_importer):\n standard_importers_numbers = len(enaml_importer.get_importers())\n enaml_importer.add_importer(WrongEnamlImporter)\n assert WrongEnamlImporter in enaml_importer.get_importers()\n enaml_importer.add_importer(WrongEnamlImporter)\n assert (len(enaml_importer.get_importers()) ==\n standard_importers_numbers + 1)\n enaml_importer.remove_importer(WrongEnamlImporter)\n\n # Test removing twice\n enaml_importer.remove_importer(WrongEnamlImporter)\n\n with pytest.raises(TypeError):\n enaml_importer.add_importer(object)", "def test_import_process(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_xml_data_test()", "def _general_import(self):\n # Lookups for method names and expected import-failure errors\n importers = {\n SourceTypes.BytesPlaintext: self._import_plaintext_bytes,\n SourceTypes.BytesZlib: self._import_zlib_bytes,\n SourceTypes.FnamePlaintext: self._import_plaintext_fname,\n SourceTypes.FnameZlib: self._import_zlib_fname,\n SourceTypes.DictJSON: self._import_json_dict,\n }\n import_errors = {\n SourceTypes.BytesPlaintext: TypeError,\n SourceTypes.BytesZlib: (zlib_error, TypeError),\n SourceTypes.FnamePlaintext: (OSError, TypeError, UnicodeDecodeError),\n SourceTypes.FnameZlib: (OSError, TypeError, zlib_error),\n SourceTypes.DictJSON: (ValidationError),\n }\n\n # Attempt series 
of import approaches\n # Enum keys are ordered, so iteration is too.\n for st in SourceTypes:\n if st not in importers:\n # No action for source types w/o a handler function defined.\n continue\n\n if self._try_import(importers[st], self._source, import_errors[st]):\n self.source_type = st\n return\n\n # Nothing worked, complain.\n raise TypeError(\"Invalid Inventory source type\")", "def test_legacy_items_at_day_0(manager):\n compare_results_attrs(manager.items, fixtures.FIXTURES[0])", "def file_import(self):\r\n\r\n try:\r\n self.process_file_import()\r\n except InputError as ex:\r\n print(ex)\r\n self.file_import()", "def test_modify_import_data_2(self):\n self.ticket_dict3[\"extra\"] = \"extra\"\n result = tickets.modify_import_data(self.ticket_dict3,\n self.required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)", "def load(self, items):\n for item in items:\n self.items.append(self.model_type(item))", "def process_items():\n global HAS_WATCH\n global HAS_FIRST_AID_KIT\n global HAS_FLASHLIGHT\n global HAS_RAINCOAT\n global HAS_COMPASS\n global HAS_BEARTRAP\n\n if \"Watch\" in ITEMS:\n HAS_WATCH = True\n if \"First Aid Kit\" in ITEMS:\n HAS_FIRST_AID_KIT = True\n if \"Flashlight\" in ITEMS:\n HAS_FLASHLIGHT = True\n if \"Raincoat\" in ITEMS:\n HAS_RAINCOAT = True\n if \"Compass\" in ITEMS:\n HAS_COMPASS = True\n if \"Bear Trap\" in ITEMS:\n HAS_BEARTRAP = True\n\n # Stupid little hack to provide 'immediate updates/effect' of having the below items\n if HAS_WATCH:\n update_title_area(\" Day: %d Time: %d:00 \" % (DAY, TIME))\n if HAS_COMPASS:\n DISCOVERED[ZERO_BASE_PLYR_POS] = \"Y\"", "def invalid_item(upload_items: List[JSONDict]) -> JSONDict:\n altered = upload_items[0]\n altered[\"language\"] = \"engl\"\n altered[\"date\"] = \"02-2031-01\"\n altered[\"url\"] = \"incorrect.com\"\n return altered", "def clean_up(self):\n\t\tpass", "def load_from_file(save_location): # This is a function for readability -\r\n # opens txt file in read mode and loads it\r\n # into an array (list) of ListItem variables\r\n data_file_r = open(save_location, \"r\") # Open txt file in read mode\r\n list_item = [\"Text\", -1, 2, True] # Item, Item Priority, group, is visible\r\n todo = [] # make a list of lists\r\n temp = 1 # Temporary counter variable to reconstruct lists from .txt file\r\n line_counter = 1\r\n try:\r\n for item in data_file_r: # loop through each line in the file, one at\r\n # a time - from w3schools.com\r\n if (line_counter - 1) % 5 != 0 and line_counter > 0:\r\n cleaned_item = \"\"\r\n for character_index in range(len(\r\n item)): # Loop through each character in the extracted\r\n # string\r\n if character_index != len(\r\n item) - 1: # if it is not the last character, add\r\n # it to the cleaned string\r\n cleaned_item += item[character_index]\r\n # Add every character to a\r\n # but \\n\r\n if temp == 1: # Item Text\r\n list_item[0] = cleaned_item\r\n temp = 2\r\n elif temp == 2: # Item Priority\r\n list_item[1] = int(cleaned_item)\r\n temp = 3\r\n elif temp == 3: # Item Group\r\n list_item[2] = int(cleaned_item)\r\n temp = 4\r\n elif temp == 4: # Is Visible\r\n if cleaned_item == \"False\":\r\n list_item[3] = False\r\n else: # Assume the item is visible if the text is not\r\n # False\r\n list_item[3] = True\r\n todo.insert(0, ListItem(list_item[0], list_item[1],\r\n list_item[2], list_item[3]))\r\n temp = 1\r\n else: # If some error occurred and a condition outside of the\r\n # possible four is met, restart\r\n temp = 1\r\n line_counter += 1\r\n except 
ValueError:\r\n print(\"An error has occurred trying to load the file\")\r\n result = int(clean_input(\r\n \"Please enter a 2 to overwrite the current save file and start \"\r\n \"over or any other number to exit the program\"))\r\n if result == 2:\r\n key = random.randint(2, 9) # Generate a random integer between 2\r\n # and 9 to be used as a second dynamic check\r\n if key == 2:\r\n key = 1 # If the random number is 2, set it to one so that\r\n # the same number (2) cannot be used as the verification number\r\n result2 = int(clean_input(\"Are you sure you want to delete all \"\r\n \"of your saved data\\nEnter {0} to \"\r\n \"proceed, or anything else to \"\r\n \"cancel\".format(str(key))))\r\n if result2 == key:\r\n data_file_w = open(\"C:Item_List.txt\", \"w\")\r\n data_file_w.close()\r\n todo = []\r\n print(\"Save Data Erased\")\r\n return todo # Return an empty list if file load failed\r\n else:\r\n print(\"Program Exiting\")\r\n quit(1)\r\n else:\r\n print(\"Program Exiting\")\r\n quit(1) # Exit the program with the exit code of 1\r\n data_file_r.close()\r\n # All the list functions above referenced from w3schools.com What is\r\n # happening above: Opening the file, initializing a list to hold all\r\n # four pieces of data, then after pulling the data from the file and\r\n # storing in the list, it is copied (not referenced) into my main list\r\n # of ListItem objects\r\n return todo", "def setup(self, ds: PetscDocStringImpl) -> None:\n items: list[InlineList.ItemsEntry] = []\n titles = set(map(str.casefold, self.titles))\n\n def inspector(ds: PetscDocStringImpl, loc: SourceRange, line: str, verdict: Verdict) -> None:\n rest = (line.split(':', maxsplit=2)[1] if ':' in line else line).strip()\n if not rest:\n return\n\n if ':' not in rest:\n # try and see if this is one of the bad-egg lines where the heading is missing\n # the colon\n bad_title = next(filter(lambda t: t.casefold() in titles, rest.split()), None)\n if bad_title:\n # kind of a hack, we just erase the bad heading with whitespace so it isnt\n # picked up below in the item detection\n rest = rest.replace(bad_title, ' ' * len(bad_title))\n\n start_line = loc.start.line\n offset = 0\n sub_items = []\n for sub in filter(bool, map(str.strip, rest.split(','))):\n subloc = ds.make_source_range(sub, line, start_line, offset=offset)\n offset = subloc.end.column - 1\n sub_items.append((subloc, sub))\n if sub_items:\n items.append(((line, rest), sub_items))\n return\n\n super()._do_setup(ds, inspector)\n self.items = tuple(items)\n return", "def test_patchorganizations_item(self):\n pass", "def test_keep_list(self):\n input_item = self.item_class(name=[\"foo\", \"bar\"])\n il = ItemLoader(item=input_item)\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\", \"bar\"]})", "def test_bad_items_pos():\n conf = load_yaml(\n \"\"\"\\\ndb_objects:\n - name: 42\n - name: aaa\n kind: nope\n - name: bbb\n kinds:\n - table\n - boh\n - sequence\n - mah\n\"\"\"\n )\n errors = get_config_errors(conf)\n assert len(errors) == 4\n assert \"<unicode string>:2\" in errors[0]\n assert \"<unicode string>:4\" in errors[1]\n assert \"<unicode string>:8\" in errors[2]\n assert \"<unicode string>:10\" in errors[3]", "def items(source, include):\n with commit():\n import_items_from_json(source, include=include)", "def check_unexpected_items(self, expected_items, scraped_items):\n\n\t\tfor scraped_item in scraped_items:\n\t\t\tself.assertIn(scraped_item, 
expected_items)", "def error_output_import(self):\n im_methods_string = ''\n for i in range(len(methods_of_import)):\n if i == 0:\n continue\n elif i != len(methods_of_import) - 1:\n im_methods_string += ('from_' + methods_of_import[i] + '() or ')\n else: # i == len(methods_of_import)\n im_methods_string += ('from_' + methods_of_import[i] + '()')\n print(\"please import by \" + im_methods_string)\n sys.exit(0)", "def __init__(self, items, abort_on_error=False):\n self._items = items\n self.abort_on_error = abort_on_error", "def test_flatten_inventory(self):\n pass", "def test_patchhardwares_item(self):\n pass", "def add_imported(products):\n \n for product in products:\n add_product(product[\"product_name\"], product[\"product_quantity\"], product[\"product_price\"], product[\"date_updated\"])", "def clean_up(self):\n pass", "def clean_up(self):\n pass", "def clean_up_data(self):\n pass", "def test_partial_twice_dependent_object_import(self):\n pass", "def test_twice_dependent_object_import(self):\n pass", "def invalid_items(self) -> ItemLog:\n if self._invalid_items is None:\n self._invalid_items = ItemLog(self.dir / 'dropped-invalid.log.csv', DROPPED_INVALID_FIELDNAMES, 'id')\n return self._invalid_items", "def collate(items):\n # return batch items as a list\n return items", "def NewItems(self) -> _n_1_t_7:", "def fixRotterdamItem(item):\n pywikibot.output(u'Working on %s' % (item.title(),))\n data = item.get()\n claims = data.get('claims')\n if not u'P217' in claims:\n pywikibot.output(u'No inventory number found, skipping')\n return\n if not len(claims.get(u'P217'))==1:\n pywikibot.output(u'Multiple inventory numbers found, skipping')\n return\n invclaim = claims.get(u'P217')[0]\n inventorynumber = invclaim.getTarget()\n if not u'_' in inventorynumber:\n pywikibot.output(u'No _ found in inventory number, skipping')\n return\n newinventorynumber = inventorynumber.replace(u'_', u'-')\n\n if not u'P973' in claims:\n pywikibot.output(u'No url found, skipping')\n return\n if not len(claims.get(u'P973'))==1:\n pywikibot.output(u'Multiple urls found, skipping')\n return\n urlclaim = claims.get(u'P973')[0]\n url = urlclaim.getTarget()\n newurl = url\n\n if not u'collectie.museumrotterdam.nl/objecten/' in url:\n pywikibot.output(u'Invalid url: %s, skipping' % (url,))\n return\n if not url.endswith(newinventorynumber):\n pywikibot.output(u'Url %s and inventory number %s don\\'t match, skipping' % (url,newinventorynumber))\n return\n\n museumpage = requests.get(url)\n if u'Pagina niet gevonden' in museumpage.text:\n newurl = newurl + u'-B'\n pywikibot.output(u'Current url %s broken, trying %s' % (url,newurl ))\n newinventorynumber = newinventorynumber + u'-B'\n museumpage = requests.get(newurl)\n if not u'content=\"Museum Rotterdam - van de stad\">' in museumpage.text:\n pywikibot.output(u'New url did not work, skipping')\n return\n\n summary = u'Fixing Rotterdam Museum'\n if inventorynumber!=newinventorynumber:\n invclaim.changeTarget(newinventorynumber, summary=summary)\n if url !=newurl:\n urlclaim.changeTarget(newurl, summary=summary)", "def run_import(self, expanded, unexpanded) : \n\t\tif not unexpanded :\n\t\t\treturn self.errormessage(\"Needs some filenames to import\")\n\t\tif not self.HasPerms(self.__context, 'Import/Export objects') :\n\t\t\treturn -1\n\t\tfor filename in unexpanded :\n\t\t\tself.__context.manage_importObject(filename)\n\t\t\tself.htmlmessage('%s imported successfully' % filename)", "def test_get_imports(self):\n pass", "def UpdateIds(self):\r\n removed = 
set(self.item_itemId.keys()) - set(self.data.keys())\r\n for item in removed:\r\n itemId = self.item_itemId[item]\r\n del self.item_itemId[item]\r\n del self.itemId_item[itemId]", "def populate_contents(self):\n raise Exception('Implement me!')", "def test_modify_import_data_5(self):\n self.ticket_dict4[\"type\"] = \"replace\"\n self.ticket_dict4[\"description_field\"] = \"product\"\n self.ticket_dict4[\"eval_mode\"] = \"final\"\n result = tickets.modify_import_data(self.ticket_dict4,\n self.required_keys, self.optional_keys, self.keywords)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"host_genus\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"cluster\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"subcluster\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_author\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"retrieve_record\"], \"retain\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"annotation_status\"], \"final\")\n with self.subTest():\n self.assertEqual(self.ticket_dict4[\"accession\"], \"retain\")", "def test_get_order_items(self):\n pass", "def discogsImport (discogs_folder):\n\n # Set collection \n collection = user.collection_folders\n\n # Populate import table\n for album in collection[discogs_folder].releases:\n query = None\n\n # Concatenate notes\n hashing_note = None\n if album.notes != None:\n for idx in range(len(album.notes)):\n hashing_note = str(hashing_note) + str(album.notes[idx]['field_id']) + str(album.notes[idx]['value'])\n\n # Hash the notes\n notes_chksum = hashNotes(hashing_note)\n\n # Query instance table for instance\n db_instance = dbq.exec_db_query_dict(dbq.get_instance_info, album.instance_id)\n\n # New items\n if db_instance == None:\n\n # Build insert data\n query_data = {'instance_id': album.instance_id,\n 'rating': album.rating,\n 'title': album.release.title,\n 'folder_id': album.folder_id,\n 'discogs_date_added': album.date_added,\n 'notes': str(album.notes),\n 'notes_chksum': notes_chksum.hexdigest(),\n 'release_id': album.id, \n 'insert_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n query = dbq.add_instance\n dbq.exec_db_query(query, query_data, query_type='insert')\n\n # Test for existing and changed\n elif db_instance['instance_id'] == album.instance_id and \\\n (db_instance['notes_chksum'] != notes_chksum.hexdigest() or \n db_instance['folder_id'] != album.folder_id or \n db_instance['release_id'] != album.id ):\n\n # Update notes if hash is different\n if db_instance['notes_chksum'] != notes_chksum.hexdigest():\n query_data = {'notes': str(album.notes),\n 'notes_chksum': notes_chksum.hexdigest(),\n 'update_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), \n 'instance_id': album.instance_id, \n 'release_id': album.id}\n query = dbq.update_instance_notes_chksum\n dbq.exec_db_query(query, query_data, query_type='insert')\n\n # Update folder id\n if db_instance['folder_id'] != album.folder_id:\n query_data = {'folder_id': album.folder_id,\n 'update_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), \n 'instance_id': album.instance_id}\n query = dbq.update_instance_folder_id\n dbq.exec_db_query(query, query_data, query_type='insert')\n \n if db_instance['release_id'] != album.id:\n query_data = {'release_id': album.id, \n 'instance_id': album.instance_id}\n query = dbq.update_instance_release_id\n dbq.exec_db_query(query, 
query_data, query_type='insert')", "def _clean_item_data(self, item):\r\n item['location'] = item['_id']\r\n del item['_id']", "def setUp(self):\n self.new_inv_item = ['1', 'Knife Set', 10, 'n', 'n']\n self.new_furn_item = ['2', 'Couch', 25, 'y', 'Cloth', 'L']\n self.new_elec_item = ['3', 'Dryer', 100, 'n', 'y', 'Samsung', 12]", "def clean_up_inventory(self):\n self.inventory = [i for i in self.inventory if i.quantity != 0]", "def dispatch_items_randomly(self, level):\n for item in self.list:\n item.position = Item.define_random_position(item, level)", "def fix_item(d,v,metadata):\n\n id_to_name,nominalvars=setup_itemid_dict()\n vname=id_to_name[v]\n # variables that need to have one subtracted\n subtract_one=['ArrestedChargedLifeCount','ChildrenNumber',\n 'RelationshipNumber','TrafficAccidentsLifeCount',\n 'TrafficTicketsLastYearCount','RentOwn']\n if vname in subtract_one:\n tmp=[int(i) for i in d]\n d.iloc[:]=numpy.array(tmp)-1\n print('subtrated one:',v,vname)\n\n metadata=metadata_subtract_one(metadata)\n\n # replace zero for \"prefer not to say\" with nan\n replace_zero_with_nan=['CarDebt','CreditCardDebt','EducationDebt',\n 'MortgageDebt','OtherDebtAmount']\n if vname in replace_zero_with_nan:\n tmp=numpy.array([float(i) for i in d.iloc[:]])\n tmp[tmp==0]=numpy.nan\n d.iloc[:]=tmp\n print('replaced %d zeros with nan:'%numpy.sum(numpy.isnan(tmp)),v,vname)\n metadata=metadata_replace_zero_with_nan(metadata)\n\n # replace 2 for \"no\" with zero\n change_two_to_zero_for_no=['RetirementAccount']\n if vname in change_two_to_zero_for_no:\n tmp=numpy.array([float(i) for i in d.iloc[:]])\n tmp[tmp==2]=0\n d.iloc[:]=tmp\n print('changed two to zero for no:',v,vname)\n metadata=metadata_change_two_to_zero_for_no(metadata)\n\n return d,metadata", "def test_modify_import_data_1(self):\n result = tickets.modify_import_data(self.ticket_dict2,\n self.required_keys, self.optional_keys, self.keywords)\n self.assertFalse(result)", "def convert_items(self, items):\n if self.prewrap_fn:\n items = [self.prewrap_fn(i) for i in items]\n\n if self.wrap:\n items = self.wrap_items(items)\n else:\n # make a copy of items so the converted items can be mutated without\n # changing the original items\n items = items[:]\n return items", "def setUp(self):\n client = utils.create_test_datastore_client()\n self.resource = import_attempt.ImportAttemptByID(client)\n list_resource = import_attempt_list.ImportAttemptList(client)\n run_list_resource = system_run_list.SystemRunList(client)\n attempts = [{\n _ATTEMPT.provenance_url: 'google.com'\n }, {\n _ATTEMPT.provenance_url: 'facebook.com'\n }, {\n _ATTEMPT.provenance_url: 'bing.com'\n }]\n self.attempts = utils.ingest_import_attempts(run_list_resource,\n list_resource, attempts)", "def items(items_json_folder, locations_json):\n output_filepath = os.path.join(\n current_app.config['CDS_MIGRATOR_KIT_LOGS_PATH'],\n 'items_{0}.json'\n )\n\n with open(locations_json, 'r') as fp_locations:\n locations = json.load(fp_locations)\n internal_locations = locations['internal_locations']\n\n total_import_records = 0\n total_migrated_records = 0\n _files = glob.glob(os.path.join(items_json_folder, \"*.json\"))\n for i, items_json in enumerate(_files):\n _log = \"Importing #{0} file\".format(i)\n logger.info(_log)\n click.secho(_log, fg='yellow')\n\n with open(items_json, 'r') as fp_items:\n items = json.load(fp_items)\n total_import_records += len(items)\n\n records = ItemsMigrator(items, internal_locations).migrate()\n total_migrated_records += len(records)\n\n with 
open(output_filepath.format(i), 'w') as fp:\n json.dump(records, fp, indent=2)\n\n _log = \"Total number of migrated records: {0}/{1}\".format(\n total_migrated_records, total_import_records)\n logger.info(_log)\n\n click.secho(_log, fg='green')", "def importer():\n\n #Lager liste der eg legg transaksjonar som blir henta og ikkje laga:\n get_list = []\n\n #Gjer txt-fila i mappen om til csv-fil\n file_fixer()\n\n with open(out_path) as file:\n reader = csv.reader(file)\n r_0 = next(reader)\n r_0.append(\"type\")\n r_0.append('amount')\n r_0.append('category')\n r_0.append('account')\n r_0.append('project')\n\n\n for row in reader:\n #Legger til dei fire kollonenne (amount, account, subaacount, project), tomme.\n row.append(\"\")\n row.append(\"\")\n\n #Omformatterer rader:\n row = format_fix(row)\n row.append(\"\")\n row.append(\"\")\n row.append(\"\")\n print(row)\n\n\n try:\n obj, created = Transaction.objects.get_or_create(\n date=row[0],\n transaction_type=row[1],\n description=row[2],\n amount=row[3]\n )\n\n except Transaction.MultipleObjectsReturned:\n continue\n\n if not created:\n get_list.append(obj.pk)\n\n return get_list", "def _import(self, __button):\r\n# WARNING: Refactor _import; current McCabe Complexity metric = 18.\r\n Widgets.set_cursor(self.modulebook.mdcRTK, gtk.gdk.WATCH)\r\n\r\n _import_errors = 0\r\n self._import_log.info('The following records could not be imported to '\r\n 'the open RTK database:\\n')\r\n\r\n # Find the number of existing incidents.\r\n if Configuration.BACKEND == 'mysql':\r\n _query = \"SELECT COUNT(*) FROM rtk_incident\"\r\n elif Configuration.BACKEND == 'sqlite3':\r\n _query = \"SELECT COALESCE(MAX(fld_incident_id)+1, 0) \\\r\n FROM rtk_incident\"\r\n (_num_incidents, _error_code, __) = self._dao.execute(_query,\r\n commit=False)\r\n for i in range(len(self._file_contents) - 1):\r\n _contents = []\r\n\r\n for j in range(len(self._file_index)):\r\n if self._file_index[j] == -1:\r\n _contents.append('')\r\n else:\r\n try:\r\n _contents.append(\r\n self._file_contents[i][self._file_index[j]])\r\n except IndexError:\r\n _contents.append('')\r\n\r\n _contents[14] = _contents[14].replace('$', '')\r\n\r\n # Remove any single and double quotes from the description and\r\n # remarks fields.\r\n for j in[4, 5, 8]:\r\n _contents[j] = _contents[j].replace('\\'', '')\r\n _contents[j] = _contents[j].replace('\\\"', '')\r\n\r\n # Remove any commas that may be in numerical fields.\r\n for j in [12, 14, 15]:\r\n _contents[j] = _contents[j].replace(',', '')\r\n\r\n # Convert all the date fields to ordinal dates.\r\n for j in [19, 22, 25, 28]:\r\n _contents[j] = Utilities.date_to_ordinal(_contents[j])\r\n\r\n # Convert missing values to correct default value.\r\n for j in [0, 1, 2, 3, 6, 7, 13, 15, 18, 20, 21, 23, 24, 26, 27,\r\n 29, 31, 32, 35, 36, 37, 38, 39]:\r\n try:\r\n _contents[j] = Utilities.missing_to_default(\r\n int(_contents[j]), 0)\r\n except ValueError:\r\n _contents[j] = 0\r\n\r\n for j in [16, 17]:\r\n try:\r\n _contents[j] = Utilities.missing_to_default(\r\n int(_contents[j]), -1)\r\n except ValueError:\r\n _contents[j] = -1\r\n\r\n for j in [12, 14, 33]:\r\n try:\r\n _contents[j] = Utilities.missing_to_default(\r\n float(_contents[j]), 0.0)\r\n except ValueError:\r\n _contents[j] = 0.0\r\n\r\n for j in [9, 34]:\r\n try:\r\n _contents[j] = Utilities.missing_to_default(\r\n int(_contents[j]), 1)\r\n except ValueError:\r\n _contents[j] = 1\r\n\r\n if _contents[1] == 0 or _contents[1] is None or _contents[1] == '':\r\n _contents[1] = 
_num_incidents[0][0] + i + 1\r\n\r\n _query = \"INSERT INTO rtk_incident \\\r\n (fld_revision_id, fld_incident_id, \\\r\n fld_incident_category, fld_incident_type, \\\r\n fld_short_description, fld_long_description, \\\r\n fld_criticality, fld_detection_method, fld_remarks, \\\r\n fld_status, fld_test_found, fld_test_case, \\\r\n fld_execution_time, fld_unit, fld_cost, \\\r\n fld_incident_age, fld_hardware_id, fld_sftwr_id, \\\r\n fld_request_by, fld_request_date, fld_reviewed, \\\r\n fld_reviewed_by, fld_reviewed_date, fld_approved, \\\r\n fld_approved_by, fld_approved_date, fld_complete, \\\r\n fld_complete_by, fld_complete_date, fld_life_cycle, \\\r\n fld_analysis, fld_accepted) \\\r\n VALUES ({0:d}, {1:d}, {2:d}, {3:d}, '{4:s}', '{5:s}', \\\r\n {6:d}, {7:d}, '{8:s}', {9:d}, '{10:s}', \\\r\n '{11:s}', {12:f}, {13:d}, {14:f}, {15:d}, \\\r\n {16:d}, {17:d}, {18:d}, {19:d}, {20:d}, \\\r\n {21:d}, {22:d}, {23:d}, {24:d}, {25:d}, \\\r\n {26:d}, {27:d}, {28:d}, {29:d}, '{30:s}', \\\r\n {31:d})\".format(_contents[0], _contents[1],\r\n _contents[2], _contents[3],\r\n _contents[4], _contents[5],\r\n _contents[6], _contents[7],\r\n _contents[8], _contents[9],\r\n _contents[10], _contents[11],\r\n _contents[12], _contents[13],\r\n _contents[14], _contents[15],\r\n _contents[16], _contents[17],\r\n _contents[18], _contents[19],\r\n _contents[20], _contents[21],\r\n _contents[22], _contents[23],\r\n _contents[24], _contents[25],\r\n _contents[26], _contents[27],\r\n _contents[28], _contents[29],\r\n _contents[30], _contents[31])\r\n (_results,\r\n _error_code, __) = self._dao.execute(_query, commit=True)\r\n\r\n if _error_code == 0:\r\n _query = \"INSERT INTO rtk_incident_detail \\\r\n (fld_incident_id, fld_component_id, \\\r\n fld_age_at_incident, fld_failure, fld_suspension, \\\r\n fld_cnd_nff, fld_occ_fault, \\\r\n fld_initial_installation, fld_interval_censored) \\\r\n VALUES ({0:d}, {1:d}, {2:f}, {3:d}, \\\r\n {4:d}, {5:d}, {6:d}, {7:d}, \\\r\n {8:d})\".format(_contents[1], _contents[32],\r\n _contents[33], _contents[34],\r\n _contents[35], _contents[36],\r\n _contents[37], _contents[38],\r\n _contents[39])\r\n (_results,\r\n _error_code, __) = self._dao.execute(_query, commit=True)\r\n else:\r\n self._import_log.info('{0:d} - {1:s}'.format(_contents[1],\r\n _contents[4]))\r\n _import_errors += 1\r\n\r\n if _import_errors > 0:\r\n Widgets.rtk_information(_(u\"Error importing {0:d} program \"\r\n u\"incidents. 
Refer to the import log \"\r\n u\"{1:s} for more details.\").format(\r\n _import_errors, self._import_log))\r\n\r\n Widgets.set_cursor(self.modulebook.mdcRTK, gtk.gdk.LEFT_PTR)\r\n\r\n # Reload the Incident class gtk.TreeView().\r\n self._modulebook.request_load_data(self._dao, self._revision_id)\r\n\r\n return False", "def _clean_updated_items(sender, **kwargs):\n json_values = kwargs[\"json_values\"]\n stash = kwargs[\"stash\"]\n spot = kwargs[\"spot\"]\n\n stash[\"items_to_delete\"] = []\n stash[\"item_ei_to_delete\"] = []\n\n # if we don't have any json items, then return\n if spot is None or \"updated_items\" not in stash:\n return\n\n updated_items = stash[\"updated_items\"]\n\n updated_item_models = []\n updated_item_models_to_stash = {}\n\n # create the lists of items to delete\n items_to_delete = []\n item_ei_to_delete = []\n\n # get the old items\n old_items = spot.item_set.all()\n\n # create item models so we can use a hashmap for matching\n for item in updated_items:\n item_json = item.get_json()\n item_model = Item(\n name=item_json[\"name\"],\n item_category=item_json[\"item_category\"],\n item_subcategory=item_json[\"item_subcategory\"],\n id=item_json[\"id\"],\n spot=spot,\n )\n updated_item_models.append(item_model)\n updated_item_models_to_stash[item_model] = item\n\n # create a hashmap to match old to new, by using old:old\n lookup_hashmap = {}\n\n for old_item in old_items:\n lookup_hashmap[old_item] = old_item\n\n equality_hashmap = {}\n # create a hashmap matching new to old\n for updated_item in updated_item_models:\n if updated_item in lookup_hashmap:\n equality_hashmap[updated_item] = lookup_hashmap.pop(updated_item)\n\n # we should delete any missing from the PUT\n for item_to_delete in lookup_hashmap:\n items_to_delete.append(item_to_delete)\n\n # find items that haven't been updated and remove them\n for (\n updated_item_model,\n old_item,\n ) in equality_hashmap.items():\n\n updated_item = updated_item_models_to_stash[updated_item_model]\n updated_item_form = updated_item.get_form()\n updated_item.set_instance(old_item)\n updated_item_ei = updated_item.get_ei_forms()\n\n # clean up the EI\n old_ei_set = old_item.itemextendedinfo_set.all()\n ei_to_remove = clean_ei(old_ei_set, updated_item_ei)\n item_ei_to_delete += ei_to_remove\n\n # get rid of items that are all the same without EI\n if (\n updated_item_model.name == old_item.name\n and updated_item_model.item_category == old_item.item_category\n and updated_item_model.item_subcategory\n == old_item.item_subcategory\n and len(updated_item_ei) == 0\n ):\n updated_items.remove(updated_item)\n\n stash[\"items_to_delete\"] = items_to_delete\n stash[\"item_ei_to_delete\"] = item_ei_to_delete", "def ImportarTipoItem(request, id_fase, id_tipoitem):\n tipoItemExistente = TipoItem.objects.get(id=id_tipoitem)\n tipoItemNuevo = TipoItem.objects.get(id=id_tipoitem)\n fase = Fase.objects.get(id=id_fase)\n\n tipoItemNuevo.id = None\n tipoItemNuevo.fase = fase\n try:\n tipoItemNuevo.save()\n except IntegrityError as e:\n return render(request, \"keyduplicate_tipoitem.html\",\n {'fase': fase, 'tipoitem': tipoItemNuevo, \"message\": e.message},\n context_instance=RequestContext(request))\n\n atributos = AtributoTipoItem.objects.filter(tipoitem=tipoItemExistente)\n for atributo in atributos:\n atributo.id = None\n atributo.tipoitem = tipoItemNuevo\n atributo.save()\n return HttpResponseRedirect('/admin/todo/tipoitem/' + str(tipoItemNuevo.id))", "def update(self):\n # convert the text list of item identifiers into a list of 
parsed identifiers\n item_identifiers = filter(None, self.packing_list.list_items.replace('\\r', '').split('\\n'))\n # loop through list of parsed identifiers\n for item_identifier in item_identifiers:\n # 1. get the 'item' instance for this identifier and update it (e.g. SubjectRequisition, Aliquot)\n # 2. create a 'packing_list_item' instance related to this packing_list\n for item_model in self.packing_list.item_models:\n try:\n try:\n item = item_model.objects.get(specimen_identifier=item_identifier)\n optional_attrs = {'panel': item.panel, 'item_priority': item.priority}\n except FieldError:\n item = item_model.objects.get(aliquot_identifier=item_identifier)\n optional_attrs = {}\n user = self.user or item.user_modified\n self._update_item(item, user)\n self._create_or_update_packinglistitem(\n item_identifier,\n item,\n user,\n optional_attrs=optional_attrs)\n except item_model.DoesNotExist:\n pass", "def items():", "def import_items_from_json(dump_file, include, rectype=\"item\"):\n dump_file = dump_file[0]\n model, provider = model_provider_by_rectype(rectype)\n\n include_ids = None if include is None else include.split(',')\n with click.progressbar(json.load(dump_file)) as bar:\n records = []\n for record in bar:\n click.echo('Importing item \"{0}({1})\"...'.\n format(record['barcode'], rectype))\n if include_ids is None or record['barcode'] in include_ids:\n\n internal_location_pid_value = \\\n get_internal_location_by_legacy_recid(\n record[\"id_crcLIBRARY\"]).pid.pid_value\n\n record[\"internal_location_pid\"] = internal_location_pid_value\n try:\n record[\"document_pid\"] = get_document_by_legacy_recid(\n record[\"id_bibrec\"]).pid.pid_value\n except DocumentMigrationError:\n continue\n try:\n clean_item_record(record)\n except ItemMigrationError:\n continue\n record = import_record(record, model, provider,\n legacy_id_key='barcode')\n if record:\n records.append(record)\n # Index all new item records\n bulk_index_records(records)", "def reloadItem(self):\n # extract all selected item\n itms = []\n for item in self.scene.selectedItems():\n if isinstance(item, DiagramItem):\n if item.diagramType in [ DiagramItem.Do, DiagramItem.Adapter ]:\n itms.append(item)\n\n # update it with original\n for item in itms:\n if item.diagramType in [ DiagramItem.Do, DiagramItem.Adapter ]:\n funName = item.data['item-data']['data']['function']\n mainName = item.data['item-data']['data']['main-name']\n adpName, clsName = mainName.split('::')\n \n adapters = self.helper.helpAdapters()\n for adp in adapters:\n for cls in adp['classes']:\n if adp['name'] == adpName and cls['name'] == clsName:\n for fct in cls['functions']:\n if fct['name'] == funName:\n # read data and update it\n argsFct = self.parseDocString(docstring=fct['desc'])\n argsFct['function'] = fct['name']\n argsFct['main-name'] = mainName\n if 'default-args' in fct:\n self.addDefaultValues(defaultValues=fct['default-args'], currentFunction=argsFct)\n\n item.data['item-data']['data'] = argsFct", "def test_import_order():\n file_paths = glob.iglob('*/*.py')\n for file_path in file_paths:\n with open(file_path, 'r') as file_obj:\n file_contents = file_obj.read()\n new_file_contents = isort.code(file_contents)\n fail_msg = '{} imports are not compliant'.format(\n file_path)\n yield case.assertEqual, new_file_contents, file_contents, fail_msg", "def test_ensureFailsWhenImported(self):\n module = object()\n modules = {\"m2\": module}\n self.patch(sys, \"modules\", modules)\n e = self.assertRaises(\n ImportError,\n ensureNotImported,\n 
[\"m1\", \"m2\"],\n \"A message.\",\n preventImports=[\"m1\", \"m2\"],\n )\n self.assertEqual(modules, {\"m2\": module})\n self.assertEqual(e.args, (\"A message.\",))", "def save_modified_imports(self):\n # apply changed imports.\n imports_range = self.pe_manager.get_imports_range_in_structures()\n self.PE.__structures__[imports_range[0]:imports_range[1]] = \\\n self.import_structures", "def _handle_load_unknown(self, data, original):\n for key, val in original.items():\n if key not in self.fields:\n data[key] = val\n return data", "def calculate_main_install_items(self):\n # utils.add_to_actions_stack(\"calculating main items to install\")\n if \"MAIN_INSTALL_TARGETS\" not in config_vars:\n raise ValueError(\"'MAIN_INSTALL_TARGETS' was not defined\")\n\n self.main_install_targets.extend(list(config_vars[\"MAIN_INSTALL_TARGETS\"]))\n main_iids, main_guids = utils.separate_guids_from_iids(self.main_install_targets)\n iids_from_main_guids, orphaned_main_guids = self.items_table.iids_from_guids(main_guids)\n main_iids.extend(iids_from_main_guids)\n main_iids, update_iids = self.resolve_special_build_in_iids(main_iids)\n # this is a second time we run this commadn since it's possible more items were added\n main_iids, orphaned_main_iids = self.items_table.iids_from_iids(main_iids)\n update_iids, orphaned_update_iids = self.items_table.iids_from_iids(update_iids)\n\n config_vars[\"__MAIN_INSTALL_IIDS__\"] = sorted(main_iids)\n config_vars[\"__MAIN_UPDATE_IIDS__\"] = sorted(update_iids)\n config_vars[\"__ORPHAN_INSTALL_TARGETS__\"] = sorted(orphaned_main_guids+orphaned_main_iids+orphaned_update_iids)\n\n self.update_mode = \"__REPAIR_INSTALLED_ITEMS__\" in self.main_install_targets", "def _fix_up(self, cls, code_name):", "def test_ordering(self):\r\n def verify_order(source_usage_key, parent_usage_key, source_position=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key)\r\n parent = self.get_item_from_modulestore(parent_usage_key)\r\n children = parent.children\r\n if source_position is None:\r\n self.assertFalse(source_usage_key in children, 'source item not expected in children array')\r\n self.assertEqual(\r\n children[len(children) - 1],\r\n usage_key,\r\n \"duplicated item not at end\"\r\n )\r\n else:\r\n self.assertEqual(\r\n children[source_position],\r\n source_usage_key,\r\n \"source item at wrong position\"\r\n )\r\n self.assertEqual(\r\n children[source_position + 1],\r\n usage_key,\r\n \"duplicated item not ordered after source item\"\r\n )\r\n\r\n verify_order(self.problem_usage_key, self.seq_usage_key, 0)\r\n # 2 because duplicate of problem should be located before.\r\n verify_order(self.html_usage_key, self.seq_usage_key, 2)\r\n verify_order(self.seq_usage_key, self.chapter_usage_key, 0)\r\n\r\n # Test duplicating something into a location that is not the parent of the original item.\r\n # Duplicated item should appear at the end.\r\n verify_order(self.html_usage_key, self.usage_key)", "def module_cleanup():\n from bokeh.core.has_props import _default_resolver\n to_reset = list(panel_extension._imports.values())\n\n _default_resolver._known_models = {\n name: model for name, model in _default_resolver._known_models.items()\n if not any(model.__module__.startswith(tr) for tr in to_reset)\n }", "def delete_error():\r\n item = core.get_all_items()\r\n for i in item:\r\n if \"Error\" in i or \"Warning\" in i:\r\n if core.does_item_exist(i):\r\n reset_error(i)", "def test_short_items_short_input_files(mock_logger, items):\n with pytest.raises(ValueError, 
match=VALUE_ERROR_MSG):\n _run_preproc_function(failing_function, items, KWARGS,\n input_files=SHORT_INPUT_FILES)\n assert len(mock_logger.mock_calls) == 2\n\n # Debug call\n assert_debug_call_ok(mock_logger, items)\n assert mock_logger.debug.call_args[0][3] == (\n \"\\nloaded from original input file(s)\\n['x', 'y', 'z', 'w']\")\n\n # Error call\n assert_error_call_ok(mock_logger)\n error_call_args = mock_logger.error.call_args[0]\n if isinstance(items, (PreprocessorFile, Cube, str)):\n assert repr(items) in error_call_args[2]\n else:\n for item in items:\n assert repr(item) in error_call_args[2]\n assert \"further argument(s) not shown here;\" not in error_call_args[2]\n assert error_call_args[3] == (\n \"\\nloaded from original input file(s)\\n['x', 'y', 'z', 'w']\")", "def clean_up(self):\n # TODO: Implement if needed\n pass", "def _import(self, data):\n if isinstance(data, dict):\n if len(data):\n for key in data:\n if data.get(key) is not None:\n if not self.set(key, data.get(key)):\n raise Exception('%s %s icin dogru bir veri degil.' % (data.get(key), key))", "def remove_collector_imports(self):\n with open(self.filename, \"r+\") as code_file:\n content = code_file.read()\n # Delete file content so the file won't be a mess\n code_file.seek(0)\n code_file.truncate()\n # clean_content will store the content without the import lines.\n clean_content = content\n collector_import_lines = f\"{self.IMPORT_COLLECTOR_LINE}\\n{self.EXPLICIT_DECLARATION_IMPORTS_LINE}\\n\\n\"\n if content.startswith(collector_import_lines):\n logger.debug(\"Removing added import lines.\")\n # Split the content to the parts before and after the collector_import_lines\n content_parts = content.split(collector_import_lines)\n # Restore content to previous form and ignore the first found import lines.\n clean_content = f\"{collector_import_lines}\".join(content_parts[1:])\n\n code_file.write(clean_content)" ]
[ "0.64752066", "0.6060498", "0.5853492", "0.5822626", "0.57927734", "0.5723781", "0.57142884", "0.56797194", "0.56612074", "0.56152695", "0.5584632", "0.55636257", "0.5530712", "0.5489828", "0.5485509", "0.5458437", "0.5394774", "0.5328444", "0.53041655", "0.5287095", "0.5284721", "0.52845454", "0.5269172", "0.52674085", "0.5243959", "0.52145135", "0.5194377", "0.5156987", "0.515584", "0.51274276", "0.51132625", "0.51017827", "0.50957763", "0.5091476", "0.5080139", "0.507268", "0.50551593", "0.50543237", "0.5051382", "0.5033742", "0.5028692", "0.50275016", "0.50222236", "0.50122446", "0.5010752", "0.5005259", "0.5003782", "0.49826384", "0.4981368", "0.4974488", "0.4971039", "0.49702698", "0.49688682", "0.49654138", "0.49565282", "0.49565282", "0.4939715", "0.49274272", "0.4922834", "0.49191672", "0.49189895", "0.4907267", "0.49066073", "0.49020326", "0.4880229", "0.48766243", "0.48612761", "0.48607534", "0.48591042", "0.48575544", "0.48480842", "0.4845487", "0.48437795", "0.48396727", "0.4836956", "0.483195", "0.4828167", "0.48256314", "0.48252264", "0.48242253", "0.48210377", "0.4815789", "0.48096", "0.4802242", "0.47996342", "0.4788858", "0.47872904", "0.4786295", "0.47837564", "0.47797608", "0.47789365", "0.47783816", "0.47750458", "0.47680998", "0.47647694", "0.47641262", "0.4759375", "0.47507632", "0.4738848", "0.47378817" ]
0.5501716
13
Download a zipped project from Dash.
def _download_project(name, apikey):
    payload = {'apikey': apikey, 'project': name, 'version': 'portia'}
    r = requests.get(DASH_API_URL + 'as/project-slybot.zip', params=payload)
    return r.content
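A minimal usage sketch for `_download_project`, assuming `requests` is importable, `DASH_API_URL` points at a reachable Dash API endpoint, and `apikey` is a valid key; the endpoint value, the `save_project_zip` helper, and the example arguments are illustrative assumptions, not part of the source.

import requests

DASH_API_URL = 'https://dash.example.com/api/'  # assumed base URL, illustrative only

def save_project_zip(name, apikey, dest='project.zip'):
    # Fetch the zipped slybot project and persist the raw bytes to disk.
    data = _download_project(name, apikey)
    with open(dest, 'wb') as f:
        f.write(data)
    return dest

# e.g. save_project_zip('my-portia-project', 'MY_API_KEY', dest='my-portia-project.zip')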
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))", "def download_project_archive(request, **kwargs):\n project = kwargs.get(\"project\")\n if request.user.is_authenticated and request.user == project.user:\n filename = project.create_downloadable_archive()\n file_handle = open(filename, \"rb\")\n response = FileResponse(file_handle)\n\n response[\"Content-Length\"] = os.path.getsize(filename)\n response[\n \"Content-Disposition\"\n ] = 'attachment; filename=\"{}.zip\"'.format(project.name)\n\n return response\n else:\n raise PermissionDenied", "def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()", "def download_zip(self, path: Path) -> Path:\n if not self.url:\n raise ValueError(\"Release must have a valid url to download the zip.\")\n\n with requests.get(self.url, stream=True) as response:\n with open(path, \"wb\") as download_file:\n shutil.copyfileobj(response.raw, download_file)\n\n return path", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)", "def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. 
If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def download_dependency_url(name, url, temp_path, build_path, config, zip=True):\n parsed = urlparse(url)\n fn = os.path.basename(parsed.path)\n target_name = os.path.join(temp_path, fn)\n logger.info(f\"Downloading {url} to {target_name}\")\n\n download_file(url, target_name)\n\n if zip:\n with zipfile.ZipFile(target_name, \"r\") as z:\n z.extractall(build_path)\n else:\n shutil.copy(target_name, os.path.join(build_path, \"GameData\"))", "def download(uri: str) -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # create destination dirs\n destination = project_dir / 'data' / 'raw'\n destination.mkdir(exist_ok=True, parents=True)\n\n # download the file\n urllib.request.urlretrieve(uri, destination / \"original.zip\")", "def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"git@github.com:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def download(self):\n cmd = mccli() + \" d f \" + self.localpath + \" -p \" + self.project.name\n \n set_cli_remote(self.project.remote)\n \n child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = child.communicate()\n return CLIResult(out, err, child.returncode)", "def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)", "def download(project, unpack=True, project_dir=None, parent_dir=None, site=None, username=None, token=None):\n download_status = client._download(project,\n project_dir=project_dir,\n parent_dir=parent_dir,\n site=site,\n username=username,\n token=token)\n if unpack and download_status:\n unpack_status = unarchive(download_status.filename, project_dir=project_dir, parent_dir=parent_dir)\n if unpack_status:\n print(unpack_status.status_description)\n return download_status", "def download():\n raise NotImplementedError", "def _download_from_web(*, ds_name: str, ds_path: Path):\n import cgi\n import zipfile\n import httpx\n from tqdm import tqdm\n\n url = 
DATASET_OPTIONS[ds_name]['web']\n if ds_path.exists():\n print('Dataset directory already exists; remove it if you wish to '\n 're-download the dataset')\n return\n\n ds_path.mkdir(parents=True, exist_ok=True)\n\n with httpx.Client() as client:\n with client.stream('GET', url=url) as response:\n if not response.is_error:\n pass # All good!\n else:\n raise RuntimeError(\n f'Error {response.status_code} when trying '\n f'to download {url}')\n\n\n header = response.headers['content-disposition']\n _, params = cgi.parse_header(header)\n # where to store the archive\n outfile = ds_path / params['filename']\n remote_file_size = int(response.headers['content-length'])\n\n with open(outfile, mode='wb') as f:\n with tqdm(desc=params['filename'], initial=0,\n total=remote_file_size, unit='B',\n unit_scale=True, unit_divisor=1024,\n leave=False) as progress:\n num_bytes_downloaded = response.num_bytes_downloaded\n\n for chunk in response.iter_bytes():\n f.write(chunk)\n progress.update(response.num_bytes_downloaded -\n num_bytes_downloaded)\n num_bytes_downloaded = (response\n .num_bytes_downloaded)\n\n assert outfile.suffix == '.zip'\n\n with zipfile.ZipFile(outfile) as zip:\n for zip_info in zip.infolist():\n path_in_zip = Path(zip_info.filename)\n # omit top-level directory from Zip archive\n target_path = str(Path(*path_in_zip.parts[1:]))\n if str(target_path) in ('.', '..'):\n continue\n if zip_info.filename.endswith('/'):\n (ds_path / target_path).mkdir(parents=True, exist_ok=True)\n continue\n zip_info.filename = target_path\n print(f'Extracting: {target_path}')\n zip.extract(zip_info, ds_path)\n\n outfile.unlink()", "def download():\n try:\n response = send_from_directory(\n app.config.get(\"DATA_DIR\"), \"whiteboard.zip\", as_attachment=True\n )\n\n # change headers to stop browser from delivering cached version\n response.headers[\"Last-Modified\"] = datetime.now()\n response.headers[\n \"Cache-Control\"\n ] = \"no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"-1\"\n\n return response\n\n except:\n return traceback.format_exc()", "def download(urls, dest_folder):\n pass", "def download(ctx: click.Context, **kwargs):\n root_commands.cmd_download(ctx.obj, **kwargs)", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download(url, to):\n filename = url.rstrip('/').split('/')[-1] + '.zip'\n r = requests.get(url, stream=True)\n\n outpath = os.path.join(to, filename)\n\n with open(outpath, 'wb') as fd:\n for chunk in r.iter_content(1024 * 1024):\n fd.write(chunk)\n\n return outpath", "def download_and_unzip_dataset(url, path):\n dl = urllib.urlretrieve(url)\n zf = zipfile.ZipFile(dl[0])\n zf.extractall(path)\n return zf", "def download_data():\n\n if not os.path.exists(zipfile_path):\n print(f'Downloading {config.download_url} to {zipfile_path}')\n urlretrieve(config.download_url, zipfile_path)\n print(f'Successfully downloaded {zipfile_path}')\n\n zip_ref = ZipFile(zipfile_path, 'r')\n zip_ref.extractall(config.raw_data_dir)\n zip_ref.close()\n\n os.rename(f\"{config.raw_data_dir}/cornell movie-dialogs corpus\", extracted_dir)", "def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 
'https://docs.google.com/uc?export=download&id='{ID} -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)", "def download_archive(self):\n\n def time_convert(structure):\n \"\"\"\n :param structure: tuple representation of time\n :return: GitHub archive time\n \"\"\"\n \n \n join_number_to_zero = lambda number: (\"\" if number > 9 else \"0\") + str(number)\n\n return \"%s-%s-%s-%s\" % (\n structure.tm_year, join_number_to_zero(structure.tm_mon), join_number_to_zero(structure.tm_mday),\n structure.tm_hour)\n\n current_time = self.get_time()\n self.logger.debug(__name__ + \": \" + \"current time: \" + str(gmtime(current_time)))\n\n difference = -25200\n #timezone difference in seconds between GMT and west coast of USA\n\n downloading_time = int(timegm(self.config[\"last_connection_time\"])) + 3600\n self.logger.debug(__name__ + \": \" + \"downloading time: \" + str(gmtime(downloading_time)))\n\n if downloading_time > current_time - 7200:\n self.logger.info(__name__ + \": \" + \"unable to download file (time limiting).\")\n return\n\n downloading_time += difference\n\n json_file_name = self.download_file(time_convert(gmtime(downloading_time)))\n\n self.config[\"last_connection_time\"] = gmtime(downloading_time - difference)\n self.logger.debug(__name__ + \": \" + \"last_connection_time: \" + str(self.config[\"last_connection_time\"]))\n\n return json_file_name", "def unzip() -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # define the destination\n destination = project_dir / 'data' / 'raw'\n\n # extract zip\n zip_file = ZipFile(destination / \"original.zip\")\n zip_file.extractall(destination)", "def download():\n\treturn response.download(request, db)", "def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return 
response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download(self, download_path):\n return", "def download_one_zip(data_url, data_dir):\r\n\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.makedirs(unzip_dir)\r\n r = requests.get(data_url, stream=True)\r\n with open(zipfile_path, \"wb\") as py_file:\r\n for chunk in r.iter_content(chunk_size=1024): # 1024 bytes\r\n if chunk:\r\n py_file.write(chunk)\r\n unzip_nested_zip(zipfile_path, unzip_dir), download_small_file", "def fetch(data_dir):\n file_path = os.path.join(data_dir, DESTINATION, ZIP_NAME)\n result_path = os.path.join(data_dir, DESTINATION, NAME)\n return utils.fetch(URL, file_path, result_path)", "def download_and_extract(self, package_name):\n self.download(package_name)\n self.extract(package_name)", "def download_data():\n url = 'https://www.dropbox.com/s/xk4glpk61q3qrg2/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()", "def download_data():\n url = 'https://www.dropbox.com/s/8oehplrobcgi9cq/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()", "def _download(data_folder): # pragma: no cover\n\n logger.info(f\"Downloading {SOURCE_URL}.\")\n\n with urlopen(SOURCE_URL) as zipresp:\n with zipfile.ZipFile(io.BytesIO(zipresp.read())) as zfile:\n zfile.extractall(data_folder)", "def _download_zip(self, zip_url, dest_dir):\n # TODO(jsirois): Wrap with workunits, progress meters, checksums.\n self.context.log.info('Downloading {}...'.format(zip_url))\n sess = requests.session()\n sess.mount('file://', self.LocalFileAdapter())\n res = sess.get(zip_url)\n if not res.status_code == requests.codes.ok:\n raise TaskError('Failed to download {} ({} error)'.format(zip_url, res.status_code))\n\n with open_zip(BytesIO(res.content)) as zfile:\n safe_mkdir(dest_dir)\n for info in zfile.infolist():\n if info.filename.endswith('/'):\n # Skip directories.\n continue\n # Strip zip directory name from files.\n filename = os.path.relpath(info.filename, get_basedir(info.filename))\n f = safe_open(os.path.join(dest_dir, filename), 'w')\n f.write(zfile.read(info))\n f.close()", "def cli(date, path, mission):\n download.main(path, mission, date)", "def package(request, name):\n return HttpResponse(get_koji_download_url(name), mimetype='application/json')", "def package(request, name):\n return HttpResponse(get_koji_download_url(name), mimetype='application/json')", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n 
file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def download():\n \"\"\"\n \"The book p.79 have error.\n \"https://github.com/login/oauth/authorize?client_id=7e0a3cd836d3e544dbd9&redirect_uri=https%3A%2F%2Fgist.github.com%2Fauth%2Fgithub%2Fcallback%3Freturn_to%3Dhttps%253A%252F%252Fgist.github.com%252Fyoungsoul%252Ffc69665c5d08e189c57c0db0e93017a6&response_type=code&state=9b385430ee7cd1a75ca91c1d1cb6c565111f6b81e54a71f42ae9b22035241b9b\n \"\"\"\n subprocess.call([\n 'wget',\n 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat', \n '-P',\n 'origin_data/'\n ])\n logger.info('Download success!')", "def prepare_project(path: str):\n zip_path = os.path.join(path, 'Simulation.Machine.V1.zip')\n\n # Download zip file with project\n requested_file = requests.get(URL)\n with open(zip_path, 'wb') as f:\n f.write(requested_file.content)\n\n # Extract contents\n with ZipFile(zip_path, 'r') as zip_obj:\n zip_obj.extractall(path)\n\n # Remove file\n os.remove(zip_path)", "def download(path):\n\n # Check if directory exists\n if not os.path.isdir(path + \"birdvox_dcase_20k\"):\n print(\"Creating birdvox_dcase_20k Directory\")\n os.mkdir(path + \"birdvox_dcase_20k\")\n base = \"https://zenodo.org/record/1208080/files/\"\n filename = \"BirdVox-DCASE-20k.zip\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n url = base + filename + \"?download=1\"\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)\n url = \"https://ndownloader.figshare.com/files/10853300\"\n filename = \"data_labels.csv\"\n if not os.path.exists(path + \"birdvox_dcase_20k/\" + filename):\n urllib.request.urlretrieve(url, path + \"birdvox_dcase_20k/\" + filename)", "def download(self, args):\n\n\t\t\"\"\" Default argument for Architecture \"\"\"\n\t\tif len(args) >= 4:\n\t\t\tarch = args[3]\n\t\telse:\n\t\t\tarch = platform.processor()\n\n\t\t\"\"\" Default argument for Version \"\"\"\n\t\tif len(args) >= 3:\n\t\t\tif args[2] == \"latest\":\n\t\t\t\tversion = \"Latest\"\n\t\t\telse:\n\t\t\t\tversion = args[2]\n\t\telse:\n\t\t\tversion = \"Latest\"\n\n\t\t\"\"\" Find package path from package list, based on prev. 
arguments \"\"\"\n\t\tif len(args) >= 2:\n\t\t\tpackage = args[1]\n\t\t\tfilename = False\n\t\t\t\n\t\t\tversions = self.master.Dump(package)\n\t\t\tfor d in versions:\n\t\t\t\tif d[\"Version\"] == version:\n\t\t\t\t\tif d[\"Version\"] != \"Latest\" and d[\"Architecture\"] == arch:\n\t\t\t\t\t\tfilename = d[\"Filename\"]\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor e in versions:\n\t\t\t\t\t\t\tif e[\"Version\"] == d[\"LatestVersion\"] and e[\"Architecture\"] == arch:\n\t\t\t\t\t\t\t\tfilename = e[\"Filename\"]\n\t\t\t\t\t\t\t\tversion = d[\"LatestVersion\"];\n\t\t\tif not filename:\n\t\t\t\tself.write_line(\"ERROR XXX: Package not found.\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Find chunks to download \"\"\"\n\t\t\tid = 0\n\t\t\tto_download = False\n\t\t\tfor f in self.torrent_info.files():\n\t\t\t\tprint(f.path.replace(\"packages/\", \"\") + \" = \" + filename);\n\t\t\t\tif f.path.replace(\"packages/\", \"\") == filename:\n\t\t\t\t\tto_download = f\n\t\t\t\t\tbreak;\n\t\t\t\tid += 1\n\t\t\tif not to_download:\n\t\t\t\tprint(\"ERROR XXX: dunno\")\n\t\t\t\treturn\n\n\t\t\t\"\"\" Set chunks priority to 7? (download max priority) \"\"\"\n\t\t\tpr = self.torrent_info.map_file(id, 0, to_download.size);\n\t\t\tn_pieces = math.ceil(pr.length / self.torrent_info.piece_length() + 1);\n\n\t\t\tfor i in range(self.torrent_info.num_pieces()):\n\t\t\t\tif i in range(pr.piece, pr.piece + n_pieces):\n\t\t\t\t\tself.handler.piece_priority(i, 7)\n\n\n\t\t\t\"\"\" Print download of package status \"\"\"\n\t\t\tself.print_status(id, pr, package, version, filename)\n\t\t\t\t\n\t\t\t\"\"\" Check the server for hash validation \"\"\"\n\t\t\tif self.valid_tpkg_file(to_download.path):\n\t\t\t\tself.write_line(\"DONE {0} {1} {2} {3}\".format(package, version, arch, self.config[\"daemon\"][\"rootdir\"] + \"/\" + to_download.path).replace('//', '/'))\n\t\t\telse:\n\t\t\t\tself.write_line(\"ERROR XXX: Hash verification failed.\")\n\t\telse:\n\t\t\tself.write_line(\"INVALID ARGUMENTS\");", "def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def download_latest_version(target_name = '', target_dir = None):\n url_address = 'https://github.com/muhammadfredo/FrMaya/archive/master.zip'\n if target_dir is None:\n temp_dir = path.Path(tempfile.gettempdir())\n else:\n temp_dir = path.Path(target_dir)\n temp_frmaya_zip = temp_dir / '{}.zip'.format(target_name)\n temp_frmaya_dir = temp_dir / target_name\n\n with open(temp_frmaya_zip, 'wb') as temp_zip:\n temp_zip.write(urllib2.urlopen(url_address).read())\n zipfile.ZipFile(temp_frmaya_zip).extractall(temp_frmaya_dir)\n\n return path.Path(temp_frmaya_zip).abspath(), path.Path(temp_frmaya_dir).abspath()", "def download(self, url: str, dest: PathLike, force: bool = False):", "def download_data(url, dest, *a, **kw):\n pth = os.path.join(\n os.path.dirname(__file__),\n 'data',\n 'allCountries.zip'\n )\n\n open(dest, 'w').write(open(pth).read())", "def 
maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,\n reporthook=_progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def _download_archive(self):\n _logger.debug('Downloading archive...')\n response = urlopen(self.url)\n\n with open(self._archive_full_path, 'wb') as archive_file:\n chunk_size = 1024 * 1024 # 1 MB\n chunk = response.read(chunk_size)\n\n while chunk:\n archive_file.write(chunk)\n chunk = response.read(chunk_size)\n\n _logger.debug('Archive {name} has been successfully downloaded.'.format(name=self.archive_name))", "def download(parent, name=None):\n with cd(parent):\n if not name:\n run(\"drush dl\")\n else:\n run(\"drush dl --drupal-project-rename=%s\" % name)", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def download(self, cloud_path):\n zip_file = os.path.join(self.root, ZIPFILE)\n unzip_dir = os.path.join(self.root, UNZIP_NAME)\n\n if os.path.isfile(zip_file):\n logger.debug(f\"File {zip_file} exists. Skip download.\")\n else:\n client = GCSClient()\n object_key = os.path.join(NYU_GCS_PATH, ZIPFILE)\n\n logger.debug(\n f\"Downloading file {zip_file} from gs://{const.GCS_BUCKET}/\"\n f\"{object_key}\"\n )\n client.download(const.GCS_BUCKET, object_key, zip_file)\n\n if os.path.isdir(unzip_dir):\n logger.debug(f\"File {unzip_dir} exists. 
Skip unzip.\")\n else:\n # unzip the file\n with ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(self.root)\n logger.debug(f\"Unzip file from {zip_file}\")", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download_and_unzip(url, zip_path, csv_path, data_folder):\n\n download_from_url(url, zip_path)\n\n unzip(zip_path, csv_path, data_folder)\n\n print('Done.')", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def download_file(self, name):\n #TODO: handle exceptions\n archive_name = name + \".json.gz\"\n file_name = join(self.new_data_dir, name + \".json\")\n\n try:\n urlretrieve(\"http://data.githubarchive.org/\" + archive_name,\n filename=join(self.downloaded_data_dir, archive_name))\n except IOError:\n self.logger.error(__name__ + \": \" + \"unable to download file (error creating connection).\")\n\n try:\n archive = gz_open(join(self.downloaded_data_dir, archive_name))\n except IOError:\n self.logger.error(__name__ + \": \" + \"unable to open gzipped file (file not created).\")\n else:\n json_file = open(file_name, \"w\")\n json_file.write(archive.read())\n\n archive.close()\n json_file.close()\n\n remove(join(self.downloaded_data_dir, archive_name))\n\n return file_name" ]
[ "0.6970031", "0.6887985", "0.68189776", "0.681829", "0.6720921", "0.66710734", "0.65784806", "0.65546376", "0.6551284", "0.6536828", "0.64772105", "0.63660634", "0.63608265", "0.635872", "0.634232", "0.63057834", "0.630051", "0.6292988", "0.6290236", "0.62840354", "0.62497574", "0.6212216", "0.61953944", "0.61892396", "0.61676174", "0.6166975", "0.615335", "0.6149776", "0.61192924", "0.6110998", "0.61091155", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.61082035", "0.60958266", "0.60879827", "0.6078524", "0.60619205", "0.6060406", "0.6058679", "0.6058073", "0.6053415", "0.60525876", "0.60470533", "0.60470533", "0.60447365", "0.6043012", "0.6003561", "0.5995359", "0.59924674", "0.59920245", "0.598605", "0.5977819", "0.59567344", "0.5954464", "0.5950055", "0.59455055", "0.59424776", "0.594118", "0.5936356", "0.59345585", "0.59345585", "0.59345585", "0.59345585", "0.59345585", "0.59345585", "0.5919683", "0.5914025", "0.58943915" ]
0.7049751
0
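
The negatives in the record above repeat one idiom: download an archive to disk, then unpack it with tarfile or zipfile. A minimal, self-contained sketch of that pattern (the URL and destination directory are hypothetical placeholders, not taken from the record):

import os
import tarfile
import urllib.request

def fetch_and_extract(url, dest_dir):
    os.makedirs(dest_dir, exist_ok=True)
    archive_path = os.path.join(dest_dir, url.split('/')[-1])
    # skip the download if the archive is already cached locally
    if not os.path.exists(archive_path):
        urllib.request.urlretrieve(url, archive_path)
    # unpack the gzipped tarball next to where it was saved
    with tarfile.open(archive_path, 'r:gz') as tar:
        tar.extractall(dest_dir)

# e.g. fetch_and_extract('https://example.com/data.tgz', './data')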
Add a file to a zip archive.
def _add_to_archive(archive, filename, contents, tstamp):
    fileinfo = zipfile.ZipInfo(filename, tstamp)
    fileinfo.external_attr = 0666 << 16L
    archive.writestr(fileinfo, contents, zipfile.ZIP_DEFLATED)
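
The positive document above is Python 2 code (the 0666 octal literal and the 16L long suffix are not valid Python 3). A minimal Python 3 port of the same helper, assuming the same 6-tuple timestamp convention, might look like this:

import zipfile

def _add_to_archive_py3(archive, filename, contents, tstamp):
    # tstamp is a (year, month, day, hour, minute, second) tuple,
    # e.g. datetime.now().timetuple()[:6]
    fileinfo = zipfile.ZipInfo(filename, tstamp)
    # world-readable/writable permissions live in the upper 16 bits
    fileinfo.external_attr = 0o666 << 16
    archive.writestr(fileinfo, contents, zipfile.ZIP_DEFLATED)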
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, *args, **kwargs):\n self.zipfile.write(*args, **kwargs)", "def add_to_zip(zipfile, zippath, files):\n \n if zipfile == 'n' or zipfile == '':\n return False\n zippath = os.path.normpath(zippath) + '/'\n if os.path.isdir(zippath):\n z = subprocess.call(['zip', '-0', zippath + zipfile] + files)\n if z != 0:\n print(\"zip returned\", z)\n return False\n else:\n print(\"Could not create zip. Not a valid directory:\", zippath)\n return False\n return True", "def _add_tag_file(zip_file, dir_name, tag_info_list, tag_tup):\n tag_name, tag_str = tag_tup\n tag_path = d1_common.utils.filesystem.gen_safe_path(dir_name, tag_name)\n tag_iter = _create_and_add_tag_iter(zip_file, tag_path, tag_str)\n tag_info_list.append(\n {\n \"path\": tag_path,\n \"checksum\": d1_common.checksum.calculate_checksum_on_iterator(\n tag_iter, TAG_CHECKSUM_ALGO\n ),\n }\n )", "def add_zip_file_contents_to_resource(pk, zip_file_path):\r\n zfile = None\r\n resource = None\r\n try:\r\n resource = utils.get_resource_by_shortkey(pk, or_404=False)\r\n zfile = zipfile.ZipFile(zip_file_path)\r\n num_files = len(zfile.infolist())\r\n zcontents = utils.ZipContents(zfile)\r\n files = zcontents.get_files()\r\n\r\n resource.file_unpack_status = 'Running'\r\n resource.save()\r\n\r\n for i, f in enumerate(files):\r\n logger.debug(\"Adding file {0} to resource {1}\".format(f.name, pk))\r\n utils.add_file_to_resource(resource, f)\r\n resource.file_unpack_message = \"Imported {0} of about {1} file(s) ...\".format(\r\n i, num_files)\r\n resource.save()\r\n\r\n # This might make the resource unsuitable for public consumption\r\n resource.update_public_and_discoverable()\r\n # TODO: this is a bit of a lie because a different user requested the bag overwrite\r\n utils.resource_modified(resource, resource.creator, overwrite_bag=False)\r\n\r\n # Call success callback\r\n resource.file_unpack_message = None\r\n resource.file_unpack_status = 'Done'\r\n resource.save()\r\n\r\n except BaseResource.DoesNotExist:\r\n msg = \"Unable to add zip file contents to non-existent resource {pk}.\"\r\n msg = msg.format(pk=pk)\r\n logger.error(msg)\r\n except:\r\n exc_info = \"\".join(traceback.format_exception(*sys.exc_info()))\r\n if resource:\r\n resource.file_unpack_status = 'Error'\r\n resource.file_unpack_message = exc_info\r\n resource.save()\r\n\r\n if zfile:\r\n zfile.close()\r\n\r\n logger.error(exc_info)\r\n finally:\r\n # Delete upload file\r\n os.unlink(zip_file_path)", "def _add_tag_manifest_file(zip_file, dir_name, tag_info_list):\n _add_tag_file(\n zip_file, dir_name, tag_info_list, _gen_tag_manifest_file_tup(tag_info_list)\n )", "def addFile(self, filePath, baseToRemove=\"\", inZipRoot=None):\n\t\tinZipPath = os.path.relpath(filePath, baseToRemove)\n\t\t\n\t\tif inZipRoot!=None:\n\t\t\tinZipPath = os.path.join(inZipRoot, inZipPath)\n\t\t\n\t\tif self.verbose:\n\t\t\tprint \"Adding file: \" + filePath\n\t\t\tprint \"\tUnder path: \" + inZipPath\n\t\tself.write(filePath, inZipPath)", "def create_zip(file_dir):\n curr_path = os.getcwd()\n os.chdir(file_dir)\n zip_name = 'files_archive_{}.zip'.format(\n str(datetime.datetime.now())[5:16].replace(' ', \"_\"))\n files = os.listdir()\n print(\"Creating zipfile from files in...\", file_dir)\n with zipfile.ZipFile(zip_name, 'w') as zip:\n for f in files:\n zip.write(f)\n print(\"Added file: \", f)\n\n zip_path = file_dir + \"/\" + zip_name\n os.chdir(curr_path)\n # double check if path is absolute\n if os.path.isabs(zip_path):\n return zip_path\n else:\n return os.getcwd() + \"/\" + 
zip_name", "def toZip(self, file, zip_location):\n zip_file = zipfile.ZipFile(zip_location, 'w')\n if os.path.isfile(file):\n zip_file.write(file)\n else:\n self.__addFolderToZip(zip_file, file)\n print \"Wrote %s to %s\"%(file,zip_location)\n zip_file.close()", "def add_zip(manager, zipfile, incref=False):\n from .core.cache.buffer_cache import empty_dict_checksum, empty_list_checksum\n result = []\n for checksum in zipfile.namelist():\n if checksum in (empty_dict_checksum, empty_list_checksum):\n continue\n checksum2 = bytes.fromhex(checksum)\n buffer = zipfile.read(checksum)\n checksum3 = calculate_checksum(buffer)\n if checksum3 != checksum2:\n raise ValueError(\"Incorrect checksum for zipped file '{}'\".format(checksum))\n buffer_cache.cache_buffer(checksum2, buffer)\n if incref:\n buffer_cache.incref(checksum2, authoritative=False)\n result.append(checksum)\n return result", "def add(self, archive):\n if archive.name not in self._root['archives']:\n self._root['archives'][archive.name] = archive", "def add_file(self, fname):\n if not no_tarball:\n out_tar = os.path.join(outputdir, self.name)\n t_fname = os.path.join(outputdir, fname)\n archive = tarfile.open(out_tar, 'a')\n\n archive.add(t_fname, fname.replace(runID, ''))\n archive.close()\n try:\n if not os.path.isdir(t_fname):\n os.remove(t_fname)\n else:\n shutil.rmtree(t_fname)\n except OSError:\n log.error(\"Added to archive, but could not delete {0}.\".format(t_fname))", "def add_file(afile, filename='files'):\n \n with open(filename, 'a') as f:\n f.write(afile)", "def save_zip(zip_file, archive_dir):\n zipdata = zipfile.ZipFile(zip_file, mode='w')\n\n for root, dirs, files in os.walk(archive_dir):\n for name in files:\n fname = os.path.join(root, name)\n zipdata.write(fname)\n zipdata.close()", "def add_file(self, path):\n assert self._root_dir.is_parent_of(path)\n self._files.append(path)", "def add_files(cls, document, source_files):\n with zipfile.ZipFile(document, 'a') as open_document:\n for src_file in source_files:\n open_document.write(src_file, Manifest.file_path_to_document_path(src_file))", "def _addElemToZip(destZip, rootElem, fileName):\n elemTree = ElementTree.ElementTree(rootElem)\n with io.BytesIO() as output:\n elemTree.write(output, 'utf-8', True)\n destZip.writestr(fileName, output.getvalue())", "def create_backup_file(self, source_dir, archive_file):\n tar_file = tarfile.open(archive_file, 'w|gz')\n try:\n tar_file.add(source_dir)\n finally:\n tar_file.close()", "def writeFileToZip(zipFile, sourcePath, targetPath):\n targetPath = targetPath.replace(\"\\\\\", \"/\") # Zips use forward slashes\n utcTime = time.gmtime(os.stat(sourcePath).st_mtime)\n \n if os.path.isdir(sourcePath):\n if not targetPath.endswith(\"/\"):\n targetPath += \"/\" # Trailing slash denotes directory for some zip packages\n\n zi = zipfile.ZipInfo(targetPath, utcTime[0:6])\n zi.compress_type = zipfile.ZIP_DEFLATED\n zi.external_attr = 0x00000010L # FILE_ATTRIBUTE_DIRECTORY\n zipFile.writestr(zi, \"\")\n else: \n f = open(sourcePath, \"rb\")\n try:\n data = f.read()\n finally:\n f.close()\n \n zi = zipfile.ZipInfo(targetPath, utcTime[0:6])\n zi.compress_type = zipfile.ZIP_DEFLATED\n zi.external_attr = 0x00000020L # FILE_ATTRIBUTE_ARCHIVE\n zipFile.writestr(zi, data)", "def AddToZip(self, input_zip, output_zip, input_path=None):\n\n self.UnmountAll()\n\n common.ZipWriteStr(output_zip, \"META-INF/com/google/android/updater-script\",\n \"\\n\".join(self.script) + \"\\n\")\n\n if input_path is None:\n data = 
input_zip.read(\"OTA/bin/updater\")\n else:\n data = open(input_path, \"rb\").read()\n common.ZipWriteStr(output_zip, \"META-INF/com/google/android/update-binary\",\n data, perms=0o755)", "def add_file(self, path):\n pass", "def zip_file(file_path: str) -> str:\n zip_file_path: str = file_path + \".gz\"\n\n print(f\"Compressing {file_path} into {zip_file_path}\")\n timestamp=path.getmtime(file_path)\n with open(file_path, \"rb\") as read_stream:\n with gzip.open(zip_file_path, \"wb\") as write_stream:\n shutil.copyfileobj(read_stream, write_stream)\n os.utime(zip_file_path, (timestamp,timestamp) )\n\n return zip_file_path", "def _add_manifest_files(zip_file, dir_name, payload_info_list, tag_info_list):\n for checksum_algorithm in _get_checksum_algorithm_set(payload_info_list):\n _add_tag_file(\n zip_file,\n dir_name,\n tag_info_list,\n _gen_manifest_file_tup(payload_info_list, checksum_algorithm),\n )", "def add_file(folder, filename, contents, bucket):\n if bucket.get_key(folder.name):\n path = \"{0}/{1}\".format(folder.name, filename)\n key = bucket.new_key(path)\n return key.set_contents_from_string(contents)", "def _zip_archive(extracted_source, exclude_files=None, **_):\n ctx.logger.debug(\"Zipping source {source}\".format(source=extracted_source))\n exclude_files = exclude_files or []\n ctx.logger.debug('Excluding files {l}'.format(l=exclude_files))\n with tempfile.NamedTemporaryFile(suffix=\".zip\",\n delete=False) as updated_zip:\n updated_zip.close()\n with zipfile.ZipFile(updated_zip.name,\n mode='w',\n compression=zipfile.ZIP_DEFLATED) as output_file:\n for dir_name, subdirs, filenames in os.walk(extracted_source):\n # Make sure that the files that we don't want\n # to include (e.g. plugins directory) will not be archived.\n exclude_dirs(dir_name, subdirs, exclude_files)\n for filename in filenames:\n # Extra layer of validation on the excluded files.\n if not exclude_file(dir_name, filename, exclude_files):\n # Create the path as we want to archive it to the\n # archivee.\n file_to_add = os.path.join(dir_name, filename)\n # The name of the file in the archive.\n if file_storage_breaker(file_to_add):\n continue\n arc_name = file_to_add[len(extracted_source)+1:]\n output_file.write(file_to_add, arcname=arc_name)\n archive_file_path = updated_zip.name\n return archive_file_path", "def pack_file(zip_write, filename: str, suppress_error=False):\n if '\\t' in filename:\n # We want to rename the file!\n filename, arcname = filename.split('\\t')\n else:\n arcname = filename\n\n if filename[-1] == '*':\n # Pack a whole folder (blah/blah/*)\n directory = filename[:-1]\n file_count = 0\n for poss_path in RES_ROOT:\n dir_path = os.path.normpath(\n os.path.join(poss_path, directory)\n )\n if not os.path.isdir(dir_path):\n continue\n for subfile in os.listdir(dir_path):\n full_path = os.path.join(dir_path, subfile)\n rel_path = os.path.join(directory, subfile)\n zip_write(\n filename=full_path,\n arcname=rel_path,\n )\n file_count += 1\n LOGGER.info('Packed {} files from folder \"{}\"', file_count, directory)\n return\n\n for poss_path in RES_ROOT:\n full_path = os.path.normpath(\n os.path.join(poss_path, filename)\n )\n if os.path.isfile(full_path):\n zip_write(\n filename=full_path,\n arcname=arcname,\n )\n break\n else:\n if not suppress_error:\n LOGGER.warning(\n '\"bee2/' + filename + '\" not found! 
(May be OK if not custom)'\n )", "def add_zip(self, zipfile, valid_extensions=('txt', 'csv', 'xls', 'xlsx'), encoding='utf8',\n doc_label_fmt_txt='{path}-{basename}', doc_label_path_join='_', doc_label_fmt_tabular='{basename}-{id}',\n force_unix_linebreaks=True, **kwargs):\n\n tmpdir = mkdtemp()\n\n read_size = kwargs.pop('read_size', -1)\n\n with ZipFile(zipfile) as zipobj:\n for member in zipobj.namelist():\n path_parts = path_recursive_split(member)\n\n if not path_parts:\n continue\n\n dirs, fname = path_parts[:-1], path_parts[-1]\n\n basename, ext = os.path.splitext(fname)\n basename = basename.strip()\n\n if ext:\n ext = ext[1:]\n\n if ext in valid_extensions:\n tmpfile = zipobj.extract(member, tmpdir)\n\n if ext in {'csv', 'xls', 'xlsx'}:\n self.add_tabular(tmpfile, encoding=encoding, doc_label_fmt=doc_label_fmt_tabular,\n force_unix_linebreaks=force_unix_linebreaks, **kwargs)\n else:\n doclabel = doc_label_fmt_txt.format(path=doc_label_path_join.join(dirs),\n basename=basename,\n ext=ext)\n\n if doclabel.startswith('-'):\n doclabel = doclabel[1:]\n\n self.add_files(tmpfile, doc_labels=[doclabel], encoding=encoding, read_size=read_size,\n force_unix_linebreaks=force_unix_linebreaks)\n\n return self", "def add_file(self, filename):\n # If absolute path\n if filename[0] == '/':\n self.filenames.append(filename)\n else:\n filename = self.current_path + filename\n self.filenames.append(filename)", "def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, root))", "def load_file_in_archive(req_path, archive, archive_ext, zip_path, name, ext):\n BASE_DIR = ROOT\n full_path = u\"%s/%s.%s\" % (req_path, archive, archive_ext)\n zip_path = u\"%s/%s.%s\" % (zip_path, name, ext)\n full_real_path = get_real_path(BASE_DIR, full_path)\n logger.info(\"%s, %s\", full_real_path, [to_hex(c) for c in full_real_path])\n try:\n zip_path = unquote(zip_path).encode('utf-8')\n except Exception as e:\n logger.info(\"Failed to encode: %s\", zip_path)\n\n if ext == 'thm' or archive_ext not in archive_exts:\n logger.info(\"Unsupported file\")\n return ('', 204)\n\n if os.path.exists(full_real_path.encode('utf-8')):\n logger.info(\"File doesn't exist: %s\", full_real_path)\n return ('', 204)\n\n #Only zip files are supported <path>/file.zip/1/01.jpg\n ## Render single file\n with zipfile.ZipFile(full_real_path) as zf:\n for name in zf.namelist():\n encoded_name = name.decode('euc-kr').encode('utf-8')\n logger.info(\"%s(%s), %s(%s), %s, %s\", encoded_name, type(encoded_name), zip_path, type(zip_path),\n [to_hex(c) for c in name], [to_hex(c) for c in zip_path])\n if encoded_name == zip_path:\n with zf.open(name) as f:\n bytesIO = BytesIO()\n bytesIO.write(f.read())\n bytesIO.seek(0)\n return flask.send_file(bytesIO, attachment_filename=os.path.basename(zip_path), as_attachment=True)\n logger.error(\"No file Name: %s\", zip_path)\n return ('', 204)", "def unzip_file(zipfile_path, target_dir, touchfile_path):\r\n with zipfile.ZipFile(zipfile_path, 'r') as zip_ref:\r\n zip_ref.extractall(target_dir)\r\n\r\n with open(touchfile_path, 'w') as touchfile:\r\n touchfile.write(f'unzipped {zipfile_path}')", "def add_file(self, oozie_file_path: str) -> None:\n self.file_path_processor.check_path_for_comma(oozie_file_path)\n split_by_hash_sign(oozie_file_path)\n self.files.append(oozie_file_path)\n self.hdfs_files.append(self.file_path_processor.preprocess_path_to_hdfs(oozie_file_path))", "def _taradd(func, 
tar_file, name):\n with tempfile.NamedTemporaryFile('wb', delete=False) as temp_file:\n func(temp_file)\n temp_file.close()\n tar_file.add(temp_file.name, arcname=name)\n if os.path.isfile(temp_file.name):\n os.remove(temp_file.name)", "def Zip(args):\n parser = argparse.ArgumentParser(description=Zip.__doc__)\n parser.add_argument(\n '-r', dest='recursive', action='store_true',\n default=False,\n help='recurse into directories')\n parser.add_argument(\n '-q', dest='quiet', action='store_true',\n default=False,\n help='quiet operation')\n parser.add_argument('zipfile')\n parser.add_argument('filenames', nargs='+')\n options = parser.parse_args(args)\n\n src_files = []\n for filename in options.filenames:\n globbed_src_args = glob.glob(filename)\n if not globbed_src_args:\n if not options.quiet:\n print('zip warning: name not matched: %s' % filename)\n\n for src_file in globbed_src_args:\n src_file = os.path.normpath(src_file)\n src_files.append(src_file)\n if options.recursive and os.path.isdir(src_file):\n for root, dirs, files in os.walk(src_file):\n for dirname in dirs:\n src_files.append(os.path.join(root, dirname))\n for filename in files:\n src_files.append(os.path.join(root, filename))\n\n # zip_data represents a list of the data to be written or appended to the\n # zip_stream. It is a list of tuples:\n # (OS file path, zip path/zip file info, and file data)\n # In all cases one of the |os path| or the |file data| will be None.\n # |os path| is None when there is no OS file to write to the archive (i.e.\n # the file data already existed in the archive). |file data| is None when the\n # file is new (never existed in the archive) or being updated.\n zip_data = []\n new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]\n zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])\n for i in range(len(src_files)))\n write_mode = 'a'\n if os.path.exists(options.zipfile):\n with zipfile.ZipFile(options.zipfile, 'r') as zip_stream:\n try:\n files_to_update = set(new_files_to_add).intersection(\n set(zip_stream.namelist()))\n if files_to_update:\n # As far as I can tell, there is no way to update a zip entry using\n # zipfile; the best you can do is rewrite the archive.\n # Iterate through the zipfile to maintain file order.\n write_mode = 'w'\n for zip_path in zip_stream.namelist():\n if zip_path in files_to_update:\n os_path = zip_path_to_os_path_dict[zip_path]\n zip_data.append((os_path, zip_path, None))\n new_files_to_add.remove(zip_path)\n else:\n file_bytes = zip_stream.read(zip_path)\n file_info = zip_stream.getinfo(zip_path)\n zip_data.append((None, file_info, file_bytes))\n except IOError:\n pass\n\n for zip_path in new_files_to_add:\n zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))\n\n if not zip_data:\n print('zip error: Nothing to do! (%s)' % options.zipfile)\n return 1\n\n with zipfile.ZipFile(options.zipfile, write_mode,\n zipfile.ZIP_DEFLATED) as zip_stream:\n for os_path, file_info_or_zip_path, file_bytes in zip_data:\n if isinstance(file_info_or_zip_path, zipfile.ZipInfo):\n zip_path = file_info_or_zip_path.filename\n else:\n zip_path = file_info_or_zip_path\n\n if os_path:\n st = os.stat(os_path)\n if stat.S_ISDIR(st.st_mode):\n # Python 2.6 on the buildbots doesn't support writing directories to\n # zip files. This was resolved in a later version of Python 2.6.\n # We'll work around it by writing an empty file with the correct\n # path. 
(This is basically what later versions do anyway.)\n zip_info = zipfile.ZipInfo()\n zip_info.filename = zip_path\n zip_info.date_time = time.localtime(st.st_mtime)[0:6]\n zip_info.compress_type = zip_stream.compression\n zip_info.flag_bits = 0x00\n zip_info.external_attr = (st[0] & 0xFFFF) << 16\n zip_info.CRC = 0\n zip_info.compress_size = 0\n zip_info.file_size = 0\n zip_stream.writestr(zip_info, '')\n else:\n zip_stream.write(os_path, zip_path)\n else:\n zip_stream.writestr(file_info_or_zip_path, file_bytes)\n\n if not options.quiet:\n if zip_path in new_files_to_add:\n operation = 'adding'\n else:\n operation = 'updating'\n zip_info = zip_stream.getinfo(zip_path)\n if (zip_info.compress_type == zipfile.ZIP_STORED or\n zip_info.file_size == 0):\n print(' %s: %s (stored 0%%)' % (operation, zip_path))\n elif zip_info.compress_type == zipfile.ZIP_DEFLATED:\n print(' %s: %s (deflated %d%%)' % (operation, zip_path,\n 100 - zip_info.compress_size * 100 / zip_info.file_size))\n\n return 0", "def add_file(self, key, dict, data):\n try:\n # If new file (aka, data passed in, write file)\n path = os.path.join(self.file_path, '%s.xoj' % key)\n f = open( path, 'w' )\n f.write(data)\n f.close()\n\n self.fileList[key] = dict\n except:\n print \"Error writing file\", path\n\n self.save()", "def zipfile(filepath, cleanup=False):\n\tzfile = filepath+\".gz\"\n\twith open(filepath, 'rb') as f_in:\n\t\twith gzip.open(zfile, 'wb') as f_out:\n\t\t\tf_out.writelines(f_in)\t\n\t\n\tif cleanup and file_exists(zfile):\n\t\tos.remove(filepath)\n\treturn zfile", "def zip_dir(dir_path, zip_path):\n file_paths = glob.glob(dir_path + \"/*\") #create path to search for files.\n\n zf = zipfile.ZipFile(zip_path, 'w')\n dir_name = os.path.basename(dir_path)\n for p in file_paths:\n file_name = os.path.basename(p)\n zf.write(p, arcname=os.path.join(dir_name, file_name))\n zf.close()\n return zip_path", "def add(self, file):\n if file not in self._files:\n self._files.insert(0, file)\n if len(self._files) > self.nbmax:\n del(self._files[-1])\n else:\n self._files.remove(file)\n self._files.insert(0, file)\n try:\n with open(self._filename, 'w') as file:\n file.write('\\n'.join(self._files))\n except Exception:\n # avoid raising errors if location is read-only or invalid path\n pass", "def Add_File(self,txn,filename,newcontents):\n opid = self.new_opid()\n fullname = os.path.join(self.home,filename)\n #if not self.tx.dir_exists(os.path.dirname(fullname)):\n # raise OSError(errno.ENOENT,\"No directory: %r\"%os.path.dirname(fullname))\n xaction = ReplaceAll_Operation(fullname,newcontents,opid)\n self._add_operation(txn,xaction)", "def sign_zipfile(filename, key_file, cert_file, password_fd):\n zip_obj = zipfile.ZipFile(filename, 'a')\n\n # Get ZIP hashes and sign them\n hashes = get_zip_hashes(zip_obj)\n signed_binary = sign_data(hashes, key_file, password_fd)\n signed_ascii = base64.b64encode(signed_binary)\n\n # Encode certificate\n cert_data = file(cert_file).read()\n cert_ascii = base64.b64encode(cert_data)\n\n # Add signed data as ZIP comment\n zip_obj.comment = '%s:%s:%s' % (HEADER, cert_ascii, signed_ascii)\n\n # Mark file as modified and save it\n zip_obj._didModify = True\n zip_obj.close()", "def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):\n zip_filename = base_name + \".zip\"\n mkpath(os.path.dirname(zip_filename), dry_run=dry_run)\n\n # If zipfile module is not available, try spawning an external\n # 'zip' command.\n if zipfile is None:\n if verbose:\n zipoptions = \"-r\"\n else:\n zipoptions = 
\"-rq\"\n\n try:\n spawn([\"zip\", zipoptions, zip_filename, base_dir],\n dry_run=dry_run)\n except DistutilsExecError:\n # XXX really should distinguish between \"couldn't find\n # external 'zip' command\" and \"zip failed\".\n raise DistutilsExecError((\"unable to create zip file '%s': \"\n \"could neither import the 'zipfile' module nor \"\n \"find a standalone zip utility\") % zip_filename)\n\n else:\n log.info(\"creating '%s' and adding '%s' to it\",\n zip_filename, base_dir)\n\n if not dry_run:\n try:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_DEFLATED)\n except RuntimeError:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_STORED)\n\n if base_dir != os.curdir:\n path = os.path.normpath(os.path.join(base_dir, ''))\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n for dirpath, dirnames, filenames in os.walk(base_dir):\n for name in dirnames:\n path = os.path.normpath(os.path.join(dirpath, name, ''))\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n for name in filenames:\n path = os.path.normpath(os.path.join(dirpath, name))\n if os.path.isfile(path):\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n zip.close()\n\n return zip_filename", "def add_files_to_zip(\n file_list, common_root_directory, zip_handler, put_all_files_in_shared_root_dir\n):\n for file_path in file_list:\n rel_path = file_path\n if common_root_directory is not None:\n rel_path = os.path.relpath(file_path, common_root_directory)\n else:\n # If we don't have a common root dir then, on Windows, path will begin with drive letter\n # e.g. 'C:\\' - remove this for adding to the ZIP\n if platform.system() == \"Windows\":\n rel_path = rel_path.replace(\":\", \"\")\n try:\n if put_all_files_in_shared_root_dir and common_root_directory is not None:\n zip_handler.write(\n file_path,\n arcname=os.path.join(os.path.basename(common_root_directory), rel_path),\n )\n else:\n zip_handler.write(file_path, arcname=rel_path)\n except IOError:\n printer(\n \"'{}' no longer present in folder - zip creation aborted\".format(file_path),\n \"error\",\n True,\n )\n raise\n except OSError:\n printer(\"OSError on '{}' - zip creation aborted\".format(file_path), \"error\", True)\n raise", "def zip_file(src_dir):\n zip_name = slugify(src_dir) + '.zip'\n z = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(src_dir):\n fpath = dirpath.replace(src_dir, '')\n fpath = fpath and fpath + os.sep or ''\n for filename in filenames:\n z.write(os.path.join(dirpath, filename), fpath + filename)\n z.close()", "def make_zip(self):\n shutil.make_archive(self.name, 'zip', self.name)", "def zipdir(path, ziph):\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file),\n arcname=os.path.join(os.path.relpath(root, path), file))", "def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n )\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)", "def add_new_file(self, filename):\n self.load_config()\n if not self.__is_file_in_working_directory(filename):\n path = os.path.join(self.__directory.working_path,\n filename)\n raise FileNotFoundError(f\"No such file '{filename}' in '{path}'!\")\n self.__indexed_files.add(filename)\n files = self.config['info']['files']\n self.config['info']['files'] = 
f'{files},{filename}'.strip(',')\n source_file = os.path.join(self.__directory.working_path, filename)\n file_copy = os.path.join(self.__directory.index_path, filename)\n copyfile(source_file, file_copy)\n print(f'File {source_file} added')\n self.save_config()", "def add_library_file(file_name: str):\n ensure_library_dir_exists()\n\n file_path = os.path.join(library_dir, file_name)\n\n if os.path.exists(file_name):\n raise FileExistsError(\"File at '\" + str(file_path) + \"' already exists.\")\n\n with open(file_path, \"w\"):\n pass\n\n print(\"Created library file at '\" + str(file_path) + \"'.\")", "def unzip_file(zip_file: str) -> None:\n destination = tempfile.mkdtemp(prefix='gaelo_pross_unzip_')\n with ZipFile(zip_file) as my_zip:\n for member in my_zip.namelist():\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n # copy file (taken from zipfile's extract)\n source = my_zip.open(member)\n target = open(os.path.join(destination, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n # return destination", "def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n s = f.read() \n self.add_string(s)", "def put_file(self, path, contents):\n data = io.BytesIO()\n with tarfile.open(fileobj=data, mode='w') as tarfile_:\n file_contents = contents.encode() if isinstance(contents, str) else contents\n tarinfo = tarfile.TarInfo(path)\n\n # We set the modification time to now because some systems (e.g. logging) rely upon\n # timestamps to determine whether to read config files.\n tarinfo.mtime = time.time()\n tarinfo.size = len(file_contents)\n tarfile_.addfile(tarinfo, io.BytesIO(file_contents))\n data.seek(0)\n\n self.container.put_archive(path='/', data=data)", "def add_file_from_memory(\n self, entry_path, entry_size, entry_data,\n filetype=REGULAR_FILE,\n permission=DEFAULT_UNIX_PERMISSION\n ):\n archive_pointer = self._pointer\n\n with new_archive_entry() as archive_entry_pointer:\n archive_entry = ArchiveEntry(None, archive_entry_pointer)\n\n archive_entry.pathname = entry_path\n entry_set_size(archive_entry_pointer, entry_size)\n entry_set_filetype(archive_entry_pointer, filetype)\n entry_set_perm(archive_entry_pointer, permission)\n write_header(archive_pointer, archive_entry_pointer)\n\n for chunk in entry_data:\n if not chunk:\n break\n write_data(archive_pointer, chunk, len(chunk))\n\n write_finish_entry(archive_pointer)\n entry_clear(archive_entry_pointer)", "def from_zip(cls, *args, **kwargs):\n return cls().add_zip(*args, **kwargs)", "def archive(self, files, name):\n self.log.debug(\"Putting files into archive: %s\" % \"\\n\".join(files))\n tar_name = \"%s%s\" % (name, self.extension)\n if os.path.exists(tar_name):\n raise RuntimeError (\"Tried to create an archive that already exists: %s\" % tar_name) \n else:\n self.log.info(\"Creating a new archive %s\" % tar_name)\n tar = tarfile.open(tar_name, 'w:gz');\n for name in files:\n tar.add(name)\n print '%s'% (name)\n tar.close()\n return tar_name", "def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n \n self.add_string(text)", "def add_file(self, name, content):\n self._add_field(struct.pack(\"<L\", len(name)))\n self._add_field(name.encode())\n\n if self.flags & FLAG_GZIP != 0:\n content = GzipWrapper(content, retain_cache=self.cache_chunks)\n elif self.flags & FLAG_LZ4 != 0:\n content = Lz4Wrapper(content, retain_cache=self.cache_chunks)\n else:\n content = 
NoOpWrapper(content, retain_cache=self.cache_chunks)\n\n self._add_field(struct.pack(\"<Q\", _get_length(content)))\n self._add_field(content)", "def add(self, filename, *args):\n return self.cmd('add', filename, *args)", "def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n self.add_string(text)", "def archive_log(self, f_in, filename):\n if not os.path.isdir('archived'):\n os.makedirs('archived')\n f_out = gzip.open('archived/'+filename+'.gz', 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()", "def add_file(self, filename):\r\n f = open(filename, 'r', encoding='utf8', errors='ignore')\r\n text = f.read()\r\n self.add_string(text)\r\n f.close()", "def zipDirectory(folder_path, zip_path):\r\n # Create a ZipFile object\r\n with ZipFile(zip_path, mode='w') as zipObj:\r\n # Iterate over all the files in directory\r\n for folderName, subfolders, filenames in os.walk(folder_path):\r\n for filename in filenames:\r\n # Filter on TSV files\r\n if filename.endswith(\".tsv\"):\r\n # Create complete filepath of file in directory\r\n filePath = os.path.join(folderName, filename)\r\n # Add file to zip\r\n zipObj.write(filePath, basename(filePath))", "def zip_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with ZipFile(destination, \"w\") as thezip:\n thezip.write(self.file)", "def write_zip(self, zip_file, callback=None):\n\n def _cb(*args):\n if callback:\n callback(*args)\n\n try:\n _cb('zip-in-progress', True)\n curr_path = os.path.dirname(os.path.abspath(__file__))\n save_path = curr_path + \"/user_downloads\"\n\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n name_of_file = \"gmail_images.zip\"\n full_file_name = os.path.join(save_path, name_of_file)\n fp = open(full_file_name, \"w\")\n fp.write(zip_file)\n fp.close()\n zip_file.close()\n _cb('zip-complete, True')\n _cb('write-zip', True, name_of_file)\n\n return True\n\n except:\n\n _cb('write-zip', False)\n\n return False", "def upload_nextcloud_zipfile(import_type: migration.Migration, archive: UploadFile = File(...)):\n dir = app_dirs.MIGRATION_DIR.joinpath(import_type.value)\n dir.mkdir(parents=True, exist_ok=True)\n dest = dir.joinpath(archive.filename)\n\n with dest.open(\"wb\") as buffer:\n shutil.copyfileobj(archive.file, buffer)\n\n if not dest.is_file:\n raise HTTPException(status.HTTP_400_BAD_REQUEST)", "def extract_file(self):\n# path_destination = os.path.join(\n# self.root, self.resources.replace(\".zip\", \"\"))\n# os.makedirs(path_destination, exist_ok=True)\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))", "def archive(filepath,archive_dir='archive'):\n\n # Make sure we have a directory to archive to\n try:\n mkdir(archive_dir)\n except:\n print(\"Error making archive directory\")\n return\n\n try:\n (dir, filename) = os.path.split(filepath)\n outfile = os.path.join(dir,archive_dir,filename)+'.gz'\n with open(filename, 'rb') as f_in, gzip.open(outfile, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n except Exception as e:\n print(\"Error archiving \",filepath)\n print(e)\n else:\n try:\n os.remove(filepath)\n except:\n print(\"Error removing \",filepath)", "def get_zip_writer(zipfile: ZipFile):\n dump_folder = CONF['packfile_dump', '']\n if not dump_folder:\n return zipfile.write\n\n dump_folder = os.path.abspath(dump_folder)\n\n # Delete files in the folder, but don't delete the folder itself.\n 
try:\n dump_files = os.listdir(dump_folder)\n except FileNotFoundError:\n pass\n else:\n for name in dump_files:\n name = os.path.join(dump_folder, name)\n if os.path.isdir(name):\n shutil.rmtree(name)\n else:\n os.remove(name)\n\n def write_to_zip(filename, arcname):\n dump_loc = os.path.join(dump_folder, arcname)\n os.makedirs(os.path.dirname(dump_loc), exist_ok=True)\n shutil.copy(filename, dump_loc)\n zipfile.write(filename, arcname)\n return write_to_zip", "def _zipdir(self, dir: Path, zip_handle: zipfile.ZipFile) -> None:\n for root, _, files in os.walk(dir):\n for file in files:\n zip_handle.write(os.path.join(root, file), file)", "def process(self, event):\n # the file will be processed there\n print event.src_path, event.event_type\n\n if os.path.isfile(\"/Users/filename.zip\") == True:\n os.remove(\"/Users/filename.zip\")\n print (\"existing file is removed \")\n shutil.make_archive(\"directory\", \"zip\", \"/Users/directory/\")\n print (\"delete existing zip file and created a new zip file\")\n else:\n print (\"There is no zip file at the moment\")\n shutil.make_archive(\"directory\",\"zip\", \"/Users/directory\")\n print (\" A new zip file is created now \")", "def zipfile_containing(file_contents: Sequence[Tuple[str, str]]):\n with tempfile.NamedTemporaryFile(suffix='.zip') as temp_file:\n with zipfile.ZipFile(temp_file, 'w') as zip_file:\n for file_name, contents in file_contents:\n zip_file.writestr(file_name, contents)\n temp_file.flush()\n yield temp_file", "def zipdir(path, ziph):\n zf = zipfile.ZipFile(ziph, \"w\")\n for root, dirs, files in os.walk(path):\n for file in files:\n zf.write(os.path.join(root, file))\n zf.close()\n return ziph", "def add_file(self, path):\n self.git_repo.index.add([path])", "def create_zip_from_files(files: List[Path]) -> Any:\n temp = tempfile.NamedTemporaryFile()\n with zipfile.ZipFile(temp, 'w') as handle:\n for f in files:\n filename = f.name\n handle.write(f, arcname=filename)\n temp.flush()\n return temp", "def zip_dir(path: str, zip_filename: str) -> None:\n with zipfile.ZipFile(zip_filename, \"w\", zipfile.ZIP_DEFLATED) as zip_file:\n for root, dirs, files in os.walk(path):\n for file in files:\n f = os.path.join(root, file)\n zip_file.write(f, f[len(path) :])", "def addFile(self,pathAndFilename):\n #generate the key for the file\n fkey=self.getKey(pathAndFilename)\n \n #Checking if the key was seen before; if so, then file has duplicates\n if self.keyToFile.has_key(fkey):\n self.keyToFile[fkey].append(pathAndFilename)\n else:\n self.keyToFile[fkey]=[pathAndFilename]", "def add_files(self, *paths, **kw):\n write_p = self._pointer\n\n block_size = ffi.write_get_bytes_per_block(write_p)\n if block_size <= 0:\n block_size = 10240 # pragma: no cover\n\n with new_archive_entry() as entry_p:\n entry = ArchiveEntry(None, entry_p)\n for path in paths:\n with new_archive_read_disk(path, **kw) as read_p:\n while 1:\n r = read_next_header2(read_p, entry_p)\n if r == ARCHIVE_EOF:\n break\n entry.pathname = entry.pathname.lstrip('/')\n read_disk_descend(read_p)\n write_header(write_p, entry_p)\n if entry.isreg:\n with open(entry_sourcepath(entry_p), 'rb') as f:\n while 1:\n data = f.read(block_size)\n if not data:\n break\n write_data(write_p, data, len(data))\n write_finish_entry(write_p)\n entry_clear(entry_p)", "def make_zipfile(output_filename, source_dir):\n relroot = os.path.abspath(os.path.join(source_dir, os.pardir))\n with zipfile.ZipFile(output_filename, \"w\", zipfile.ZIP_DEFLATED) as zip:\n for root, dirs, files in 
os.walk(source_dir):\n # add directory (needed for empty dirs)\n zip.write(root, os.path.relpath(root, relroot))\n for file in files:\n filename = os.path.join(root, file)\n if os.path.isfile(filename): # regular files only\n arcname = os.path.join(os.path.relpath(root, relroot), file)\n zip.write(filename, arcname)", "def addFile(self, path):\n self._model.insertFile(path)", "def zip_alg_file(task_id):\n start_dir = os.path.join(FILE_PATH, \"task\", task_id)\n res = None\n if os.path.exists(start_dir):\n zip_file_dir = os.path.join(FILE_PATH, \"task\", task_id + \".zip\")\n file = zipfile.ZipFile(zip_file_dir, \"w\", zipfile.ZIP_DEFLATED)\n for dir_path, _, file_names in os.walk(start_dir):\n for file_name in file_names:\n file.write(os.path.join(dir_path, file_name))\n file.close()\n res = zip_file_dir\n return res", "def archive_files(archive_fileprefix, flist, zip_type, reldir, prefix=\"\"):\n def archive_filter(tinfo):\n fdir, fbase = os.path.split(tinfo.name)\n archpath = os.path.join(prefix, os.path.relpath(tinfo.name, reldir))\n tinfo.name = archpath\n return tinfo\n write_type = 'w:'+zip_type\n\n if zip_type:\n archive_filename = '{}.tar.{}'.format(archive_fileprefix, zip_type)\n else:\n archive_filename = '{}.tar'.format(archive_fileprefix)\n\n with tarfile.open(archive_filename, write_type) as out_file:\n for f in flist:\n out_file.add(f, filter=archive_filter)", "def compress_files(self):\n archive_file_path = tkinter.filedialog.asksaveasfilename(parent=self,\n defaultextension=\".zip\",\n filetypes=[(\"Zip File\", \"*.zip\")])\n treeview_items = self.files_treeview.get_children()\n if archive_file_path and treeview_items:\n with ZipFile(archive_file_path, \"w\", ZIP_DEFLATED) as archive:\n for row in treeview_items:\n file_path = self.files_treeview.item(row, \"values\")[0]\n file_name = os.path.basename(file_path)\n archive.write(file_path, arcname=file_name)", "def extract_zip(file, extract_location):\n\n with zipfile.ZipFile(file, \"r\") as zip_ref:\n zip_ref.extractall(extract_location)\n\n print(f\"Extracted file to {extract_location}\")", "def archive(self):\n suffixStart = len(self._filename)\n suffixEnd = suffixStart + 4\n archiveFile = osp.join(self.currentRecordingPath, self._filename + \".zip\")\n with ZipFile(archiveFile, 'w') as recarchive:\n for recfile in listdir(self.currentRecordingPath):\n if recfile.endswith(\".rec\", suffixStart, suffixEnd):\n recarchive.write(osp.join(self.currentRecordingPath, recfile), recfile)\n self._logger.info(\"Archive file has been created {}\".format(archiveFile))", "def make_zipfile(output_filename, source_dir):\n import zipfile, zlib\n relroot = os.path.abspath(os.path.join(source_dir, os.pardir))\n with zipfile.ZipFile(output_filename, \"w\", zipfile.ZIP_DEFLATED, allowZip64) as zip:\n for root, dirs, files in os.walk(source_dir):\n # add directory (needed for empty dirs)\n zip.write(root, os.path.relpath(root, relroot))\n for file in files:\n filename = os.path.join(root, file)\n if os.path.isfile(filename): # regular files only\n arcname = os.path.join(os.path.relpath(root, relroot), file)\n zip.write(filename, arcname)", "def addFileLocation(samweb, filenameorid, location):\n url = _make_file_path(filenameorid) + '/locations'\n data = { \"add\" : location }\n return samweb.postURL(url, data=data, secure=True, role='*')", "def load(zip_path, path_to_add=\"\"):\n with tempfile.TemporaryDirectory(prefix=\"zipload-py\") as tmp_dir:\n _extract_zip(zip_path, tmp_dir, path_to_add)\n sys.path.append(os.path.join(tmp_dir, 
path_to_add))\n yield None\n sys.path.remove(os.path.join(tmp_dir, path_to_add))", "def archive_file(filename, maxsize):#{{{\n if not os.path.exists(filename):\n print(filename, \"does not exist. ignore.\", file=sys.stderr)\n return 1\n\n filesize = os.path.getsize(filename)\n if filesize > maxsize:\n cnt = 0\n zipfile = \"\"\n while 1:\n cnt += 1\n zipfile = \"%s.%d.gz\"%(filename, cnt)\n if not os.path.exists(zipfile):\n break\n # write zip file\n try:\n f_in = open(filename, 'rb')\n except IOError:\n print(\"Failed to read %s\"%(filename), file=sys.stderr)\n return 1\n try:\n f_out = gzip.open(zipfile, 'wb')\n except IOError:\n print(\"Failed to write to %s\"%(zipfile), file=sys.stderr)\n return 1\n\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()\n print(\"%s is archived to %s\"%(filename, zipfile))\n os.remove(filename)\n return 0", "def add_file(self, field_name, file_name, file_content, mimetype=None):\n if mimetype is None:\n mimetype = mimetypes.guess_type(file_name)[0] or 'application/octet-stream'\n self.files.append((field_name, file_name, mimetype, file_content))\n return", "def zip(zipfilename, srcdir): # , recursive=True):\n\tsrcdir = uniformpath(srcdir)\n\trootdir = os.path.dirname(srcdir) # \"...doc/Java\" gives doc\n\trootnameindex = len(rootdir) + 1 # \"...doc/Java\" gives start of \"Java\"\n\twith zipfile.ZipFile(zipfilename, mode=\"w\", compression=zipfile.ZIP_DEFLATED) as z:\n\t\tfor f in allfiles(srcdir):\n\t\t\tz.write(f, f[rootnameindex:])", "def store_source(zipfile_name, directory, storage_directory):\n if directory in ZIP_EXCLUDES:\n zip_command = ['zip', '-x ' + '-x'.join(ZIP_EXCLUDES[directory]), '-r', zipfile_name, directory]\n else:\n zip_command = ['zip', '-r', zipfile_name, directory]\n\n if exists(zipfile_name):\n os.remove(zipfile_name)\n\n try:\n logging.debug('Zipping Source: %s', zip_command)\n check_output(zip_command)\n except CalledProcessError as build_error:\n logging.error('Could not zip source, Return Code %s, Command %s', build_error.returncode, build_error.cmd)\n logging.error(build_error.output)\n os.remove(zipfile_name)\n return False\n\n qmk_storage.save_file(zipfile_name, os.path.join(storage_directory, zipfile_name), 'application/zip')\n os.remove(zipfile_name)\n\n return True", "def add_files(self, files, commit_msg):\n paths = []\n for rpath in files:\n path = os.path.join(self.repodir, rpath)\n paths.append(path)\n with open(path, 'w') as f:\n f.write(files[rpath])\n if paths:\n self.git_cmd(['add'] + paths)\n self.commit(commit_msg)", "def add(self, file_id: str, contents: bytes):\n file_path = self._path_to_file(file_id)\n b2_utils.write_file(file_path, contents)\n self._add_to_index(file_id, len(contents))", "def _compress_file(filename: str, basename: str):\n write_mode = _get_write_mode(filename)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n shutil.move(filename, os.path.join(tmpdir, basename))\n with tarfile.open(filename, write_mode) as tarball:\n tarball.add(tmpdir, arcname='')", "def open(self, *args, **kwargs):\n return ZipFileArchiver(*args,**kwargs)", "def add_file(self, path):\n self.files.append(filetypes.WrapVideoFile(path))", "def add_file(self, file_path):\n content = ''\n with open(file_path, 'r') as fh:\n self.__add_source_data(fh.read())", "def add(self,**kwargs):\n if self._extract:\n raise RuntimeError('This archive is write-only!')\n\n items = kwargs.iteritems() if PY2 else kwargs.items()\n for key,value in items:\n self._setitem(key,value)", "def _open_zip(self):\n self.buffer = io.BytesIO()\n 
self.zf = zipfile.ZipFile(self.buffer, \"w\", zipfile.ZIP_DEFLATED)", "def add_file(self, filename):\n f = open(filename, 'r', encoding='utf8', errors='ignore')\n text = f.read()\n f.close()\n self.add_string(text)", "def add(self, content):\n if (not self.exists()):\n raise IOError(\"File at '{}' does not exist.\".format(self.location))\n with open(self.location, 'a') as f:\n f.write(content)", "def add_file(current_path, parent_dir, descriptor, index_dict):\n # Get the relative path from the repository\n rel_path_from_repository = relpath(abspath(current_path), parent_dir)\n # If the file is outside the repository, print error, return\n if rel_path_from_repository.startswith(\"..\"):\n print(\"fatal: %s: '%s' is outside repository\" % (current_path,\n current_path))\n return\n # Read file content and hash it\n file_sha1_hash, file_content = read_and_hash(current_path)\n # Convert the modification time of the file to string\n mtime = convert_mtime_to_formatted_string(current_path)\n try:\n update_info_when_add(descriptor, rel_path_from_repository,\n mtime, file_sha1_hash, index_dict)\n except TypeError:\n # Raise error when the hash and file content are None.\n print(\"error: unable to index file %s\" % basename(current_path))\n # Create the new directory + object file in objects if needed\n make_directory_and_object_file(file_sha1_hash, file_content, parent_dir)" ]
[ "0.73756856", "0.7169092", "0.6900707", "0.68461806", "0.66883737", "0.6629361", "0.66237056", "0.64670384", "0.6460916", "0.64255655", "0.636943", "0.6339873", "0.6218348", "0.6186335", "0.6168846", "0.6077438", "0.6057466", "0.6050394", "0.6045798", "0.60440147", "0.59432864", "0.59070504", "0.5892512", "0.5872402", "0.5865803", "0.5859915", "0.582567", "0.57907784", "0.57845706", "0.5730467", "0.5730393", "0.5709739", "0.57087463", "0.5692842", "0.56405723", "0.5636828", "0.56340325", "0.5633983", "0.56324124", "0.5630354", "0.5629827", "0.56221217", "0.5622057", "0.5616915", "0.56084555", "0.56063235", "0.5605334", "0.5603409", "0.5565296", "0.5557413", "0.5554864", "0.55354357", "0.5534545", "0.5530288", "0.5530271", "0.5529869", "0.5507189", "0.55004907", "0.549874", "0.54708534", "0.5470772", "0.5464889", "0.5464809", "0.54508054", "0.5445468", "0.54432267", "0.54416424", "0.5424819", "0.5423298", "0.5420265", "0.5417813", "0.5414747", "0.540558", "0.53948784", "0.53902376", "0.5384885", "0.5374191", "0.5373027", "0.53603655", "0.53498685", "0.53488046", "0.53480947", "0.53480494", "0.5341853", "0.5337759", "0.5331545", "0.53240734", "0.5307993", "0.53063834", "0.5297677", "0.5296555", "0.52953124", "0.52951497", "0.52881056", "0.52751845", "0.5271075", "0.52703476", "0.52610135", "0.525875", "0.5254338" ]
0.6727953
4
Archive a project stored in Git into a zip file.
def _archive_project(name, buff, files=None, repo=None, branch='master',
                     ignore_deleted=False):
    if repo is None:
        repo = Repoman.open_repo(name)
    now = datetime.now().timetuple()[:6]
    archive = zipfile.ZipFile(buff, "w", zipfile.ZIP_DEFLATED)
    files_list = files if files is not None else \
        repo.list_files_for_branch(branch)
    all_files = files_list if files is None else \
        repo.list_files_for_branch(branch)
    template_paths = defaultdict(list)
    for file_path in all_files:
        split_file_path = file_path.split('/')
        if len(split_file_path) > 2:
            template_paths[split_file_path[1]].append(file_path)
    extractors = json.loads(repo.file_contents_for_branch('extractors.json',
                                                          branch) or '{}')
    seen_files = set()
    spiders = set()
    for file_path in files_list:
        if file_path.startswith('spiders'):
            try:
                parts = file_path.split("/")
                if len(parts) >= 2:
                    spider_name = parts[1]
                    if spider_name.endswith('.json'):
                        spider_name = spider_name[:-5]
                    if spider_name not in spiders:
                        # Load spider if necessary
                        if len(parts) > 2:
                            file_path = 'spiders/' + spider_name + '.json'
                        file_contents = repo.file_contents_for_branch(
                            file_path, branch)
                        as_json = json.loads(file_contents)
                        templates = []
                        # Load all spider templates
                        spider_templates = template_paths.get(spider_name, [])
                        for template_path in spider_templates:
                            seen_files.add(template_path)
                            existing = {}
                            # Ignore deleted templates
                            try:
                                templ_contents = repo.file_contents_for_branch(
                                    template_path, branch)
                            except (TypeError, ValueError):
                                continue
                            json_template = json.loads(templ_contents)
                            # Validate extractors
                            template_extractors = json_template.get(
                                'extractors', {})
                            for field, eids in template_extractors.items():
                                existing[field] = [eid for eid in eids
                                                   if eid in extractors]
                            json_template['extractors'] = existing
                            spider_name = parts[1]
                            templates.append(json_template)
                        spiders.add(spider_name)
                        as_json.pop('template_names', None)
                        as_json['templates'] = templates
                        _add_to_archive(archive, file_path,
                                        json.dumps(as_json), now)
            except TypeError:
                if ignore_deleted:
                    continue
                # Handle Deleted Spiders
                file_contents = repo.file_contents_for_branch(file_path,
                                                              'master')
                file_info = {'deleted': True}
                if file_contents:
                    as_json = json.loads(file_contents)
                _add_to_archive(archive, file_path, json.dumps(file_info), now)
        else:
            file_contents = repo.file_contents_for_branch(file_path, branch)
            _add_to_archive(archive, file_path, file_contents, now)
        seen_files.add(file_path)
    # Add empty placeholders for missing files required by dash
    for file_path in {'extractors.json', 'items.json'} - seen_files:
        _add_to_archive(archive, file_path, '{}', now)
    archive.close()
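The helper `_add_to_archive` is called throughout but never shown. Below is a minimal, hypothetical reconstruction, assuming it only wraps `ZipFile.writestr` with an explicit `ZipInfo` so every entry carries the `now` timestamp computed above; the helper name matches the calls, but its body is an assumption, not the project's actual implementation.

import zipfile
from datetime import datetime
from io import BytesIO

def _add_to_archive(archive, file_path, contents, timestamp):
    # Assumed reconstruction: store `contents` under `file_path` with an
    # explicit date_time tuple; writestr() with a bare name would stamp
    # the entry with the current wall-clock time instead.
    info = zipfile.ZipInfo(file_path, date_time=timestamp)
    info.compress_type = zipfile.ZIP_DEFLATED
    archive.writestr(info, contents)

# Usage sketch mirroring _archive_project: write one placeholder entry
# into an in-memory buffer (BytesIO on Python 3; the snippet above is
# Python 2 era and would use StringIO).
buff = BytesIO()
now = datetime.now().timetuple()[:6]
with zipfile.ZipFile(buff, 'w', zipfile.ZIP_DEFLATED) as archive:
    _add_to_archive(archive, 'items.json', '{}', now)

Passing the same timestamp tuple to every entry keeps the archive contents deterministic within a single run, which matters when the resulting zip is compared or cached downstream.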
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def archive(project, filename, pack_envs=False):\n return archiver._archive_project(project, filename, pack_envs)", "def archive_projectbuild(projectbuild, archive):\n transport = get_transport_for_projectbuild(projectbuild, archive)\n transport.archive()", "def make_zip(self, project):\n return None", "def create_zip_file():\n shutil.make_archive(os.path.join(DIST_DIR, \"build\"), \"zip\", BUILD_DIR)", "def deploy_project(name, apikey, changed_files=None, repo=None,\n branch='master'):\n zbuff = StringIO()\n if changed_files is not None:\n changed_files = list(set(changed_files) | REQUIRED_FILES)\n _archive_project(name, zbuff, changed_files, repo, branch)\n zbuff.reset()\n payload = {'apikey': apikey, 'project': name}\n req = requests.post(\n DASH_API_URL + 'as/import.json?version=portia',\n files=[('archive', ('archive', zbuff, 'application/zip'))],\n params=payload\n )\n if req.status_code == 200:\n project_url = DASH_API_URL.rsplit('/', 2)[0] + '/p/' + name\n return {\n 'status': 'ok',\n 'schedule_url': project_url\n }\n else:\n raise DeployError('Deploy to Dash failed: %s' % req.text)", "def make_zip(self):\n shutil.make_archive(self.name, 'zip', self.name)", "def git_archive_all(path, archive_file_name):\n import os\n import tarfile\n\n def ls_files(prefix=''):\n \"\"\"\n Does a `git ls-files` on every git repository (eg: submodules)\n found in the working git repository and returns a list with all the\n filenames returned by each `git ls-files`\n\n --full-name Forces paths to be output relative to the project top\n directory\n --exclude-standard adds standard git exclusions\n (.git/info/exclude, .gitignore, ...)\n \"\"\"\n cmd = 'git ls-files --full-name --exclude-standard'\n raw_files = local(cmd, capture=True)\n files = []\n\n for filename in raw_files.split('\\n'):\n if (os.path.isdir(filename) and\n os.path.exists(os.path.join(filename, '.git'))):\n os.chdir(filename)\n files.extend(ls_files(prefix=filename))\n else:\n files.append(os.path.join(prefix, filename))\n\n return files\n\n cwd = os.getcwd()\n os.chdir(path)\n files = ls_files()\n os.chdir(path)\n project_tar = tarfile.open(archive_file_name, 'w:gz')\n\n for filename in files:\n project_tar.add(filename)\n\n project_tar.close()\n os.chdir(cwd)\n\n print(green('Archive created at %s/%s' % (path, archive_file_name)))", "def download_project_archive(request, **kwargs):\n project = kwargs.get(\"project\")\n if request.user.is_authenticated and request.user == project.user:\n filename = project.create_downloadable_archive()\n file_handle = open(filename, \"rb\")\n response = FileResponse(file_handle)\n\n response[\"Content-Length\"] = os.path.getsize(filename)\n response[\n \"Content-Disposition\"\n ] = 'attachment; filename=\"{}.zip\"'.format(project.name)\n\n return response\n else:\n raise PermissionDenied", "def _archive_repository(\n owner: str, project_name: str, secret_token: str\n) -> Tuple[bool, str]:\n project_settings = {\"archived\": \"true\"}\n\n headers = {\n \"Authorization\": f\"token {secret_token}\",\n }\n\n url = f\"https://{REST_HOST}/repos/{owner}/{project_name}\"\n\n response = patch(url, json=project_settings, headers=headers, verify=VERIFY_CERT)\n return response.ok, (\n f\"Status: {response.status_code}. 
\" f'Error: \"{response.text}\".'\n )", "def zip_repo(src_path, dest_path):\n tar = tarfile.open(dest_path, \"w:gz\")\n for file_name in glob.glob(os.path.join(src_path, \"*\")):\n tar.add(file_name, os.path.basename(file_name))\n\n tar.close()", "def deploy_django_project(self):\n\n if self.no_files:\n return\n\n local_dir = \"{0}\".format(self.app_dir)\n app_dir = \"{0}\".format(self.app_remote_dir)\n\n if not exists(app_dir):\n mkdir(app_dir)\n\n zip_name = make_zip(local_dir, self.app_name)\n put(zip_name, self.app_remote_dir)\n\n with cd(self.app_remote_dir):\n run(\"unzip -o {0}\".format(zip_name))\n\n os.remove(zip_name)", "def _zip_files(self):\n\n zip_file = Path(self.build_directory.parent).joinpath(\n self.package_name + '.zip'\n )\n logger.info('Creating zip file: %s', zip_file)\n\n shutil.make_archive(zip_file.with_suffix(''), 'zip', self.build_directory)\n shutil.move(str(zip_file), self.build_directory)", "def archive(\n self,\n ostream: Union[TextIO, BinaryIO],\n treeish: Optional[str] = None,\n prefix: Optional[str] = None,\n **kwargs: Any,\n ) -> Repo:\n if treeish is None:\n treeish = self.head.commit\n if prefix and \"prefix\" not in kwargs:\n kwargs[\"prefix\"] = prefix\n kwargs[\"output_stream\"] = ostream\n path = kwargs.pop(\"path\", [])\n path = cast(Union[PathLike, List[PathLike], Tuple[PathLike, ...]], path)\n if not isinstance(path, (tuple, list)):\n path = [path]\n # end assure paths is list\n self.git.archive(\"--\", treeish, *path, **kwargs)\n return self", "def upload_project(self, workflow_snapshot_id: Text, project_path: Text) -> Text:\n with tempfile.TemporaryDirectory() as temp_dir:\n zip_file_name = 'workflow_{}_project.zip'.format(workflow_snapshot_id)\n temp_dir_path = Path(temp_dir)\n zip_file_path = temp_dir_path / zip_file_name\n make_dir_zipfile(project_path, zip_file_path)\n with open(zip_file_path, 'rb') as f:\n self.s3_client.upload_fileobj(f, self.bucket_name, zip_file_name)\n return zip_file_name", "def upload_tar_from_git():\n require(\"release\", provided_by=[deploy])\n tree = prompt(\"Please enter a branch or SHA1 to deploy\", default=\"master\")\n local(\"git archive --format=tar %s | gzip > %s.tar.gz\" % (tree, env['release']))\n sudo(\"mkdir %(path)s/releases/%(release)s\" % env)\n put(\"%(release)s.tar.gz\" % env, \"%(path)s/packages/\" % env, use_sudo=True)\n sudo(\"cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz\" % env)\n local(\"rm %(release)s.tar.gz\" % env)", "def zip_file(backup_objects):\n\n # Get name from date_time\n name_of_zip_file = (get_date(\"%d%m%Y_%H.%S\") + '.zip')\n # put files in zip archiv\n z = zipfile.ZipFile(name_of_zip_file, 'a', zipfile.ZIP_DEFLATED) # create archive\n for i in backup_objects:\n if os.path.isdir(i):\n for root, dirs, files in os.walk(i): # get list of files in folder\n for file in files:\n z.write(os.path.join(root, file)) # Создание относительных путей и запись файлов в архив\n else:\n z.write(i)\n z.close()\n if zipfile.is_zipfile(name_of_zip_file):\n notest_file(\"arckhiving is conplite! 
Created file\" + name_of_zip_file)\n return name_of_zip_file", "def zip_build_dir(self):\n logging.debug('Attempting to zip build dir...')\n shutil.make_archive(self.deployment_zip, 'zip', self.build_dir)\n logging.debug('Successfully zipd build dir...')", "def github_archive(\n name,\n repository = None,\n commit = None,\n commit_pin = None,\n sha256 = \"0\" * 64,\n build_file = None,\n patches = None,\n extra_strip_prefix = \"\",\n local_repository_override = None,\n mirrors = None,\n **kwargs):\n if repository == None:\n fail(\"Missing repository=\")\n if commit == None:\n fail(\"Missing commit=\")\n if mirrors == None:\n fail(\"Missing mirrors=; see mirrors.bzl\")\n\n build_file = _resolve_drake_abbreviation(name, build_file)\n patches = [\n _resolve_drake_abbreviation(name, one_patch)\n for one_patch in (patches or [])\n ]\n\n if local_repository_override != None:\n path = local_repository_override\n if extra_strip_prefix:\n path += \"/\" + extra_strip_prefix\n if build_file == None:\n native.local_repository(\n name = name,\n path = path,\n )\n else:\n native.new_local_repository(\n name = name,\n build_file = build_file,\n path = path,\n )\n return\n\n # Once we've handled the \"local_repository_override\" sidestep, we delegate\n # to a rule (not a macro) so that we have more leeway in the actions we can\n # take (i.e., so we can do more than just a simple download-and-extract).\n _github_archive_real(\n name = name,\n repository = repository,\n commit = commit,\n commit_pin = commit_pin,\n sha256 = sha256,\n build_file = build_file,\n patches = patches,\n extra_strip_prefix = extra_strip_prefix,\n mirrors = mirrors,\n **kwargs\n )", "def zipdata(filename: str) -> None:\n\n # Generate the path to the project TODO: check if this is entire project or server\n directoryName = ROOT.split(\"/\")[-3]\n codeDestination = \"/\".join(ROOT.split(\"/\")[:-2])\n\n # Create the output file\n zippedFile = zipfile.ZipFile(filename, \"w\", compression=zipfile.ZIP_DEFLATED)\n\n # Walk over the directory and save all files\n for abspath, dirnames, filenames in os.walk(codeDestination):\n local = abspath[abspath.index(directoryName):]\n [zippedFile.write(os.path.join(abspath, name), os.path.join(local, name)) for name in filenames]\n\n # Close the zip file\n zippedFile.close()", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def unpackToProject(self,archive,project,progress=None):\n progress = progress or bolt.Progress()\n files = self.sortFiles([x[0] for x in self.fileSizeCrcs])\n if not files: return 0\n #--Clear Project\n destDir = dirs['installers'].join(project)\n if destDir.exists(): destDir.rmtree(safety='Installers')\n #--Extract\n progress(0,project.s+_(\"\\nExtracting files...\"))\n self.unpackToTemp(archive,files,SubProgress(progress,0,0.9))\n #--Move\n progress(0.9,project.s+_(\"\\nMoving files...\"))\n count = 0\n tempDir = self.tempDir\n for file in files:\n srcFull = tempDir.join(file)\n destFull = destDir.join(file)\n if srcFull.exists():\n srcFull.moveTo(destFull)\n count += 1\n self.clearTemp()\n return count", "def makeArchiveGitSubcommand(self, prefix, revision=u\"HEAD\", 
outputFilePath=None):\n command = \"archive --prefix=%s/ %s \"\n command = command % (prefix, revision)\n if outputFilePath is not None:\n command += \" -o %s\" % outputFilePath\n return command", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )", "def _make_archive(file_list, archive, root):\n with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED) as zipf:\n for f in file_list:\n zipf.write(f, os.path.relpath(f, root))", "def zipAll(app):\n context = app.context\n\n backend = app.backend\n base = backendRep(backend, \"clone\")\n org = context.org\n repo = context.repo\n relative = context.relative\n relative = prefixSlash(normpath(relative))\n version = context.version\n\n graphics = context.graphicsRelative\n graphics = prefixSlash(normpath(graphics))\n\n prov = context.provenanceSpec\n mods = prov.get(\"moduleSpecs\", [])\n\n repoDir = f\"{base}/{org}/{repo}\"\n\n dataItems = [\n (\"app\", f\"{repoDir}/{APP_APP}\"),\n (\"main data\", f\"{repoDir}{relative}/{version}\"),\n ]\n if graphics:\n dataItems.append((\"graphics\", f\"{repoDir}{graphics}\"))\n\n good = True\n\n for mod in mods:\n mbackend = mod[\"backend\"]\n if mbackend is None:\n mbackend = app.backend\n mbase = backendRep(mbackend, \"clone\")\n morg = mod[\"org\"]\n mrepo = mod[\"repo\"]\n mrelative = mod[\"relative\"]\n mrelative = prefixSlash(normpath(mrelative))\n mrepoDir = f\"{mbase}/{morg}/{mrepo}\"\n labelItems = []\n if mbase != base:\n labelItems.append(mbase)\n if morg != org:\n 
labelItems.append(morg)\n if mrepo != repo:\n labelItems.append(mrepo)\n if mrelative != relative:\n labelItems.append(mrelative)\n label = \"-\".join(labelItems)\n if mbase != base:\n good = False\n console(f\"ERROR: module {label} not on expected backend {backend}\")\n dataItems.append((f\"module {label}\", f\"{mrepoDir}{mrelative}/{version}\"))\n\n if not good:\n return\n\n destBase = f\"{DW}/{backendRep(backend, 'norm')}\"\n dest = normpath(f\"{destBase}/{org}/{repo}\")\n destFile = f\"{dest}/{APP_EXPRESS_ZIP}\"\n\n console(\"Data to be zipped:\")\n results = []\n\n for (label, path) in dataItems:\n if dirExists(path):\n (release, commit) = addCheckout(path)\n checkout = f\"({release or 'v??'} {commit[-6:] if commit else '??'})\"\n zipBase = path.removeprefix(f\"{base}/\")\n collectFiles(path, \"\", results, zipBase=zipBase)\n status = \"OK\"\n else:\n good = False\n status = \"missing\"\n checkout = \"(??)\"\n console(f\"\\t{status:<8} {label:<24} {checkout:<20}: {path}\")\n\n if not good:\n return\n\n if not dirExists(dest):\n dirMake(dest)\n console(\"Writing zip file ...\")\n with ZipFile(destFile, \"w\", **ZIP_OPTIONS) as zipFile:\n for (internalPath, path) in sorted(results):\n zipFile.write(\n path,\n arcname=internalPath,\n )\n return ux(destFile)", "def archive(mongo_backup_file):\r\n filename = get_archive_filename()\r\n tar = tarfile.open(filename, \"w|gz\")\r\n tar.add(mongo_backup_file)\r\n tar.close()\r\n\r\n return filename", "def Zip(args):\n parser = argparse.ArgumentParser(description=Zip.__doc__)\n parser.add_argument(\n '-r', dest='recursive', action='store_true',\n default=False,\n help='recurse into directories')\n parser.add_argument(\n '-q', dest='quiet', action='store_true',\n default=False,\n help='quiet operation')\n parser.add_argument('zipfile')\n parser.add_argument('filenames', nargs='+')\n options = parser.parse_args(args)\n\n src_files = []\n for filename in options.filenames:\n globbed_src_args = glob.glob(filename)\n if not globbed_src_args:\n if not options.quiet:\n print('zip warning: name not matched: %s' % filename)\n\n for src_file in globbed_src_args:\n src_file = os.path.normpath(src_file)\n src_files.append(src_file)\n if options.recursive and os.path.isdir(src_file):\n for root, dirs, files in os.walk(src_file):\n for dirname in dirs:\n src_files.append(os.path.join(root, dirname))\n for filename in files:\n src_files.append(os.path.join(root, filename))\n\n # zip_data represents a list of the data to be written or appended to the\n # zip_stream. It is a list of tuples:\n # (OS file path, zip path/zip file info, and file data)\n # In all cases one of the |os path| or the |file data| will be None.\n # |os path| is None when there is no OS file to write to the archive (i.e.\n # the file data already existed in the archive). 
|file data| is None when the\n # file is new (never existed in the archive) or being updated.\n zip_data = []\n new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]\n zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])\n for i in range(len(src_files)))\n write_mode = 'a'\n if os.path.exists(options.zipfile):\n with zipfile.ZipFile(options.zipfile, 'r') as zip_stream:\n try:\n files_to_update = set(new_files_to_add).intersection(\n set(zip_stream.namelist()))\n if files_to_update:\n # As far as I can tell, there is no way to update a zip entry using\n # zipfile; the best you can do is rewrite the archive.\n # Iterate through the zipfile to maintain file order.\n write_mode = 'w'\n for zip_path in zip_stream.namelist():\n if zip_path in files_to_update:\n os_path = zip_path_to_os_path_dict[zip_path]\n zip_data.append((os_path, zip_path, None))\n new_files_to_add.remove(zip_path)\n else:\n file_bytes = zip_stream.read(zip_path)\n file_info = zip_stream.getinfo(zip_path)\n zip_data.append((None, file_info, file_bytes))\n except IOError:\n pass\n\n for zip_path in new_files_to_add:\n zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))\n\n if not zip_data:\n print('zip error: Nothing to do! (%s)' % options.zipfile)\n return 1\n\n with zipfile.ZipFile(options.zipfile, write_mode,\n zipfile.ZIP_DEFLATED) as zip_stream:\n for os_path, file_info_or_zip_path, file_bytes in zip_data:\n if isinstance(file_info_or_zip_path, zipfile.ZipInfo):\n zip_path = file_info_or_zip_path.filename\n else:\n zip_path = file_info_or_zip_path\n\n if os_path:\n st = os.stat(os_path)\n if stat.S_ISDIR(st.st_mode):\n # Python 2.6 on the buildbots doesn't support writing directories to\n # zip files. This was resolved in a later version of Python 2.6.\n # We'll work around it by writing an empty file with the correct\n # path. 
(This is basically what later versions do anyway.)\n zip_info = zipfile.ZipInfo()\n zip_info.filename = zip_path\n zip_info.date_time = time.localtime(st.st_mtime)[0:6]\n zip_info.compress_type = zip_stream.compression\n zip_info.flag_bits = 0x00\n zip_info.external_attr = (st[0] & 0xFFFF) << 16\n zip_info.CRC = 0\n zip_info.compress_size = 0\n zip_info.file_size = 0\n zip_stream.writestr(zip_info, '')\n else:\n zip_stream.write(os_path, zip_path)\n else:\n zip_stream.writestr(file_info_or_zip_path, file_bytes)\n\n if not options.quiet:\n if zip_path in new_files_to_add:\n operation = 'adding'\n else:\n operation = 'updating'\n zip_info = zip_stream.getinfo(zip_path)\n if (zip_info.compress_type == zipfile.ZIP_STORED or\n zip_info.file_size == 0):\n print(' %s: %s (stored 0%%)' % (operation, zip_path))\n elif zip_info.compress_type == zipfile.ZIP_DEFLATED:\n print(' %s: %s (deflated %d%%)' % (operation, zip_path,\n 100 - zip_info.compress_size * 100 / zip_info.file_size))\n\n return 0", "def zip_campaign_files(): # pylint: disable=too-many-locals\n try:\n build_tag = env.get('BUILD_TAG')\n assert Campaign.dump_db() == Campaign.EX_OK\n assert Campaign.dump_artifacts() == Campaign.EX_OK\n with zipfile.ZipFile(f'{build_tag}.zip',\n 'w', zipfile.ZIP_DEFLATED) as zfile:\n zfile.write(f\"{build_tag}.json\")\n for root, _, files in os.walk(build_tag):\n for filename in files:\n zfile.write(os.path.join(root, filename))\n b3resource = boto3.resource(\n 's3', endpoint_url=os.environ[\"S3_ENDPOINT_URL\"])\n dst_s3_url = os.environ[\"S3_DST_URL\"]\n multipart_threshold = 5 * 1024 ** 5 if \"google\" in os.environ[\n \"S3_ENDPOINT_URL\"] else 8 * 1024 * 1024\n tconfig = TransferConfig(multipart_threshold=multipart_threshold)\n bucket_name = urllib.parse.urlparse(dst_s3_url).netloc\n mime_type = mimetypes.guess_type(f'{build_tag}.zip')\n path = urllib.parse.urlparse(dst_s3_url).path.strip(\"/\")\n # pylint: disable=no-member\n b3resource.Bucket(bucket_name).upload_file(\n f'{build_tag}.zip',\n os.path.join(path, f'{build_tag}.zip'),\n Config=tconfig,\n ExtraArgs={'ContentType': mime_type[\n 0] or 'application/octet-stream'})\n dst_http_url = os.environ[\"HTTP_DST_URL\"]\n link = os.path.join(dst_http_url, f'{build_tag}.zip')\n Campaign.__logger.info(\n \"All data were successfully published:\\n\\n%s\", link)\n return Campaign.EX_OK\n except KeyError as ex:\n Campaign.__logger.error(\"Please check env var: %s\", str(ex))\n return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR\n except botocore.exceptions.NoCredentialsError:\n Campaign.__logger.error(\n \"Please fill ~/.aws/credentials, ~/.boto or set \"\n \"AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in env\")\n return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR\n except Exception: # pylint: disable=broad-except\n Campaign.__logger.exception(\"Cannot publish the artifacts\")\n return Campaign.EX_ZIP_CAMPAIGN_FILES_ERROR", "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive)\n shutil.move(bl_filename, bl_archive)\n\n perms = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH\n os.chmod(po_archive, perms)\n os.chmod(bl_archive, perms)", "def pack(**kwargs):\n require('repository')\n #if env.repository.startswith('svn://'):\n if env.repository.type == 'svn':\n execute(svn.pack, **kwargs)\n if env.repository.type == 
'git':\n execute(git.pack, **kwargs)\n else:\n abort('Unsupported repository type %s' % env.repository)", "def create_archive(filelist):\n\t\n\n\ttmp = tempfile.NamedTemporaryFile()\n\t# with tempfile.SpooledTemporaryFile() as tmp:\n\twith zipfile.ZipFile(tmp, 'w', zipfile.ZIP_DEFLATED) as archive:\n\t\tarcname = './docs/'\n\t\tfor x in filelist:\n\t\t\tfilename = os.path.basename(x[1])\n\t\t\t_file = x[0]\n\t\t\t# make sure we're at the start...\n\t\t\t_file.seek(0)\n\t\t\tarchive.write(_file.name, arcname=os.path.join(arcname, filename))\n\n\t# Reset file pointer\n\ttmp.seek(0)\n\n\treturn tmp\n\n\t\t# Write file data to response\n\t\t# return HttpResponse(tmp.read(), content_type='application/x-zip-compressed')", "def archive_log(self, f_in, filename):\n if not os.path.isdir('archived'):\n os.makedirs('archived')\n f_out = gzip.open('archived/'+filename+'.gz', 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()", "def deploy_to_s3():\n env.gzip_path = '%(path)s/repository/gzip/assets/' % env\n run(('s3cmd -P --add-header=Content-encoding:gzip --guess-mime-type --rexclude-from=%(path)s/repository/s3exclude sync %(gzip_path)s s3://%(s3_bucket)s/%(project_name)s/') % env)", "def save_zip(zip_file, archive_dir):\n zipdata = zipfile.ZipFile(zip_file, mode='w')\n\n for root, dirs, files in os.walk(archive_dir):\n for name in files:\n fname = os.path.join(root, name)\n zipdata.write(fname)\n zipdata.close()", "def _package_project_files(self, zf):\n write_str_to_zipfile(zf, 'logger_demo/data.txt', 'Foo!\\n')", "def archive_repository(self, repository_id, new_id):\n if not repository_id:\n _exit_if_errors(['--id is required'])\n if not new_id:\n _exit_if_errors(['--new-id is required'])\n\n _, errors = self.rest.archive_backup_repository(repository_id, new_id)\n _exit_if_errors(errors)\n _success('Archived repository')", "def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def zipped_tarball(this_tmp_dir):\n tgz_name = \"%s.tar.gz\" % this_tmp_dir\n\n tar = tarfile.open(tgz_name, \"w:gz\")\n\n tar.add(this_tmp_dir)\n\n tar.close()\n\n return tgz_name", "def archive(self, files, name):\n self.log.debug(\"Putting files into archive: %s\" % \"\\n\".join(files))\n tar_name = \"%s%s\" % (name, self.extension)\n if os.path.exists(tar_name):\n raise RuntimeError (\"Tried to create an archive that already exists: %s\" % tar_name) \n else:\n self.log.info(\"Creating a new archive %s\" % tar_name)\n tar = tarfile.open(tar_name, 'w:gz');\n for name in files:\n tar.add(name)\n print '%s'% (name)\n tar.close()\n return tar_name", "def upload_tar_from_git(path):\n require('release', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('branch', provided_by=[prod])\n local('git checkout %s' % (env.branch))\n local('git archive --format=tar 
%s | gzip > %s.tar.gz' % (env.branch, env.release))\n sudo('mkdir -p %s' % (path))\n put('%s.tar.gz' % (env.release), '/tmp/', mode=0755)\n sudo('mv /tmp/%s.tar.gz %s/packages/' % (env.release, env.code_root))\n sudo('cd %s && tar zxf ../../../packages/%s.tar.gz' % (env.whole_path, env.release))\n local('rm %s.tar.gz' % (env.release))\n sudo('rm %s/packages/%s.tar.gz' % (env.code_root, env.release))", "def create_zip(self, db, submission, request, logger, passwd=None,\r\n mat_dir=None, no_subdirs=None):\r\n if db(db.material.leak_id==submission.id).select().first():\r\n try:\r\n filedir = str(db(db.submission.leak_id==submission.id).select(\r\n db.submission.dirname).first().dirname)\r\n filedir = os.path.join(request.folder, \"material\", filedir)\r\n except:\r\n logger.error('create_zip: invalid filedir')\r\n return dict(error='invalid filedir')\r\n err = None\r\n try:\r\n # XXX should need some refactoring\r\n if not mat_dir:\r\n mat_dir = filedir\r\n splitted = os.path.split(mat_dir)\r\n if splitted[-1].isdigit():\r\n filedir = \"%s-%s\" % (splitted[-2], splitted[-1])\r\n if no_subdirs:\r\n save_file = filedir + \"-0\"\r\n # get only files, no subdirectories\r\n files = [f for f in os.listdir(mat_dir)\r\n if not os.path.isdir(os.path.join(mat_dir, f))]\r\n else:\r\n save_file = filedir\r\n files = os.listdir(mat_dir)\r\n # XXX: issue #51\r\n if passwd and os.path.exists(mat_dir):\r\n logger.error('Encrypted ZIP function disabled, due to security redesign needs')\r\n return 0\r\n # cmd = 'zip -e -P%(passwd) %(zipfile).zip %(files)' % dict(\r\n # passwd=passwd, zipfile=filedir,\r\n # files=\" \".join(files))\r\n # subprocess.check_call(cmd.split())\r\n elif not passwd and os.path.exists(mat_dir):\r\n zipf = zipfile.ZipFile(save_file+'.zip', 'w')\r\n for f in files:\r\n path = os.path.join(mat_dir, f)\r\n zipf.write(path, f)\r\n subdirs = os.walk(path)\r\n for subdir in subdirs:\r\n inner_subdir = os.path.split(subdir[0])[-1]\r\n if not inner_subdir.isdigit():\r\n inner_subdir = \"\"\r\n for subfile in subdir[2]:\r\n zipf.write(os.path.join(subdir[0], subfile),\r\n os.path.join(inner_subdir,subfile))\r\n else:\r\n logger.error('create_zip: invalid path')\r\n except RuntimeError as err:\r\n logger.error('create_zip: error in creating zip')\r\n try:\r\n zipf.close()\r\n except (RuntimeError, zipfile.error) as err:\r\n logger.info('create_zip: error when trying to save zip')\r\n except subprocess.CalledProcessError as err :\r\n logger.error('create_zip: error in creating zip')\r\n finally:\r\n return dict(error=err) if err else None", "def package(notes, version):\n print('creating tarball')\n archive_name = '{}.tgz'.format(version.replace('.', '-'))\n run('tar --exclude=\".git\"'\n ' --exclude=\"{}/wp-content/themes/tmp\"'\n ' --exclude=\"{}/wp-content/uploads\" -zcf ../tmp/{} *'\n .format(DOC_ROOT, DOC_ROOT, archive_name))\n os.chdir(previous_dir)\n return archive_name", "def prepare_project(self, project=None):\n if project == None:\n return [None, '']\n else:\n memory_file = BytesIO()\n with zipfile.ZipFile(memory_file, 'w') as zf:\n project_dict = project.compress()\n comments = project_dict['comments']\n del project_dict['comments']\n resources = project_dict['resources']\n del project_dict['resources']\n history = project_dict['history']\n del project_dict['history']\n records = project_dict['records']\n del project_dict['records']\n diffs = project_dict['diffs']\n del project_dict['diffs']\n application = project_dict['application']\n del project_dict['application']\n try:\n 
self.agent_prepare(zf, 'project', project_dict)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'comments', comments)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'resources', resources)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'environments', history)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'records', records)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'application', application)\n except:\n print(traceback.print_exc())\n try:\n self.agent_prepare(zf, 'diffs', diffs)\n except:\n print(traceback.print_exc())\n memory_file.seek(0)\n\n return [memory_file, \"project-%s.zip\"%str(project.id)]", "def _copy_sources_to_archive(project: PyfmuProject, archive: PyfmuArchive) -> PyfmuArchive:\n\n main_script_found = project.main_script_path.is_file()\n\n if(not main_script_found):\n raise RuntimeError(\n f'main script: {project.main_script} was not found inside project: {project.root}')\n\n archive_main_script_path = archive.root / 'resources' / archive.main_script\n\n # make directories and copy source files\n\n if(not archive_main_script_path.parent.is_dir()):\n makedirs(archive_main_script_path.parent)\n \n copyfile(project.main_script_path,archive_main_script_path)\n\n archive.main_script_path = archive_main_script_path", "def zip(zipfilename, srcdir): # , recursive=True):\n\tsrcdir = uniformpath(srcdir)\n\trootdir = os.path.dirname(srcdir) # \"...doc/Java\" gives doc\n\trootnameindex = len(rootdir) + 1 # \"...doc/Java\" gives start of \"Java\"\n\twith zipfile.ZipFile(zipfilename, mode=\"w\", compression=zipfile.ZIP_DEFLATED) as z:\n\t\tfor f in allfiles(srcdir):\n\t\t\tz.write(f, f[rootnameindex:])", "def open(self, *args, **kwargs):\n return ZipFileArchiver(*args,**kwargs)", "def compress(repo, location):\r\n os.chdir(location)\r\n debug(\"Compressing repositories in [%s]...\" % (location), True)\r\n exec_cmd(\"tar -zcvf bitbucket-backup-%s-%s.tar.gz `ls -d *`\" % (repo.get('owner'), datetime.datetime.now().strftime('%Y%m%d%H%m%s')))\r\n debug(\"Cleaning up...\", True)\r\n for d in os.listdir(location):\r\n path = os.path.join(location, d)\r\n if os.path.isdir(path):\r\n exec_cmd(\"rm -rfv %s\" % path)", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def prepare_project(path: str):\n zip_path = os.path.join(path, 'Simulation.Machine.V1.zip')\n\n # Download zip file with project\n requested_file = requests.get(URL)\n with open(zip_path, 'wb') as f:\n f.write(requested_file.content)\n\n # Extract contents\n with ZipFile(zip_path, 'r') as zip_obj:\n zip_obj.extractall(path)\n\n # Remove file\n os.remove(zip_path)", "def zipData(\n backend,\n org,\n repo,\n relative=RELATIVE,\n version=None,\n tf=True,\n keep=True,\n source=None,\n dest=None,\n):\n\n if source is None:\n source = backendRep(backend, \"clone\")\n if dest is None:\n dest = f\"{DW}/{backendRep(backend, 'norm')}\"\n relative = prefixSlash(normpath(relative))\n console(f\"Create release data for {org}/{repo}{relative}\")\n sourceBase = normpath(f\"{source}/{org}\")\n destBase = normpath(f\"{dest}/{org}-release\")\n sourceDir = f\"{sourceBase}/{repo}{relative}\"\n destDir = f\"{destBase}/{repo}\"\n dataFiles = {}\n\n initTree(destDir, fresh=not keep)\n relativeDest = 
relative.removeprefix(\"/\").replace(\"/\", \"-\")\n\n if tf:\n if not dirExists(sourceDir):\n return\n with scanDir(sourceDir) as sd:\n versionEntries = [(sourceDir, e.name) for e in sd if e.is_dir()]\n if versionEntries:\n console(f\"Found {len(versionEntries)} versions\")\n else:\n versionEntries.append((sourceDir, \"\"))\n console(\"Found unversioned features\")\n for (versionDir, ver) in versionEntries:\n if ver == TEMP_DIR:\n continue\n if version is not None and version != ver:\n continue\n versionRep = f\"/{ver}\" if ver else \"\"\n versionRep2 = f\"{ver}/\" if ver else \"\"\n versionRep3 = f\"-{ver}\" if ver else \"\"\n tfDir = f\"{versionDir}{versionRep}\"\n with scanDir(tfDir) as sd:\n for e in sd:\n if not e.is_file():\n continue\n featureFile = e.name\n if featureFile in EXCLUDE:\n continue\n if not featureFile.endswith(\".tf\"):\n console(\n f'WARNING: non feature file \"{versionRep2}{featureFile}\"',\n error=True,\n )\n continue\n dataFiles.setdefault(ver, set()).add(featureFile)\n\n console(f\"zip files end up in {destDir}\")\n for (ver, features) in sorted(dataFiles.items()):\n item = f\"{org}/{repo}\"\n versionRep = f\"/{ver}\" if ver else \"\"\n versionRep3 = f\"-{ver}\" if ver else \"\"\n target = f\"{relativeDest}{versionRep3}.zip\"\n console(\n f\"zipping {item:<25} {ver:>4} with {len(features):>3} features ==> {target}\"\n )\n with ZipFile(f\"{destDir}/{target}\", \"w\", **ZIP_OPTIONS) as zipFile:\n for featureFile in sorted(features):\n zipFile.write(\n f\"{sourceDir}{versionRep}/{featureFile}\",\n arcname=featureFile,\n )\n else:\n results = []\n versionRep = f\"/{version}\" if version else \"\"\n sourceDir = f\"{sourceDir}{versionRep}\"\n collectFiles(sourceDir, \"\", results)\n if not relativeDest:\n relativeDest = \"-\"\n console(f\"zipping {org}/{repo}{relative}{versionRep} with {len(results)} files\")\n console(f\"zip file is {destDir}/{relativeDest}.zip\")\n with ZipFile(f\"{destDir}/{relativeDest}.zip\", \"w\", **ZIP_OPTIONS) as zipFile:\n for (internalPath, path) in sorted(results):\n zipFile.write(\n path,\n arcname=internalPath,\n )", "def open_project(self, zipfilename):\n self.project = zipfile.ZipFile(zipfilename, 'r')\n # self.corpus = codecs.open(zipfilename + '.main.txt', 'w', encoding='utf-8')", "def archive(filepath,archive_dir='archive'):\n\n # Make sure we have a directory to archive to\n try:\n mkdir(archive_dir)\n except:\n print(\"Error making archive directory\")\n return\n\n try:\n (dir, filename) = os.path.split(filepath)\n outfile = os.path.join(dir,archive_dir,filename)+'.gz'\n with open(filename, 'rb') as f_in, gzip.open(outfile, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n except Exception as e:\n print(\"Error archiving \",filepath)\n print(e)\n else:\n try:\n os.remove(filepath)\n except:\n print(\"Error removing \",filepath)", "def master_archive(f, e):\n template = e.get_template(TEMPLATES['archive'])\n write_file(\"archives.html\", template.render(entries=f))", "def download(repo_url, sha, working_dir):\n print 'Downloading %s ...' 
% (sha)\n sf_zip = os.path.join(working_dir, 'sf.gz')\n with open(sf_zip, 'wb+') as f:\n f.write(requests.get(github_api(repo_url) + '/zipball/' + sha).content)\n zip_file = ZipFile(sf_zip)\n zip_file.extractall(working_dir)\n zip_file.close()\n\n for name in zip_file.namelist():\n if name.endswith('/src/'):\n src_dir = name\n break\n\n return os.path.join(working_dir, src_dir)", "def deploy():\n build()\n rsync_project(\n local_dir=os.path.abspath(env.config['destination']) + \"/\",\n remote_dir=env.remote_dir,\n delete=True,\n extra_opts='--exclude=\".DS_Store\"',\n )", "def zipFasta(self):\n utils.log(\"zipping {} ...\".format(self.fastaFileName))\n cmd = \"bgzip -f {}\".format(self.fastaFileName)\n utils.runCommand(cmd)", "def build_jar_and_push(project, branch):\n Print.GN('Building JAR and pushing image.')\n current_env = os.environ.copy()\n current_env['BRANCH'] = branch\n subprocess.check_call([\n os.path.join(SCRIPT_DIR, '../../docker/build.sh'),\n 'jar',\n '-p', project,\n '-d', 'push',\n '-r', 'gcr',\n ], env=current_env)", "def _zip_dir(path):\n file_path = '/tmp/iapydependencies-' + uuid.uuid1().hex + \".zip\"\n _make_archive(_get_dir_entries(path, True), file_path, path[0:path.rfind('/')])\n return file_path", "def archive_backup(self):\n\n # Archiving the Training script\n shutil.copyfile(self.script_path, self.save_path + '/0-' + os.path.basename(self.script_path))\n os.chmod(self.save_path + '/0-' + os.path.basename(self.script_path), 0o755)\n # Archiving the src folder\n pkg_path = os.path.dirname(arch_src)\n backup_path = os.path.join(self.save_path, 'src_backup')\n shutil.make_archive(backup_path, 'gztar', pkg_path)\n\n # Archiving the Environment Info\n env_info = collect_env.get_pretty_env_info()\n with open(self.save_path + '/env_info.txt', 'w') as f:\n f.write(env_info)", "def do_pack():\n date = datetime.datetime.now()\n archive = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(date.year,\n date.month,\n date.day,\n date.hour,\n date.minute,\n date.second)\n local('mkdir -p versions')\n check = local('tar -cvzf {} web_static'.format(archive))\n if check.failed:\n return None\n else:\n return archive", "def test_git_end_to_end(tmpdir):\n repo = Repo.init(path=tmpdir)\n tmppath = pathlib.Path(tmpdir)\n\n index = repo.index\n author = Actor(\"An author\", \"author@example.com\")\n committer = Actor(\"A committer\", \"committer@example.com\")\n\n # First commit\n with open(tmppath / \".gitignore\", \"w\") as ignore:\n ignore.write(\".wily/\")\n index.add([\".gitignore\"])\n commit1 = index.commit(\"commit1\", author=author, committer=committer)\n\n # Second commit\n with open(tmppath / \"test.py\", \"w\") as file1:\n file1.write(\"print(1)\")\n index.add([\"test.py\"])\n commit2 = index.commit(\"commit2\", author=author, committer=committer)\n repo.close()\n\n config = DEFAULT_CONFIG\n config.path = tmpdir\n\n archiver = GitArchiver(config)\n assert archiver.config == config\n\n revisions = archiver.revisions(tmpdir, 3)\n assert len(revisions) == 2\n assert revisions[0].message == \"commit2\"\n assert revisions[0].author_email == \"author@example.com\"\n assert revisions[0].author_name == \"An author\"\n assert (\n revisions[0].key in commit2.name_rev\n and revisions[0].key not in commit1.name_rev\n )\n\n assert revisions[1].message == \"commit1\"\n assert revisions[1].author_email == \"author@example.com\"\n assert revisions[1].author_name == \"An author\"\n assert (\n revisions[1].key in commit1.name_rev\n and revisions[1].key not in commit2.name_rev\n )\n\n checkout = 
archiver.checkout(revisions[1], None)\n\n assert not (tmppath / \"test.py\").exists()\n\n finish = archiver.finish()\n\n assert (tmppath / \"test.py\").exists()", "def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,\n dry_run=0, owner=None, group=None):\n save_cwd = os.getcwd()\n if root_dir is not None:\n log.debug(\"changing into '%s'\", root_dir)\n base_name = os.path.abspath(base_name)\n if not dry_run:\n os.chdir(root_dir)\n\n if base_dir is None:\n base_dir = os.curdir\n\n kwargs = {'dry_run': dry_run}\n\n try:\n format_info = ARCHIVE_FORMATS[format]\n except KeyError:\n raise ValueError(\"unknown archive format '%s'\" % format)\n\n func = format_info[0]\n for arg, val in format_info[1]:\n kwargs[arg] = val\n\n if format != 'zip':\n kwargs['owner'] = owner\n kwargs['group'] = group\n\n try:\n filename = func(base_name, base_dir, **kwargs)\n finally:\n if root_dir is not None:\n log.debug(\"changing back to '%s'\", save_cwd)\n os.chdir(save_cwd)\n\n return filename", "def do_pack():\n\n now = datetime.now()\n time_now = now.strftime(\"%Y%m%d%H%M%S\")\n archive_name = \"versions/web_static_\" + time_now + \".tgz\"\n local('mkdir -p versions')\n archive_command = local(\"tar -zcvf \" + archive_name + \" web_static\")\n\n if archive_command.succeeded:\n return archive_name\n\n return None", "def get_archive_async(\n hostname, project, treeish, dir_path=None, **fetch_kwargs):\n _validate_args(hostname, project, treeish, dir_path)\n dir_path = (dir_path or '').strip('/')\n if dir_path:\n dir_path = '/%s' % dir_path\n return gerrit.fetch_async(\n hostname,\n '%s/+archive/%s%s.tar.gz' % _quote_all(project, treeish, dir_path),\n **fetch_kwargs)", "def targz_folder(folder, archive_name=None):\n if not archive_name:\n archive_name = \"{}.tar.gz\".format(folder)\n with tarfile.open(archive_name, \"w:gz\") as tar:\n tar.add(folder, arcname=os.path.basename(folder))\n return archive_name", "def sync(args: argparse.Namespace) -> None:\n\tdel args\n\trepo_path = _find_repo()\n\tmanifest_file = os.path.join(repo_path, MANIFEST_DIRECTORY, storas.manifest.DEFAULT_MANIFEST_FILE)\n\tmanifest = storas.manifest.load(manifest_file)\n\tfor project in manifest.projects:\n\t\tfull_path = os.path.join(repo_path, \"..\", project.path)\n\t\tremote = project.remote\n\t\tfull_fetch_url = urllib.parse.urljoin(remote.fetch_host, project.name)\n\t\tif not os.path.exists(full_path):\n\t\t\tos.makedirs(full_path, exist_ok=True)\n\t\t\tLOGGER.debug(\"Created '%s'\", full_path)\n\t\t\t_run_git([\"clone\", \"-b\", project.revision, full_fetch_url], cwd=full_path)", "def archive(self, user: User, snapshot: str, path: str, **callback) -> Job:\n # Get the upload policy\n policy = snapshots_storage().generate_post_policy(path)\n url = policy.get(\"url\") if policy else None\n secrets = policy.get(\"fields\") if policy else None\n\n return Job.objects.create(\n project=self,\n creator=user,\n method=JobMethod.archive.name,\n params=dict(project=self.id, snapshot=snapshot, path=path, url=url,),\n secrets=secrets,\n description=f\"Archive project '{self.name}'\",\n **callback,\n )", "def dir_2_cbz(dir_pth):\r\n shutil.make_archive(dir_pth, 'zip', dir_pth)\r\n shutil.rmtree(dir_pth)\r\n os.rename(dir_pth+'.zip', dir_pth+'.cbz')\r\n pass", "def _download_project(name, apikey):\n payload = {'apikey': apikey, 'project': name, 'version': 'portia'}\n r = requests.get(DASH_API_URL + 'as/project-slybot.zip', params=payload)\n return r.content", "def pack_zip(output_filename, sources):\n previous_dir = 
os.getcwd()\n if not isinstance(sources, (list, tuple)) and \\\n isinstance(sources, str):\n sources = [sources]\n zip_ds = zipfile.ZipFile(output_filename, 'w', zipfile.ZIP_DEFLATED)\n for source in sources:\n os.chdir(os.path.dirname(source))\n if os.path.isdir(source):\n for root, dirs, files in os.walk(os.path.basename(source)):\n for file in files:\n zip_ds.write(os.path.join(root, file))\n else:\n zip_ds.write(os.path.basename(source))\n zip_ds.close()\n os.chdir(previous_dir)", "def backup_project():\n _require_environment()\n\n # Unless explicitly provided, uses local Django settings to\n # extract username/password to access remote database\n database = env.project.get('database', None)\n if not database:\n django.settings_module(env.project['settings'])\n database = django_settings.DATABASES['default']\n\n # Remote side\n with prefix(_django_prefix()):\n with cd(_django_project_dir()):\n # Creates dir to store backup, avoiding existing similar names\n dirname = '../backup/%s_%s' % (datetime.date.today().strftime('%Y%m%d'), env.environment)\n path = dirname\n index = 0\n while files.exists(path) or files.exists('%s.tar.gz' % path):\n index += 1\n path = '%s.%s' % (dirname, index)\n run('mkdir -p %s' % path)\n\n # Backup MySQL\n run('mysqldump %s -u %s -p%s %s > %s/%s.sql' % (\n '-h %s' % database['HOST'] if database.get('HOST', None) else '',\n database['USER'],\n database['PASSWORD'],\n database['NAME'],\n path,\n env.project['project'],\n ))\n\n # Backup extra files\n extra_backup_files = env.project.get('extra_backup_files', [])\n for file in extra_backup_files:\n run('cp -R %s %s/' % (file, path))\n\n # Create .tar.gz and removes uncompressed files\n with hide('stdout'):\n run('tar -czvf %s.tar.gz %s/' % (path, path))\n run('rm -rf %s/' % path)\n\n # Download backup?\n if console.confirm('Download backup?'):\n return get('%s.tar.gz' % path, '../backup')", "def create_gitignore(project_name):\n\twith io.FileIO(\".gitignore\", \"w\") as file:\n\t\tfile.write(\"# Directories #\\n###################\\nbin/\\n\\n\"\n\t\t\t\t\"# OS generated files #\\n###################\\n\"\n\t\t\t\t\".DS_Store\\n._*\\n.nfs*\\n\\n\"\n\t\t\t\t\"# Compiled source #\\n###################\\n\"\n\t\t\t\t\"a.out\\n*.o\")\n\tshutil.move('.gitignore', project_name)\n\tprint \"Created project .gitignore file.\"", "def gzip_assets():\n run('cd %(repo_path)s; python gzip_assets.py' % env)", "def archive_writeup(syn, evaluation, stat=\"VALIDATED\", reArchive=False):\n if type(evaluation) != synapseclient.Evaluation:\n evaluation = syn.getEvaluation(evaluation)\n\n print(\"\\n\\nArchiving\", evaluation.id, evaluation.name)\n print(\"-\" * 60)\n\n for sub, status in syn.getSubmissionBundles(evaluation, status=stat):\n # retrieve file into cache and copy it to destination\n checkIfArchived = filter(\n lambda x: x.get(\"key\") == \"archived\",\n status.annotations['stringAnnos'])\n if len(list(checkIfArchived)) == 0 or reArchive:\n projectEntity = synapseclient.Project(\n 'Archived {} {} {} {}'.format(\n sub.name.replace(\"&\", \"+\").replace(\"'\", \"\"),\n int(round(time.time() * 1000)),\n sub.id,\n sub.entityId))\n entity = syn.store(projectEntity)\n adminPriv = [\n 'DELETE', 'DOWNLOAD', 'CREATE', 'READ', 'CHANGE_PERMISSIONS',\n 'UPDATE', 'MODERATE', 'CHANGE_SETTINGS']\n syn.setPermissions(entity, \"3324230\", adminPriv)\n synapseutils.copy(syn, sub.entityId, entity.id)\n archived = {\"archived\": entity.id}\n status = utils.update_single_submission_status(status, archived)\n syn.store(status)", "def 
unarchive(filename, project_dir, parent_dir=None, frontend=None):\n if frontend is None:\n frontend = _null_frontend()\n return archiver._unarchive_project(filename, project_dir=project_dir, parent_dir=parent_dir, frontend=frontend)", "def _archive_logs(self, logdir, files):\n cwd = os.getcwd()\n archive_wd = os.path.dirname(logdir)\n archive_file = os.path.basename(logdir) + \".tgz\"\n\n # move files into logdir for archive\n for f in files:\n self.logger.info(\"moving '%s' to archive folder\" % f)\n shutil.move(f, logdir)\n\n # move to logdir parent folder\n self.logger.info(\"archiving profile logs into '%s'\" % archive_file)\n os.chdir(archive_wd)\n archive = tarfile.open(archive_file, \"w:gz\")\n archive.add(os.path.basename(logdir))\n archive.close()\n\n # go back to current working dir and remove logdir\n os.chdir(cwd)\n shutil.rmtree(logdir)", "def download_project(self, workflow_snapshot_id, remote_path: Text, local_path: Text = None) -> Text:\n local_zip_file_name = 'workflow_{}_project'.format(workflow_snapshot_id)\n if local_path is not None:\n repo_path = Path(local_path)\n elif self.local_repository is not None:\n repo_path = Path(self.local_repository)\n else:\n repo_path = Path(tempfile.gettempdir())\n local_zip_file_path = str(repo_path / local_zip_file_name) + '.zip'\n extract_path = str(repo_path / local_zip_file_name)\n\n if not os.path.exists(local_zip_file_path):\n logger.debug(\"{} not exist\".format(local_zip_file_path))\n lock_file_path = os.path.join(repo_path, \"{}.lock\".format(local_zip_file_name))\n lock_file = open(lock_file_path, 'w')\n logger.debug(\"Locking file {}\".format(lock_file_path))\n fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)\n logger.debug(\"Locked file {}\".format(lock_file_path))\n try:\n if not os.path.exists(local_zip_file_path):\n logger.info(\"Downloading S3 object: {}\".format(remote_path))\n self._get_s3_object(local_zip_file_path, remote_path)\n logger.info(\"Downloaded S3 object: {}\".format(local_zip_file_path))\n except Exception as e:\n logger.error(\"Failed to download S3 file: {}\".format(remote_path), exc_info=e)\n finally:\n logger.debug('Locked file {}'.format(lock_file_path))\n fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)\n logger.debug('Unlocked file {}'.format(lock_file_path))\n lock_file.close()\n if os.path.exists(lock_file_path):\n try:\n os.remove(lock_file_path)\n except OSError as e:\n logger.warning(\"Failed to remove lock file: {}\".format(lock_file_path), exc_info=e)\n else:\n logger.info(\"S3 file: {} already exist at {}\".format(remote_path, local_zip_file_path))\n\n return extract_project_zip_file(workflow_snapshot_id=workflow_snapshot_id,\n local_root_path=repo_path,\n zip_file_path=local_zip_file_path,\n extract_project_path=extract_path)", "def archive(self,prompt=True,dry_run=False):\n\t\t# make sure the project is valid\n\t\tself.validate_project()\n\n\t\t# get the segments\n\t\tself.get_segments()\n\n\t\t# get the header file(s)\n\t\tself.get_headers()\n\n\t\t# determine which pool each file is going to\n\t\tself._set_element_pools()\n\n\t\t# check the files against the pools for duplicates\n\t\tself._get_archive_status()\n\n\t\t# print what we found for archiving\n\t\tself.print_queue()\n\n\t\t# check to see if we have anything to archive\n\t\t# and prompt the user \n\t\tself.ready_check(prompt=prompt)\n\n\t\t# do the archive\n\t\tself._archive(dry_run=dry_run)", "def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf 
versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result", "def archive(self):\n logging.info(_('Creating compressed archive...'))\n\n report_file_ext = 'bz2'\n compressor = 'bzip2'\n caller = Caller({})\n try:\n caller.call('xz --version')\n report_file_ext = 'xz'\n compressor = 'xz'\n except Exception:\n logging.debug('xz compression not available')\n\n if not os.path.exists(self.conf[\"output\"]):\n os.makedirs(self.conf[\"output\"])\n\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s.tar.%s\" % (\n 'LogCollector',\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n if self.conf[\"ticket_number\"]:\n self.conf[\"path\"] = os.path.join(\n self.conf[\"output\"],\n \"sosreport-%s-%s-%s.tar.%s\" % (\n 'LogCollector',\n self.conf[\"ticket_number\"],\n time.strftime(\"%Y%m%d%H%M%S\"),\n report_file_ext\n )\n )\n\n config = {\n 'report': os.path.splitext(self.conf['path'])[0],\n 'compressed_report': self.conf['path'],\n 'compressor': compressor,\n 'directory': self.conf[\"local_tmp_dir\"],\n 'rname': os.path.basename(self.conf['path']).split('.')[0],\n }\n caller.configuration = config\n shutil.move(\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n 'working'\n ),\n os.path.join(\n self.conf[\"local_tmp_dir\"],\n config[\"rname\"]\n ),\n )\n caller.call(\"tar -cf '%(report)s' -C '%(directory)s' '%(rname)s'\")\n shutil.rmtree(self.conf[\"local_tmp_dir\"])\n caller.call(\"%(compressor)s -1 '%(report)s'\")\n os.chmod(self.conf[\"path\"], stat.S_IRUSR | stat.S_IWUSR)\n sha256_out = caller.call(\"sha256sum '%(compressed_report)s'\")\n checksum = sha256_out.split()[0]\n with open(\"%s.sha256\" % self.conf[\"path\"], 'w') as checksum_file:\n checksum_file.write(sha256_out)\n\n msg = ''\n if os.path.exists(self.conf[\"path\"]):\n archiveSize = float(os.path.getsize(self.conf[\"path\"])) / (1 << 20)\n\n size = '%.1fM' % archiveSize\n\n msg = _(\n 'Log files have been collected and placed in {path}\\n'\n 'The sha256 for this file is {checksum} and its size is {size}'\n ).format(\n path=self.conf[\"path\"],\n size=size,\n checksum=checksum,\n )\n\n if archiveSize >= 1000:\n msg += _(\n '\\nYou can use the following filters -c, -d, -H in the '\n 'next execution to limit the number of Datacenters,\\n'\n 'Clusters or Hosts that are collected in order to '\n 'reduce the archive size.'\n )\n return msg", "def zip_alg_file(task_id):\n start_dir = os.path.join(FILE_PATH, \"task\", task_id)\n res = None\n if os.path.exists(start_dir):\n zip_file_dir = os.path.join(FILE_PATH, \"task\", task_id + \".zip\")\n file = zipfile.ZipFile(zip_file_dir, \"w\", zipfile.ZIP_DEFLATED)\n for dir_path, _, file_names in os.walk(start_dir):\n for file_name in file_names:\n file.write(os.path.join(dir_path, file_name))\n file.close()\n res = zip_file_dir\n return res", "def _zip_archive(extracted_source, exclude_files=None, **_):\n ctx.logger.debug(\"Zipping source {source}\".format(source=extracted_source))\n exclude_files = exclude_files or []\n ctx.logger.debug('Excluding files {l}'.format(l=exclude_files))\n with tempfile.NamedTemporaryFile(suffix=\".zip\",\n delete=False) as updated_zip:\n updated_zip.close()\n with zipfile.ZipFile(updated_zip.name,\n mode='w',\n compression=zipfile.ZIP_DEFLATED) as output_file:\n for dir_name, subdirs, filenames in os.walk(extracted_source):\n # Make sure that the files that we don't want\n # to include (e.g. 
plugins directory) will not be archived.\n            exclude_dirs(dir_name, subdirs, exclude_files)\n            for filename in filenames:\n                # Extra layer of validation on the excluded files.\n                if not exclude_file(dir_name, filename, exclude_files):\n                    # Create the path as we want to archive it to the\n                    # archive.\n                    file_to_add = os.path.join(dir_name, filename)\n                    # The name of the file in the archive.\n                    if file_storage_breaker(file_to_add):\n                        continue\n                    arc_name = file_to_add[len(extracted_source)+1:]\n                    output_file.write(file_to_add, arcname=arc_name)\n    archive_file_path = updated_zip.name\n    return archive_file_path", "def create_artifact(current_revision):\n archive_path = '/tmp/{revision}.tar.gz'.format(revision=current_revision)\n local('tar -czf {archive_path} --exclude=.git *'.format(archive_path=archive_path))", "def zipdir(path, ziph):\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file),\n arcname=os.path.join(os.path.relpath(root, path), file))", "def archive_logs(self):\n source = GAConfig[\"log_file_location\"]\n destination = source + \"Archive/\"\n\n if not os.path.exists(source):\n os.makedirs(source)\n if not os.path.exists(destination):\n os.makedirs(destination)\n\n if len(os.listdir(source)) > 1:\n specific_folder = destination + str(\n len(os.listdir(destination))) + '/'\n os.makedirs(specific_folder)\n for f in os.listdir(source):\n if((\".log\" in f) or (\".zip\" in f)):\n shutil.move(source + f, specific_folder)", "def _rollback_releaseinfo_file(projname):\n dirs = projname.split('.')\n os.chdir(os.path.join(*dirs))\n print('rolling back releaseinfo.py for %s' % projname)\n os.system('git checkout -- releaseinfo.py')", "def zip_file(src_dir):\n zip_name = slugify(src_dir) + '.zip'\n z = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\n for dirpath, dirnames, filenames in os.walk(src_dir):\n fpath = dirpath.replace(src_dir, '')\n fpath = fpath and fpath + os.sep or ''\n for filename in filenames:\n z.write(os.path.join(dirpath, filename), fpath + filename)\n z.close()", "def push_sources():\n ensure_src_dir()\n push_rev = getattr(env, 'push_rev', None)\n if push_rev is None:\n push_rev = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n local(\"git tag -a {0} -m \\\"Tagged for release\\\"\".format(push_rev))\n local(\"git push origin master --tags\")\n\n with cd(SRC_DIR):\n run(\"git pull origin master\")\n run(\"git fetch -t\")\n run(\"git checkout {0}\".format(push_rev))", "def get_archive_url(url, branch='master', release=None):\n git_url = trim_repo_url(url)\n fragment = None\n file = git_url.split(\"/\")[-1]\n \n if release:\n fragment = \"/archive/{}.zip\".format(release)\n else:\n fragment = \"/archive/{}.zip\".format(branch)\n \n return file, git_url+fragment", "def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()", "def upload(project, private=None, site=None, username=None, token=None, suffix='.tar.bz2', log_level=None):\n failed = _check_problems(project)\n if failed is not None:\n return failed\n\n # delete=True breaks on windows if you use tmp_tarfile.name to re-open the file,\n # so don't use delete=True.\n tmp_tarfile = NamedTemporaryFile(delete=False, prefix="anaconda_upload_", suffix=suffix)\n tmp_tarfile.close() # immediately un-use it to avoid file-in-use errors on Windows\n try:\n status = archive(project, tmp_tarfile.name)\n if not status:\n return status\n status = client._upload(project,\n 
tmp_tarfile.name,\n uploaded_basename=(project.name + suffix),\n private=private,\n site=site,\n username=username,\n token=token,\n log_level=log_level)\n return status\n finally:\n os.remove(tmp_tarfile.name)", "def get_zip_file(self):\n io = StringIO()\n zf = zipfile.ZipFile(io, \"w\")\n try:\n for track in self.get_tracks():\n zf.write(track.file_name,\n track.safe_file_name,\n zipfile.ZIP_DEFLATED)\n finally:\n zf.close()\n\n io.reset()\n io.seek(0, 2)\n length = io.tell()\n io.reset()\n return io,\\\n cleanse_filename(\"%s - %s.zip\" % (self.name, self.year)),\\\n length", "def make_submissions_zip(self, subids, incl_subs=True, incl_reports=False, additional_files=None):\n \n #Initial filename\n zip_path = os.path.join(settings.TEMP_DIR, \"%s_%s_subs.zip\" % (self.course.code, self.code))\n \n #Find new tmp filename\n i = 1\n while os.path.exists(zip_path):\n zip_path = os.path.join(settings.TEMP_DIR, \"%s_%s_subs_%d.zip\" % (self.course.code, self.code, i))\n i += 1\n \n #Open file\n zfile = ZipFile(zip_path, \"w\")\n \n #Load submissions to put into zipfile\n submissions = self.submission_set.filter(id__in=subids)\n \n #Write all submissions to the zip file\n for sub in submissions:\n \n #Write submission files\n if incl_subs:\n sub_path = sub.get_directory()\n sub_name = sub.get_filename()\n zfile.write(os.path.join(sub_path, sub_name), os.path.join(sub.student.username, sub_name))\n \n #Write report files\n if incl_reports and sub.get_report_files():\n rep_path = sub.get_directory(subdir='report')\n for filename, date, size in sub.get_report_files():\n zfile.write(os.path.join(rep_path, filename), os.path.join(sub.student.username, 'report', filename))\n \n #Write additional files\n if additional_files:\n if type(additional_files) != list:\n additional_files = [additional_files]\n for filename in additional_files:\n zfile.write(filename, os.path.basename(filename))\n \n #Save zip file and return path\n zfile.close()\n return zip_path", "def modifySrcDstForZipDownload(src, dstBase): \n\n src = src + \"?format=zip\"\n dst = os.path.join(dstBase , 'projects' + \n src.replace('?format=zip', '').\\\n split('projects')[1].split('/files')[0] + '/files.zip')\n return src, dst", "def zip_data_file(task_id, task_name, data_path):\n zip_file_dir = os.path.join(FILE_PATH, task_id + \".zip\")\n file = zipfile.ZipFile(zip_file_dir, \"w\", zipfile.ZIP_DEFLATED)\n sample_path = os.path.join(data_path, \"datasets\", str(task_id) + \"_\" + task_name + \".csv\")\n true_dag_path = os.path.join(data_path, \"true\", str(task_id) + \"_\" + task_name + \".npz\")\n file.write(sample_path)\n file.write(true_dag_path)\n file.close()\n return zip_file_dir", "def submit(self, root=None, force=False, repo=None):\n import ambry.util as du\n \n if repo:\n self.repo_name = repo\n self.set_api()\n \n import os\n from os.path import basename\n \n ckb = self.remote.update_or_new_bundle_extract(self.bundle)\n \n sent = set()\n \n self.remote.put_package(ckb)\n \n for doc in self.bundle.config.group('about').get('documents',[]):\n self.store_document(ckb, doc)\n\n zip_inputs = {}\n\n for extract_data in self.generate_extracts(root=root):\n\n zip = extract_data.get('zip', False)\n will_zip = False\n \n if zip == 'dir':\n zip_inputs[os.path.dirname(extract_data['path'])] = extract_data\n will_zip = True\n elif zip == 'file':\n zip_inputs[extract_data['path']] = extract_data\n will_zip = True\n\n file_ = self._do_extract(extract_data, force=force)\n \n if will_zip:\n self.bundle.log(\"{} will get submitted as a 
zip\".format(file_))\n elif file_ not in sent:\n r = self._send(ckb, extract_data,file_)\n sent.add(file_)\n url = r['ckan_url']\n self.bundle.log(\"Submitted {} to {}\".format(basename(file_), url))\n else:\n self.bundle.log(\"Already processed {}, not sending.\".format(basename(file_)))\n \n \n zip_outputs = self.zip(zip_inputs.keys() )\n \n \n print zip_outputs\n \n for in_zf, out_zf in zip_outputs.items():\n extract_data = zip_inputs[in_zf]\n extract_data['name'] = extract_data['zipname'] if 'zipname' in extract_data else extract_data['name']\n r = self._send(ckb, extract_data,out_zf)\n \n url = r['ckan_url']\n self.bundle.log(\"Submitted {} to {}\".format(basename(out_zf), url))\n \n \n return True" ]
[ "0.76189184", "0.7322678", "0.67856395", "0.6666653", "0.6537198", "0.65115416", "0.6488287", "0.64775175", "0.6187276", "0.6164838", "0.6076534", "0.6037745", "0.5955031", "0.58592504", "0.5741227", "0.5725475", "0.56754285", "0.56595165", "0.5650359", "0.56347746", "0.5608411", "0.5564879", "0.5529604", "0.55295163", "0.55216163", "0.54916716", "0.54906976", "0.5488844", "0.5462392", "0.5418018", "0.53931177", "0.53846043", "0.5361576", "0.53591657", "0.53403986", "0.53382045", "0.53240675", "0.53023165", "0.5297511", "0.5293009", "0.5290994", "0.5286784", "0.52643174", "0.5262667", "0.52613544", "0.5259151", "0.5247076", "0.5229156", "0.5224809", "0.5210093", "0.52098334", "0.520708", "0.52036333", "0.51887494", "0.5182706", "0.51760274", "0.51744145", "0.51634455", "0.51570743", "0.5156997", "0.515402", "0.51288676", "0.5101615", "0.50921494", "0.5080563", "0.5069462", "0.50582874", "0.5048558", "0.5043566", "0.5036178", "0.50293666", "0.5024678", "0.50139403", "0.50028735", "0.49998704", "0.49971765", "0.49941182", "0.49875396", "0.49826834", "0.4980002", "0.49768874", "0.49674195", "0.49650723", "0.4961332", "0.49552926", "0.49513632", "0.49506366", "0.49446988", "0.49391472", "0.49348235", "0.49340522", "0.49255517", "0.49223906", "0.49125198", "0.49094912", "0.4906735", "0.49024042", "0.49013138", "0.4900844", "0.4897491" ]
0.6150292
10
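Editor's note: the record above groups snippets that all archive a project tree into a compressed file. Below is a minimal, self-contained sketch of the pattern most of them share (walking a directory and writing entries under relative arcnames); the function name and example paths are illustrative and are not taken from any of the snippets.

import os
import zipfile

def zip_directory(src_dir, zip_path):
    """Archive every file under src_dir into zip_path, storing paths relative to src_dir."""
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
        for dirpath, _dirnames, filenames in os.walk(src_dir):
            for filename in filenames:
                full_path = os.path.join(dirpath, filename)
                # A relative arcname keeps absolute paths out of the archive.
                zf.write(full_path, arcname=os.path.relpath(full_path, src_dir))

# Usage (illustrative): zip_directory("web_static", "versions/web_static.zip")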
Convert to front facing coordinates
def get_front_facing_xz(self):
    yaw_radian = math.radians(self.cur_rotation)
    # Pitch is fixed at 0 here, so each math.cos(0) factor evaluates to 1.
    return cam.step * math.sin(yaw_radian) * math.cos(0), cam.step * math.cos(yaw_radian) * math.cos(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frontFace(self):\n\n if not self.threedee:\n return gl.GL_CCW\n\n # Only looking at the mesh -> display\n # transform, thus we are assuming that\n # the MVP matrix does not have any\n # negative scales.\n xform = self.opts.getTransform('mesh', 'display')\n\n if npla.det(xform) > 0: return gl.GL_CCW\n else: return gl.GL_CW", "def _find_front(self):\n self.front = (laplace(self.working_mask) > 0).astype('uint8')\n # TODO: check if scipy's laplace filter is faster than scikit's", "def pareto_frontier(self) -> Tuple[Tensor, Tensor]:\n raise NotImplementedError(\"Pareto frontier not yet implemented.\")", "def get_forehead(self):\n\t\tx,y,w,h = self.get_face()\n\t\t\n\t\tx += w * self.fh_x\n\t\ty += h * self.fh_y\n\t\tw *= self.fh_w\n\t\th *= self.fh_h\n\t\t\n\t\tx -= (w / 2.0)\n\t\ty -= (h / 2.0)\n\t\t\n\t\treturn tuple(map(int, (x,y,w,h)))", "def front(self):\n return _osgAnimation.vectorFloatKeyframe_front(self)", "def startface(self):\n self.fan = (self.position.x,self.position.y,self.position.z)", "def front(self):\n return _osgAnimation.vectorMatrixKeyframe_front(self)", "def rotateXOut(self):\n MV = self.MV\n MV[:3, 2] = 1, 0, 0 # 3rd col is normal vector, make it point along x axis\n # set top left and top middle values to zero:\n MV[0, 0] = 0\n MV[0, 1] = 0\n b = MV[2, 0] # grab bottom left value\n a = np.sqrt(1 - b**2) # calc new complementary value to get normalized vectors\n #if MV[1, 0] < 0:\n # a = -a # keep a -ve, reduce jumping around of axes\n MV[1, 0] = a\n MV[2, 1] = a\n MV[1, 1] = -b # needs to be -ve of MV[2, 0]\n self.MV = MV", "def rotateXOut(self):\n MV = self.MV\n MV[:3, 2] = 1, 0, 0 # 3rd col is normal vector, make it point along x axis\n # set top left and top middle values to zero:\n MV[0, 0] = 0\n MV[0, 1] = 0\n b = MV[2, 0] # grab bottom left value\n a = np.sqrt(1 - b**2) # calc new complementary value to get normalized vectors\n #if MV[1, 0] < 0:\n # a = -a # keep a -ve, reduce jumping around of axes\n MV[1, 0] = a\n MV[2, 1] = a\n MV[1, 1] = -b # needs to be -ve of MV[2, 0]\n self.MV = MV", "def isotropic_correction_front(self):\n return self.cartesian_map_array(self.IsotropicCorrection(self,'front'))", "def front(self):\n return _osgAnimation.vectorQuatKeyframe_front(self)", "def facing(self) -> Union[int, float]:\n return self.proto.facing", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def initialCoordinates():\r\n return (-250,-250)", "def moveFront(self, face, l):\n nlist = [[x[i] for x in face] for i in range(len(face[0]))]\n for row in range(self.size):\n for col in range(self.size):\n if col >= int((self.size) / 2):\n break\n else:\n buffer_ = nlist[row][col]\n nlist[row][col] = nlist[row][self.size - 1 - col]\n nlist[row][self.size - 1 - col] = buffer_\n if l == 'f':\n self.front = nlist\n elif l == 'u':\n self.up = nlist\n elif l == 'd':\n self.down = nlist\n elif l == 'l':\n self.left = nlist\n elif l == 'r':\n self.right = nlist\n elif l == 'b':\n self.back = nlist", "def near_clipping_face(self):\n pln = self.tripod.plane\n l, r, b, t, n, f = self.body.dim\n face = gt.Plin((l, b, -n), (r, b, -n), (r, t, -n), (l, t, -n))\n return pln.TM * face", "def xyz2fxy(x,y,z):\n f,X,Y = xyz2facestereo(x,y,z)\n X,Y = stereo2tansquare(X,Y)\n return np.asarray(f,int),np.asfarray(X),np.asfarray(Y)", "def 
front(self):\n return _osgAnimation.VertexList_front(self)", "def getFrontCoord(self, colour, startCoord, rowDirection, columnDirection, arrangement, isLine, points):\r\n print \"Doing Front\"\r\n # We create a list to hold the front coordinates\r\n meshFrontLocation = []\r\n\r\n # We create a mechanism that triggers when there is infinite loop\r\n CountLoop = 0\r\n\r\n # Loop front view till no intended colour is detected\r\n while True:\r\n # Trigger mechanism when there is infinite loop\r\n CountLoop += 1\r\n if CountLoop > 10000:\r\n print \"CountLoop is more then 10000, mechanism to prevent infinite loop during get3DCoord activated\"\r\n break\r\n\r\n # Detect the colour pixel\r\n PixelDetected = self.frontImage.detectColourPixel(colour, startCoord, rowDirection, columnDirection)\r\n\r\n # If pixel is detected\r\n if len(PixelDetected) != 0:\r\n # traceLine(self, colour, StartCoord, rowScan, columnScan)\r\n CoordList, startCoord = self.frontImage.traceLine(colour, PixelDetected, rowDirection, columnDirection)\r\n # We rearrange to coordlist to a certain arrangement so we can get the same starting point in front and side\r\n CoordList = self.rearrange(CoordList, arrangement)\r\n\r\n # We check if it is a line or not. If it is a line, we half the coordinates\r\n if isLine == 1:\r\n CoordList = CoordList[:len(CoordList) / 2]\r\n # We then take note of a certain number of points in the CoordList\r\n\r\n\r\n stride = len(CoordList) / float(points)\r\n\r\n # We add the coordinates to the meshFrontLocation list\r\n #Safety mechanism for infinite loop\r\n loopTrigger = 0\r\n currentIndex = 0.0\r\n while loopTrigger < points:\r\n loopTrigger +=1\r\n if loopTrigger > 10000:\r\n print \"Infinite loop in front coord scanning stride part!!\"\r\n break\r\n i = int(currentIndex)\r\n meshFrontLocation.append([CoordList[i][0], CoordList[i][1]])\r\n currentIndex+=stride\r\n\r\n # If no pixel is detected\r\n else:\r\n break\r\n\r\n return meshFrontLocation", "def translateToOriginXform(self):\n return np.array([[1, 0, 0, -self.eye[0]],\n [0, 1, 0, -self.eye[1]],\n [0, 0, 1, -self.eye[2]],\n [0, 0, 0, 1]])", "def front(self):\n return _osgAnimation.vectorVec3Keyframe_front(self)", "def float_to_front(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.cmd_bring_to_front()", "def origin(self):\r\n\r\n return self.ox, self.oy, self.oz", "def GetSurfaceConversion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_GetSurfaceConversion(self, *args)", "def kinect_transform(self, x, y, z):\n xposition = x\n yposition = y\n zposition = z\n\n return zposition, xposition, yposition", "def position_to_Fourier(self):\n #TODO Try to do it with FFT \n U = self.alphas @ self.positions\n \n return U", "def forward_vector(self):\n return pm.datatypes.Vector(0, 0, 1).rotateBy(self.rotation)", "def front(self):\n return _osgAnimation.vectorVec4Keyframe_front(self)", "def front(self):\n return _osgAnimation.vectorVec2Keyframe_front(self)", "def transformToOrigin(self):\n return Transform.shiftOrigin(self.point, self.head)", "def reshape_to_current_relative(self, vel):\n # tf is the time of front passage as f(x), i.e. 
supply this\n # with an argument in x and we get the corresponding time\n # reshape, taking a constant T time intervals behind front\n t0 = self.front_offset\n t1 = self.T_width\n tf = self.tf\n X = np.indices((vel.shape[1],)).squeeze()\n U_ = np.dstack(vel[:, x, int(tf(x)) + t0:int(tf(x)) + t1] for x in X)\n # reshape this to same dimensions as before\n Uf = np.transpose(U_, (0, 2, 1))\n # TODO: does axis 2 of Uf need to be reversed?\n # reverse axis so that time axis progresses as time in the\n # evolution of the front\n # Uf = Uf[:,:,::-1]\n return Uf", "def rotate_to_local(self,vxyz):\n return sp.mxv(self.mtxtofov,vxyz)", "def _get_flared_coords(self, x0, y0, inc, PA, z_func):\n x_mid, y_mid = self._get_midplane_cart_coords(x0, y0, inc, PA)\n r_tmp, t_tmp = np.hypot(x_mid, y_mid), np.arctan2(y_mid, x_mid)\n for _ in range(5):\n y_tmp = y_mid + z_func(r_tmp) * np.tan(np.radians(inc))\n r_tmp = np.hypot(y_tmp, x_mid)\n t_tmp = np.arctan2(y_tmp, x_mid)\n return r_tmp, t_tmp, z_func(r_tmp)", "def front_wheel_from_axis():", "def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def _get_random_pos_on_back(self):\n y = self.lower_vertex[1] + self.offset\n x_lower, x_upper = self._shrink_range_by_padding(self._x_range())\n z_lower, z_upper = self._shrink_range_by_padding(self._z_range())\n x = random.uniform(x_lower, x_upper)\n z = random.uniform(z_lower, z_upper)\n return x, y, z", "def front(self):\n return _uhd_swig.device_addr_vector_t_front(self)", "def get_front_distances(self):\n return np.array([self.get_distance(name) for name in self.front_distance_sensors])", "def origin():\n\n # indices: [axis, point]\n return numpy.zeros((3, 1))", "def Front(self) -> int:\n return self.circular[self.front] if self.size else -1\n # if self.size:\n # return self.cicular[self.front]\n # else:\n # return -1", "def origin_z(self):\n return self.locations_z[0]", "def far_clipping_face(self):\n pln = self.tripod.plane\n l, r, b, t, n, f = self.body.dim\n if self.body.fshape == 'p':\n d = f - n\n # far face dimensions\n l, r, b, t = [(i * d) / n + i for i in (l, r, b, t)]\n face = gt.Plin((l, b, -f), (r, b, -f), (r, t, -f), (l, t, -f))\n return pln.TM * face", "def project_on_trans(self, F):\r\n\r\n x0 = self.x1\r\n y0 = self.y1\r\n self.z1 = F - math.sqrt(F * F - x0 * x0 - y0 * y0)\r\n\r\n x0 = self.x2\r\n y0 = self.y2\r\n self.z2 = F - math.sqrt(F * F - x0 * x0 - y0 * y0)\r\n\r\n x0 = self.x3\r\n y0 = self.y3\r\n self.z3 = F - math.sqrt(F * F - x0 * x0 - y0 * y0)", "def depth_to_xyz(self,u,v,depth_val):\n '''\n u - x image coordinate\n v - y image coodrinate\n depth_val - depth value at that (u,v) from depth_image\n '''\n\n fx=self.cam_intrin[0]\n fy=self.cam_intrin[4]\n 
cx=self.cam_intrin[2]\n cy=self.cam_intrin[5]\n\n z = float(depth_val)\n x = float((u - cx)/fx)*z\n y = float((v - cy)/fy)*z\n\n result = [x, y, z]\n return result", "def get_forward_vector(game_object: GameObject) -> CommonVector3:\n return CommonVector3.from_vector3(game_object.forward)", "def test_rotate_front(self):\n # Testing 'Front' rotation clockwise\n side = 'F'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'w'], ['g', 'w']], dtype='<U1'),\n np.array([['y', 'y'], ['g', 'g']], dtype='<U1'),\n np.array([['o', 'o'], ['o', 'o']], dtype='<U1'),\n np.array([['b', 'b'], ['w', 'w']], dtype='<U1'),\n np.array([['y', 'b'], ['y', 'b']], dtype='<U1'),\n np.array([['r', 'r'], ['r', 'r']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)", "def to_oriented_points(self):\n return g.points_from_probe(self)", "def fl_flip_yorigin():\n _fl_flip_yorigin = library.cfuncproto(\n library.load_so_libforms(), \"fl_flip_yorigin\",\\\n None, [],\\\n \"\"\"void fl_flip_yorigin()\"\"\")\n _fl_flip_yorigin()", "def deformar_afin_frontera(self, F):\n xf0 = self.nodos.get_coors0_fr()\n xf = np.matmul( xf0, np.transpose(F) )\n self.nodos.set_coors_fr(xf)", "def convert_grid_to_z_up(scene): # pragma nocover\n\n '''\n There is an interaction between up and forward, the direction that the\n camera is pointing. By default, the camera points in the -z direction\n vector(0,0,-1). In this case, you can make the x or y axes (or anything\n between) be the up vector, but you cannot make the z axis be the up\n vector, because this is the axis about which the camera rotates when\n you set the up attribute. If you want the z axis to point up, first set\n forward to something other than the -z axis, for example vector(1,0,0).\n https://www.glowscript.org/docs/VPythonDocs/canvas.html\n '''\n # First set the x-axis forward\n scene.forward = x_axis_vector\n scene.up = z_axis_vector\n\n # Place the camera in the + axes\n scene.camera.pos = vector(10, 10, 10)\n scene.camera.axis = -scene.camera.pos\n return", "def forward(self, x):\r\n self.x = (self.x+(x*(math.cos(self.dir))))\r\n self.y = (self.y+(x*(math.sin(self.dir))))\r\n return (self.x, self.y)", "def convertdirection(self, frame):\n return _coordsys.coordsys_convertdirection(self, frame)", "def infront(self, xyz: np.ndarray, directions: bool = False) -> np.ndarray:\n dxyz = xyz if directions else xyz - self.xyz\n z = np.dot(dxyz, self.R.T[:, 2])\n return z > 0", "def flipXYZ(oldXYZ): # This is an example of a nice Modular function.\n coordList = oldXYZ.split()\n x = int(coordList[0]) * -1\n y = int(coordList[1]) * -1\n xyz = ' '.join([str(x), str(y), coordList[2]])\n return xyz", "def flipXYZ(oldXYZ): # This is an example of a nice Modular function.\n coordList = oldXYZ.split()\n x = int(coordList[0]) * -1\n y = int(coordList[1]) * -1\n xyz = ' '.join([str(x), str(y), coordList[2]])\n return xyz", "def getFrontProfileCoord(self, locatorList):\r\n\r\n colour = [0, 128, 128]\r\n startCoord = [0, 0]\r\n rowDirection = \"LeftRight\"\r\n columnDirection = \"DownUp\"\r\n points = 25\r\n isLine = 1\r\n arrangement = \"LowestX\"\r\n\r\n FrontProfile3DCoord = self.get3DCoord(colour, startCoord, rowDirection, columnDirection, points, isLine, arrangement)\r\n\r\n if not FrontProfile3DCoord:\r\n print \"No front profile was detected\"\r\n return\r\n\r\n FrontProfileSep3DCoord = []\r\n print len(FrontProfile3DCoord)\r\n # As the front profile come with 25 points each, each other 25 points is another front profile\r\n for p in range(0, 
len(FrontProfile3DCoord), points):\r\n FrontProfile = []\r\n for frontprofilepoint in range(0, points):\r\n FrontProfile.append(FrontProfile3DCoord[p + frontprofilepoint])\r\n FrontProfileSep3DCoord.append(FrontProfile)\r\n\r\n # We plot the front profile out using locators first\r\n for i in FrontProfileSep3DCoord:\r\n for j in range(0, points):\r\n location = cmds.spaceLocator(n=\"FrontProfile_Coord#\")\r\n cmds.xform(location, t=[i[j][0], i[j][1], i[j][2]])\r\n locatorList.append(location[0])", "def gen_front_term(self, x):\n return x * (self.goal - self.y0)", "def z(self):\r\n return self.position.z", "def affine_space(self):\n vertices = self.vertices()\n if not self.contains_origin():\n v0 = vertices[0]\n vertices = [v-v0 for v in vertices]\n return self.ambient_space().span(vertices).saturation()", "def flip(self, index):\n head, tail = self.get_extreme_points(index)\n centroid = self.get_position(index)\n\n if tail is not None and centroid is not None:\n self.set_angle(index, points_angle(centroid, tail) )", "def get_front_door_mask(self) -> np.array:\n front_door_mask = self.boundary == 255\n region = measure.regionprops(front_door_mask.astype(int))[0]\n return np.array(region.bbox, dtype=int)", "def focal_point(self):\n return self._focal_point", "def to_local(self, xy):\n return self._projective_transform(self.A_inv, xy)", "def convert_coordinate_system_3d(x, y, z):\n\n return x, -z, y", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points", "def project_ref_to_velo(self, pts_3d_ref):\n pts_3d_ref = self.cart2hom(pts_3d_ref) # nx4\n return np.dot(pts_3d_ref, np.transpose(self.C2V))", "def project_ref_to_velo(self, pts_3d_ref):\n pts_3d_ref = self.cart2hom(pts_3d_ref) # nx4\n return np.dot(pts_3d_ref, np.transpose(self.C2V))", "def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])", "def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True", "def Move180(self):\n if self.facing == 0:\n self.facing = 1\n self.x -= self.stepLeft\n elif self.facing == 1:\n self.facing = 2\n self.y -= self.stepUp\n elif self.facing == 2:\n self.facing = 3\n self.x += self.stepRight\n elif self.facing == 3:\n self.facing = 0\n self.y += self.stepDown", "def forward(self):\n print('forward')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def paint_focal_axes(self):\n GL.glTranslatef(*self.focus) # translate to focus\n self.paint_axes(self.sigma)\n GL.glTranslatef(*-self.focus) # translate back", "def xyz2facestereo(x,y,z):\n ax = np.abs(x)\n ay = np.abs(y)\n az = np.abs(z)\n mskx = (y != x) & (z != x)\n mskyz = z != y\n msk0 = ( x >= ay) & ( x >= az) & mskx\n msk3 = (-x >= ay) & (-x >= az) & mskx\n msk1 = ( y >= az) & mskyz\n msk4 = (-y >= az) & mskyz\n msk2 = z > 0\n f = (1-msk0)*(msk3*3 + (1-msk3)*(msk1 + 
(1-msk1)*(msk4*4 + (1-msk4)*(msk2*2 + (1-msk2)*5))))\n xnew = np.choose(f, ( y, -x, -x, -z, -z, y))\n ynew = np.choose(f, ( z, z, -y, -y, x, x))\n znew = np.choose(f, ( x, y, z, -x, -y, -z))\n X,Y = xyz2stereo(xnew, ynew, znew)\n\n return f,X,Y", "def _calculate_origin(self):\n\n if 'x' in self._name:\n\n assert len(np.unique(self._vertices[:, 0])) == 1, 'vertices are wrong!'\n\n self._normal = np.array([1, 0, 0]) * self._sign\n\n x_origin = self._vertices[0, 0]\n\n y_origin = (min(self._vertices[:, 1]) + max(self._vertices[:, 1])) / 2.\n\n z_origin = (min(self._vertices[:, 2]) + max(self._vertices[:, 2])) / 2.\n\n # self._edges = [self.]\n\n\n elif 'y' in self._name:\n\n assert len(np.unique(self._vertices[:, 1])) == 1, 'vertices are wrong!'\n\n self._normal = np.array([0, 1., 0]) * self._sign\n\n x_origin = (min(self._vertices[:, 0]) + max(self._vertices[:, 0])) / 2.\n\n y_origin = self._vertices[0, 1]\n\n z_origin = (min(self._vertices[:, 2]) + max(self._vertices[:, 2])) / 2.\n\n\n elif 'z' in self._name:\n\n assert len(np.unique(self._vertices[:, 2])) == 1, 'vertices are wrong!'\n\n self._normal = np.array([0, 0, 1]) * self._sign\n\n x_origin = (min(self._vertices[:, 0]) + max(self._vertices[:, 0])) / 2.\n\n y_origin = (min(self._vertices[:, 1]) + max(self._vertices[:, 1])) / 2.\n\n z_origin = self._vertices[0, 2]\n\n self._xmax = self._vertices[:, 0].max()\n self._ymax = self._vertices[:, 1].max()\n self._zmax = self._vertices[:, 2].max()\n\n self._xmin = self._vertices[:, 0].min()\n self._ymin = self._vertices[:, 1].min()\n self._zmin = self._vertices[:, 2].min()\n\n self._origin = np.array([x_origin, y_origin, z_origin])\n\n # TODO: perhaps rewrite this to find the plane\n\n self._sympy_plane = Plane(Point3D(self._origin), normal_vector=self._normal)", "def flip(self):", "def get_origin(self):\n return self.zero", "def test_converts_to_agisoft_and_back_exactly() -> None:\n # k[3:] must be zero\n cam = Camera(\n imgsz=(4288, 2848),\n f=(3100, 3200),\n c=(5, -4),\n k=(0.1, -0.05, 0.02),\n p=(0.03, 0.04),\n )\n xcam = Agisoft.from_camera(cam)\n residuals = Converter(xcam, cam).residuals()\n np.testing.assert_allclose(residuals, 0, rtol=0, atol=1e-11)\n cam2 = xcam.to_camera()\n np.testing.assert_equal(cam.to_array(), cam2.to_array())", "def get3DCoord(self, colour, startCoord, rowDirection, columnDirection, points, isLine = 1, arrangement = \"LowestY\"):\r\n #We create a list to store the coordinates results\r\n\r\n meshFrontLocation = self.getFrontCoord(colour, startCoord, rowDirection, columnDirection, arrangement, isLine, points)\r\n print \"mesh front location has %s points\" % len(meshFrontLocation)\r\n\r\n #We check if any colour is detected in the front view first before we scan side view\r\n if len(meshFrontLocation) == 0:\r\n return\r\n\r\n #Reset the startCoord\r\n meshSideLocation = self.getSideCoord(colour, startCoord, rowDirection, columnDirection, arrangement, isLine)\r\n\r\n\r\n #We check if any colour is detected in the side view. If there is no colour, there is an error\r\n if len(meshSideLocation) == 0:\r\n print \"Detected colour [%s, %s, %s] in front but not side. 
Please check\" % (colour[0], colour[1], colour[2])\r\n return\r\n\r\n print \"mesh side location has %s points\" % len(meshSideLocation)\r\n\r\n #We now fix the offset between the front and the side location\r\n #We get the highest and lowest y value from the front location\r\n TempFront = meshFrontLocation\r\n FrontLowestY = self.rearrange(TempFront, \"LowestY\")[0][1]\r\n FrontHighestY = self.rearrange(TempFront, \"HighestY\")[0][1]\r\n TempSide = meshSideLocation\r\n SideLowestY = self.rearrange(TempSide, \"LowestY\")[0][1]\r\n SideHighestY = self.rearrange(TempSide, \"HighestY\")[0][1]\r\n\r\n #We then get the middle y for the 2 views and get the offset\r\n #We keep it to int as pixels doesn't exist as floats.\r\n FrontMiddleY = (FrontLowestY + FrontHighestY) / 2\r\n SideMiddleY = (SideLowestY + SideHighestY) / 2\r\n MidOffset = FrontMiddleY - SideMiddleY\r\n\r\n #We add the offset to the meshSideLocation\r\n for i in range(0, len(meshSideLocation)):\r\n meshSideLocation[i][1] += MidOffset\r\n\r\n #We check if the front y range is bigger then the side y range\r\n FrontRangeY = FrontHighestY - FrontLowestY\r\n SideRangeY = SideHighestY - SideLowestY\r\n if FrontRangeY > SideRangeY:\r\n cmds.warning(\"The side image for colour [%s, %s, %s] range is smaller then front image. Not enough data to calculate 3D values\" % (colour[0], colour[1], colour[2]))\r\n cmds.warning(\"The range values are FrontRange: %s, SideRange: %s\" % (FrontRangeY, SideRangeY))\r\n cmds.warning(\"The frontHighest Y is %s and the frontLowestY is %s\" % (FrontHighestY, FrontLowestY))\r\n cmds.warning(\"The sideHighest Y is %s and the sideLowestY is %s\" % (SideHighestY, SideLowestY))\r\n return\r\n\r\n #We then get the matching Y coordinates from the meshFrontLocation and the meshSideLocation\r\n mesh3DCoord = []\r\n\r\n for i in range(0, len(meshFrontLocation)):\r\n match = 0\r\n for j in range(0, len(meshSideLocation)):\r\n if meshFrontLocation[i][1] == meshSideLocation[j][1]:\r\n mesh3DCoord.append([meshFrontLocation[i][0], meshFrontLocation[i][1], meshSideLocation[j][0]])\r\n match = 1\r\n break\r\n if match==0:\r\n print \"There is no match found for meshFrontLocation[i][1] = %s\" % meshFrontLocation[i][1]\r\n print \"Ensure your side view has more pixels then front\"\r\n\r\n\r\n return mesh3DCoord", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * points[..., 1] / np.linalg.norm(points[..., [0, 2]],\n axis=-1) + self.center_y_px]).T\n # ensure that points' x values are also nan when the y values are nan\n transformed_points[np.isnan(transformed_points[..., 1])] = np.nan\n # return the points\n return transformed_points", "def z(self):\n return self.coords[2]", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def get_fairlead_force_3d(self, index):\n fx = c_double(-999.9)\n fy = c_double(-999.9)\n fz = c_double(-999.9)\n Map.lib.map_get_fairlead_force_3d( pointer(fx), pointer(fy), pointer(fz), self.f_type_d, index, self.status, pointer(self.ierr))\n return fx.value, fy.value, 
fz.value", "def turn_left(self):\n self.facing_direction -= self.config\n if self.facing_direction < 0:\n self.facing_direction += 8\n self.x, self.y = self.compute_positions()", "def get_origin(self):\n return self.coord_cls(x=0, y=0, z=0, system=self)", "def getTranslation(fracs):\n \n \n \n # Determine whether the shift needs to be from inf to 0 \n # or from -inf to 0\n \n # Along all x fractionals\n if abs(max(fracs[0]))>=abs(min(fracs[0])):\n minX = min([x for x in fracs[0] if x>0])\n else:\n minX = min([x for x in fracs[0] if x<0])\n \n # Along all y fractionals\n if abs(max(fracs[1]))>=abs(min(fracs[1])):\n minY = min([x for x in fracs[1] if x>0])\n else:\n minY = min([x for x in fracs[1] if x<0])\n \n # Along all z fractionals\n # Need to consider all atoms lying in a single\n # plane (e.g. graphene), thus the final \"else\"\n # statement\n if abs(max(fracs[2]))>abs(min(fracs[2])):\n minZ = min([x for x in fracs[2] if x>0])\n elif abs(max(fracs[2]))<abs(min(fracs[2])):\n minZ = min([x for x in fracs[2] if x<0])\n else:\n minZ = max(fracs[2])\n\n shift_vector = np.array([minX,minY,minZ])\n \n return(shift_vector)", "def flip_faceup(self):\r\n self.faceup = True", "def turned(self,angle: \"radians to turn\") -> Position:\n return Position(self.x, self.y, self.facing + angle)", "def get_current_xyz(self):\n joint_states = self.joints_state\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n \n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n mat = ur_utils.forward(q, self._ik_params)\n xyz = mat[:3, 3]\n return xyz", "def get_forward_position(self):\n return self._forward_position", "def get_shower_trans_matrix (azimuth,altitude):\n\n cos_z = sin(altitude)\n sin_z = cos(altitude)\n cos_az = cos(azimuth)\n sin_az = sin(azimuth)\n\n trans = np.zeros([3,3])\n trans[0][0] = cos_z*cos_az\n trans[1][0] = sin_az\n trans[2][0] = sin_z*cos_az\n\n trans[0][1] = -cos_z*sin_az\n trans[1][1] = cos_az\n trans[2][1] = -sin_z*sin_az\n\n trans[0][2] = -sin_z\n trans[1][2] = 0.\n trans[2][2] = cos_z\n\n return trans", "def flip_points(a):\n a = np.array(a)\n return np.flip(a, 1)", "def BackTransform(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_BackTransform(self, *args)", "def get_default_cam_pos(self, negative=False):\n focal_pt = self.center\n if any(np.isnan(focal_pt)):\n focal_pt = (0.0, 0.0, 0.0)\n position = np.array(self._theme.camera['position']).astype(float)\n if negative:\n position *= -1\n position = position / np.array(self.scale).astype(float)\n cpos = [position + np.array(focal_pt), focal_pt, self._theme.camera['viewup']]\n return cpos", "def _to_parent_frame(self, *args, **kwargs):\n lat, lon, _ = self.latlonalt\n m = rot3(-lon) @ rot2(lat - np.pi / 2.0) @ rot3(self.heading)\n offset = np.zeros(6)\n offset[:3] = self.coordinates\n return self._convert(m, m), offset", "def get_ztf_footprint_corners():\n x = 6.86 / 2\n return [-x, +x, +x, -x] * u.deg, [-x, -x, +x, +x] * u.deg", "def backward(self):\n #print('backward\\r')\n self.linearVector = Vector3(x=-1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def detect_frontier(self, map):\n if self.start:\n queue = []\n start = map_helper.map_to_world(self.start[0], self.start[1], self.resolution, self.x_offset, self.y_offset)\n 
visited = [start]\n queue = [start]\n while queue:\n current = queue.pop(0)\n if map_helper.is_valid_loc_aStar(current, map):\n neighbors = map_helper.get_neighbors(current, map)\n for node in set(neighbors) - set(visited):\n if map_helper.is_frontier(node, map):\n self.frontier.append(node)\n queue.append(node)\n visited += [node]\n else:\n queue.append(node)\n visited += [node]\n self.detected = True\n\t self.frontier_show = copy.deepcopy(self.frontier)\n if len(self.frontier) == 0:\n return True\n return False", "def get_front_center_pose(mike_pos, # Microphone position matrix, as elsewhere.\n center_dist=1., # meters from array center\n origin_in_front=True):\n mike_center = np.array([np.mean((mike_pos.T)[0]), np.mean((mike_pos.T)[1]),\n np.mean((mike_pos.T)[2]), 0, 0])\n dir_mat = get_dir(mike_center, mike_pos)\n mike_center = mike_center[:3]\n far_index = np.argmax(dir_mat, axis=0)[0]\n if far_index > 1:\n far2_index = np.argmax((dir_mat.T)[0][:far_index])\n else:\n far2_index = np.argmax((dir_mat.T)[0][(far_index+1):]) + far_index+1\n \n proj_vect = np.cross(mike_pos[far_index]-mike_center,\n mike_pos[far2_index]-mike_center)\n proj_vect = proj_vect*center_dist/la.norm(proj_vect) # Scale to desired distance\n \n # Set appropriate orientation\n if np.dot(proj_vect, mike_center) > 0 or not origin_in_front:\n proj_vect *= -1.\n \n theta = np.arctan2(-proj_vect[1], -proj_vect[0]) # Find angular direction\n phi = np.arctan2(-proj_vect[2], np.sqrt(proj_vect[0]**2 + proj_vect[1]**2))\n proj_vect += mike_center # Translate to global coordinates\n \n return np.array([proj_vect[0], proj_vect[1], proj_vect[2], theta, phi])" ]
[ "0.6466102", "0.5958782", "0.59478974", "0.59458196", "0.5798166", "0.57393557", "0.5688547", "0.56672025", "0.56672025", "0.56627524", "0.5614368", "0.5602697", "0.5566901", "0.5562923", "0.55597514", "0.5549741", "0.55102384", "0.55058265", "0.54947865", "0.54475546", "0.5416932", "0.5410108", "0.54082084", "0.5404698", "0.53981113", "0.53310734", "0.5323869", "0.53236276", "0.53093594", "0.5273548", "0.52671015", "0.5266285", "0.5245323", "0.52324605", "0.5230146", "0.52291566", "0.52143896", "0.521002", "0.5207337", "0.519649", "0.5193022", "0.51862276", "0.5182278", "0.51744354", "0.51719666", "0.516961", "0.51635355", "0.5162786", "0.51527965", "0.5151762", "0.5150707", "0.5148872", "0.5142492", "0.5141176", "0.5129541", "0.5126922", "0.5126922", "0.5104048", "0.5101243", "0.51011735", "0.5096438", "0.5092173", "0.50894815", "0.5086453", "0.5072253", "0.5067526", "0.5066262", "0.5061738", "0.5061738", "0.5060524", "0.50586516", "0.50583696", "0.50441265", "0.5036562", "0.5035941", "0.5030381", "0.50157183", "0.5015097", "0.50060076", "0.5005992", "0.50002545", "0.4999181", "0.49972802", "0.49948847", "0.49931616", "0.49915433", "0.49832425", "0.49807113", "0.49804127", "0.497597", "0.49754494", "0.49734885", "0.4972434", "0.49706182", "0.49694505", "0.49478114", "0.4943619", "0.49414387", "0.49403098", "0.4938696" ]
0.7374416
0
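Editor's note: the document scored above converts a yaw angle into a step along the camera's facing direction in the x-z plane. The standalone sketch below adds an explicit pitch term so the cos(0) factor in the original is visible as the degenerate case; the parameter names are illustrative, not taken from the record.

import math

def front_facing_step(yaw_degrees, pitch_degrees=0.0, step=1.0):
    """Return the (x, z) displacement for one step along the facing direction."""
    yaw = math.radians(yaw_degrees)
    pitch = math.radians(pitch_degrees)
    # With pitch_degrees == 0, math.cos(pitch) == 1 and this reduces to
    # (step * sin(yaw), step * cos(yaw)), exactly as in the document above.
    return step * math.sin(yaw) * math.cos(pitch), step * math.cos(yaw) * math.cos(pitch)

# front_facing_step(90) -> (1.0, ~0.0): one step along +x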
Returns the projection matrix for perspective. Multiply the position of the camera by the matrix that this function returns.
def get_perspective_matrix(fov_degrees, aspect, near, far):
    radians = math.radians(fov_degrees)
    zoom = 1 / math.tan(radians / 2)
    y_zoom = zoom
    x_zoom = y_zoom / aspect
    z_clip_a = (far + near) / (far - near)
    z_clip_b = (-2 * near * far) / (far - near)
    return np.matrix([[x_zoom, 0, 0, 0],
                      [0, y_zoom, 0, 0],
                      [0, 0, z_clip_a, z_clip_b],
                      [0, 0, 1, 0]])
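Editor's note: a short usage sketch for the document above, showing the multiplication the query describes followed by the perspective divide. It assumes import math, import numpy as np, and the get_perspective_matrix function as defined above; the sample values are illustrative.

import math
import numpy as np

proj = get_perspective_matrix(60, 16 / 9, 0.1, 100.0)

# A camera-space position as a homogeneous column vector (w = 1).
# Note the matrix's last row [0, 0, 1, 0] takes w from +z, i.e. the camera looks down +z.
point = np.array([[0.5], [0.25], [5.0], [1.0]])

clip = np.asarray(proj @ point).ravel()  # multiply the position by the matrix
ndc = clip[:3] / clip[3]                 # perspective divide -> normalized device coordinates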
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p", "def projection_matrix(self) -> TransformationMatrixType:\n if self._projection_matrix is None:\n if self.projection_mode == Projection.TOP_DOWN:\n self._projection_matrix = self.orthographic_matrix\n else:\n self._projection_matrix = self.perspective_matrix\n\n return self._projection_matrix", "def perspective_matrix(self) -> TransformationMatrixType:\n z_near, z_far = self._clipping[self.projection_mode.value]\n return perspective_matrix(\n math.radians(self.fov), self.aspect_ratio, z_near, z_far\n )", "def get_projection_matrix(left, right, bottom, top):\r\n zNear = -25.0\r\n zFar = 25.0\r\n inv_z = 1.0 / (zFar - zNear)\r\n inv_y = 1.0 / (top - bottom)\r\n inv_x = 1.0 / (right - left)\r\n mat = [[(2.0 * inv_x), 0.0, 0.0, (-(right + left) * inv_x)],\r\n [0.0, (2.0 * inv_y), 0.0, (-(top + bottom) * inv_y)],\r\n [0.0, 0.0, (-2.0 * inv_z), (-(zFar + zNear) * inv_z)],\r\n [0.0, 0.0, 0.0, 1.0]]\r\n return mat", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n return M, Minv", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()\n ymin, ymax = self.get_ylim3d()\n zmin, zmax = self.get_zlim3d()\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0\n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates\n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down\n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", 
"def getPerspectiveProjectionMatrix(l, r, b, t, n, f):\n e11 = 2 * n / (r - l)\n e13 = (r + l) / (r - l)\n e22 = (2 * n) / (t - b)\n e23 = (t + b) / (t - b)\n e33 = -1 * (f + n) / (f - n)\n e34 = (-2 * f * n) / (f - n)\n\n return MatrixExtended([\n [e11, 0, e13, 0],\n [0, e22, e23, 0],\n [0, 0, e33, e34],\n [0, 0, -1, 0]])", "def computeMVP(self):\n projMat = self.converterYUR\n modelViewMat = self.transforMat.invertCompose(\n Globals.render.getTransform(self.cameraNode)).getMat()\n return UnalignedLMatrix4f(modelViewMat * projMat)", "def _ProjectionMatrix(near, far, fov, aspectRatio):\r\n # Matrices are considered to be M[row][col]\r\n # Use DirectX convention, so need to do rowvec*Matrix to transform\r\n size = 1 / tan(radians(fov)/2.0)\r\n M = [[0] * 4 for i in range(4)]\r\n M[0][0] = size/aspectRatio\r\n M[1][1] = size #negative value reflects scene on the Y axis\r\n M[2][2] = (far + near) / (far - near)\r\n M[2][3] = 1\r\n M[3][2] = -(2 * far * near)/(far - near)\r\n return array(M, dtype=float)", "def perspective_projection(points, rotation, translation, focal_length, camera_center):\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:, 0, 0] = focal_length\n K[:, 1, 1] = focal_length\n K[:, 2, 2] = 1.0\n K[:, :-1, -1] = camera_center\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n projected_points = points / points[:, :, -1].unsqueeze(-1)\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n projected_points = projected_points[:, :, :-1]\n return projected_points", "def compute_projection_matrix(width,\n height,\n f_x,\n f_y,\n c_x,\n c_y,\n near,\n far):\n # pylint: disable=line-too-long\n matrix = vtk.vtkMatrix4x4()\n matrix.Zero()\n matrix.SetElement(0, 0, 2*f_x/width)\n matrix.SetElement(0, 1, -2*0/width) # Not doing skew, so this will be 0.\n matrix.SetElement(0, 2, (width - 2*c_x)/width)\n matrix.SetElement(1, 1, 2*f_y/height)\n matrix.SetElement(1, 2, (-height + 2*c_y)/height)\n matrix.SetElement(2, 2, (-far-near)/(far-near))\n matrix.SetElement(2, 3, -2*far*near/(far-near))\n matrix.SetElement(3, 2, -1)\n return matrix", "def get_projection_matrix(self, aspect):\n return self.ptr.get_projection_matrix(aspect)", "def perspective_projection(points, rotation, translation,\n focal_length, camera_center):\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:, 0, 0] = focal_length\n K[:, 1, 1] = focal_length\n K[:, 2, 2] = 1.\n K[:, :-1, -1] = camera_center\n\n # Transform points\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n\n # Apply perspective distortion\n projected_points = points / points[:, :, -1].unsqueeze(-1)\n\n # Apply camera intrinsics\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n\n return projected_points[:, :, :-1]", "def get_projection_matrix(K, rvec, tvec):\n R = cv2.Rodrigues(np.float32(rvec))[0]\n Rt = np.zeros((3, 4))\n Rt[:, 0:3] = R\n Rt[:, 3] = tvec\n return K @ Rt", "def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def compute_projection(M):\n P = torch.mm(M, torch.pinverse(M.T.matmul(M)).matmul(M.T))\n P = P.double()\n return P", "def 
_get_proj_mat(self):\n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vec_handles)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vec_handles, self.basis_vec_handles)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat", "def __set_perspective(self):\n\n src = np.float32([[(.42 * self.img_shape[1],.65 * self.img_shape[0] ),\n (.58 * self.img_shape[1], .65 * self.img_shape[0]),\n (0 * self.img_shape[1],self.img_shape[0]),\n (1 * self.img_shape[1], self.img_shape[0])]])\n\n dst = np.float32([[0,0],\n [self.img_shape[1],0],\n [0,self.img_shape[0]],\n [self.img_shape[1],self.img_shape[0]]])\n\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.M_inv = cv2.getPerspectiveTransform(dst, src)", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def camera_matrix(e, p, t):\n # Translates all points such that the camera is centered at the origin.\n T = np.array([[1, 0, 0, -e[0]],\n [0, 1, 0, -e[1]],\n [0, 0, 1, -e[2]],\n [0, 0, 0, 1]])\n\n # Set up orthonormal basis.\n w = e - p\n w = w / np.linalg.norm(w)\n u = np.cross(t, w)\n u = u / np.linalg.norm(u)\n v = np.cross(w, u)\n\n # Rotate points such that camera is aligned with UVW-axes (g -> -z-axis).\n R = np.array([[u[0], u[1], u[2], 0],\n [v[0], v[1], v[2], 0],\n [w[0], w[1], w[2], 0],\n [ 0, 0, 0, 1]])\n return R.dot(T)", "def modelview_matrix(self):\n camera = self.figure.scene.camera\n return camera.view_transform_matrix.to_array().astype(np.float32)", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def parallel_projection(self):\n return self.camera.parallel_projection", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def base_projection_matrix(self, fiber):\n return matrix(ZZ, fiber.vertices()).right_kernel_matrix()", "def camera_matrix(self) -> TransformationMatrixType:\n return numpy.matmul(\n self.rotation_matrix(*self.rotation),\n displacement_matrix(*-numpy.array(self.location)),\n )", "def perspective(self, fovy, aspect, near, far):\r\n\r\n top = near * math.tan(fovy * math.pi 
/ 360.0)\r\n bottom = -top\r\n left = bottom * aspect\r\n right = top * aspect\r\n\r\n return self.frustum(left, right, bottom, top, near, far)", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def get_M(self, theta, phi, gamma, dx, dy, dz):\n w = self.width\n h = self.height\n f = self.focal\n # Projection 2D -> 3D matrix\n A1 = np.array([[1, 0, -w / 2],\n [0, 1, -h / 2],\n [0, 0, 1],\n [0, 0, 1]])\n # Rotation matrices around the X, Y, and Z axis\n RX = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n RY = np.array([[np.cos(phi), 0, -np.sin(phi), 0],\n [0, 1, 0, 0],\n [np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],\n [np.sin(gamma), np.cos(gamma), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n # Composed rotation matrix with (RX, RY, RZ)\n R = np.dot(np.dot(RX, RY), RZ)\n # Translation matrix\n T = np.array([[1, 0, 0, dx],\n [0, 1, 0, dy],\n [0, 0, 1, dz],\n [0, 0, 0, 1]])\n # Projection 3D -> 2D matrix\n A2 = np.array([[f, 0, w / 2, 0],\n [0, f, h / 2, 0],\n [0, 0, 1, 0]])\n # Final transformation matrix\n return np.dot(A2, np.dot(T, np.dot(R, A1)))", "def perspective_projection(points, rotation, translation,\n focal_length, camera_center, distortion=None):\n batch_size = points.shape[0]\n \n # Extrinsic\n if rotation is not None:\n points = torch.einsum('bij,bkj->bki', rotation, points)\n\n if translation is not None:\n points = points + translation.unsqueeze(1)\n\n if distortion is not None:\n kc = distortion\n points = points[:,:,:2] / points[:,:,2:]\n \n r2 = points[:,:,0]**2 + points[:,:,1]**2\n dx = (2 * kc[:,[2]] * points[:,:,0] * points[:,:,1] \n + kc[:,[3]] * (r2 + 2*points[:,:,0]**2))\n\n dy = (2 * kc[:,[3]] * points[:,:,0] * points[:,:,1] \n + kc[:,[2]] * (r2 + 2*points[:,:,1]**2))\n \n x = (1 + kc[:,[0]]*r2 + kc[:,[1]]*r2.pow(2) + kc[:,[4]]*r2.pow(3)) * points[:,:,0] + dx\n y = (1 + kc[:,[0]]*r2 + kc[:,[1]]*r2.pow(2) + kc[:,[4]]*r2.pow(3)) * points[:,:,1] + dy\n \n points = torch.stack([x, y, torch.ones_like(x)], dim=-1)\n \n \n # Intrinsic\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:,0,0] = focal_length\n K[:,1,1] = focal_length\n K[:,2,2] = 1.\n K[:,:-1, -1] = camera_center\n\n # Apply camera intrinsicsrf\n points = points / points[:,:,-1].unsqueeze(-1)\n projected_points = torch.einsum('bij,bkj->bki', K, points)\n projected_points = projected_points[:, :, :-1]\n\n return projected_points", "def frustum(self, left, right, bottom, top, near, far):\r\n \r\n return mat4( (2.0*near)/(right-left), 0.0, float(right+left)/(right-left), 0.0,\r\n 0.0, (2.0*near)/(top-bottom), float(top+bottom)/(top-bottom), 0.0,\r\n 0.0, 0.0, -float(far+near)/(far-near), -(2.0*far*near)/(far-near),\r\n 0.0, 0.0, -1.0, 0.0)", "def camera_transformation_from_pose(azimutal, elevation):\n azimutal, elevation = azimutal * 2. * np.pi / 360., elevation * 2. 
* np.pi / 360.\n azimutal *= -1.\n elevation *= -1.\n r_y = np.array([[np.cos(elevation), 0, np.sin(elevation)],\n [0, 1, 0],\n [-np.sin(elevation), 0, np.cos(elevation)]])\n r_z = np.array([[np.cos(azimutal), -np.sin(azimutal), 0],\n [np.sin(azimutal), np.cos(azimutal), 0],\n [0, 0, 1]])\n r = r_z.dot(r_y)\n # world_to_camera matrix, camera_to_world matrix\n return r, np.linalg.inv(r)", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. \n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n\n #get projection matrix\n pmatrix = projection_matrix(R, T, K)\n\n #add 4th component to points\n ones = np.ones([1,len(X[0])])\n xones=np.row_stack((X,ones))\n\n #calculate pixel coordinates\n X_camera = pmatrix.dot(xones)\n\n return X_camera", "def perspective(self):\n return self.container['perspective']", "def proj(self, x: np.ndarray):\n return self.matvec(self.pinv(x))", "def get_projection(self):\n return self.projection", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def projection(self):\n self.projection = Projection(self)\n return self.projection", "def transformation_matrix(self) -> TransformationMatrixType:\n # camera translation\n if self._transformation_matrix is None:\n self._transformation_matrix = numpy.matmul(\n self.projection_matrix,\n self.camera_matrix,\n )\n\n return self._transformation_matrix", "def get_perspective_transform(src, dst):\n if not isinstance(src, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(src)))\n\n if not isinstance(dst, torch.Tensor):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(type(dst)))\n\n if not src.shape[-2:] == (4, 2):\n raise ValueError(\"Inputs must be a Bx4x2 tensor. Got {}\".format(src.shape))\n\n if not src.shape == dst.shape:\n raise ValueError(\"Inputs must have the same shape. Got {}\".format(dst.shape))\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(\n \"Inputs must have same batch size dimension. 
Expect {} but got {}\".format(src.shape, dst.shape)\n )\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n for i in [0, 1, 2, 3]:\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param(src[:, i], dst[:, i], 'y'))\n\n # A is Bx8x8\n A = torch.stack(p, dim=1)\n\n # b is a Bx8x1\n b = torch.stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 3:4, 0],\n dst[:, 3:4, 1],\n ],\n dim=1,\n )\n\n # solve the system Ax = b\n X, LU = _torch_solve_cast(b, A)\n\n # create variable to return\n batch_size = src.shape[0]\n M = torch.ones(batch_size, 9, device=src.device, dtype=src.dtype)\n M[..., :8] = torch.squeeze(X, dim=-1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def m(self) -> np.ndarray:\n assert self._k is not None and self._r is not None and self._t is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k, r=self._r, t=self._t)", "def get_transform_matrix(theta, phi = None, invert_rot = False, invert_focal = False):\n\n if phi is None:\n phi = const.PHI_IDX * 10.0\n\n #extrinsic x intrinsic\n camera_matrix = np.zeros((4, 4), dtype=np.float32)\n\n intrinsic_matrix = np.eye(4, dtype=np.float32)\n extrinsic_matrix = np.eye(4, dtype=np.float32)\n\n sin_phi = np.sin(float(phi) / 180.0 * np.pi)\n cos_phi = np.cos(float(phi) / 180.0 * np.pi)\n sin_theta = np.sin(float(-theta) / 180.0 * np.pi)\n cos_theta = np.cos(float(-theta) / 180.0 * np.pi)\n\n #theta rotation\n rotation_azimuth = np.zeros((3, 3), dtype=np.float32)\n rotation_azimuth[0, 0] = cos_theta\n rotation_azimuth[2, 2] = cos_theta\n rotation_azimuth[0, 2] = -sin_theta\n rotation_azimuth[2, 0] = sin_theta\n rotation_azimuth[1, 1] = 1.0\n\n #phi rotation\n rotation_elevation = np.zeros((3, 3), dtype=np.float32)\n rotation_elevation[0, 0] = cos_phi\n rotation_elevation[0, 1] = sin_phi\n rotation_elevation[1, 0] = -sin_phi\n rotation_elevation[1, 1] = cos_phi\n rotation_elevation[2, 2] = 1.0\n\n #rotate phi, then theta\n rotation_matrix = np.matmul(rotation_azimuth, rotation_elevation)\n if invert_rot:\n rotation_matrix = np.linalg.inv(rotation_matrix)\n\n displacement = np.zeros((3, 1), dtype=np.float32)\n displacement[0, 0] = const.DIST_TO_CAM\n displacement = np.matmul(rotation_matrix, displacement)\n\n #assembling 4x4 from R + T\n extrinsic_matrix[0:3, 0:3] = rotation_matrix\n extrinsic_matrix[0:3, 3:4] = -displacement\n\n if invert_focal:\n intrinsic_matrix[2, 2] = float(const.focal_length)\n intrinsic_matrix[1, 1] = float(const.focal_length)\n else:\n intrinsic_matrix[2, 2] = 1.0 / float(const.focal_length)\n intrinsic_matrix[1, 1] = 1.0 / float(const.focal_length)\n\n camera_matrix = np.matmul(extrinsic_matrix, intrinsic_matrix)\n return camera_matrix", "def transform_perspective(self, pt_x, pt_y):\n linear_y = pt_y * self.perspective_point_y / self.height\n\n diff_x = pt_x - self.perspective_point_x\n diff_y = self.perspective_point_y - linear_y\n factor_y = diff_y / self.perspective_point_y\n # factor_y = factor_y * factor_y\n factor_y = pow(factor_y, 2) # pow = mise au carre\n\n transfor_x = self.perspective_point_x + diff_x * factor_y\n transfor_y = self.perspective_point_y - factor_y * self.perspective_point_y\n\n return int(transfor_x), int(transfor_y)", "def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 
1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])", "def projection(self) -> Projection:\n return self._projection", "def getProjectionMatrix(sorted_eigvecs):\n matrix_w = np.vstack(sorted_eigvecs).transpose()\n return matrix_w", "def compute_perspective_transform(corner_points, width, height, image):\n # Create an array out of the 4 corner points\n corner_points_array = np.float32(corner_points)\n # Create an array with the parameters (the dimensions) required to build the matrix\n img_params = np.float32([[0, 0], [width, 0], [0, height], [width, height]])\n # Compute and return the transformation matrix\n matrix = cv2.getPerspectiveTransform(corner_points_array, img_params)\n img_transformed = cv2.warpPerspective(image, matrix, (width, height))\n return matrix, img_transformed", "def get_perspective_transform3d(src: Tensor, dst: Tensor) -> Tensor:\n if not isinstance(src, (Tensor)):\n raise TypeError(f\"Input type is not a Tensor. Got {type(src)}\")\n\n if not isinstance(dst, (Tensor)):\n raise TypeError(f\"Input type is not a Tensor. Got {type(dst)}\")\n\n if not src.shape[-2:] == (8, 3):\n raise ValueError(f\"Inputs must be a Bx8x3 tensor. Got {src.shape}\")\n\n if not src.shape == dst.shape:\n raise ValueError(f\"Inputs must have the same shape. Got {dst.shape}\")\n\n if not (src.shape[0] == dst.shape[0]):\n raise ValueError(f\"Inputs must have same batch size dimension. Expect {src.shape} but got {dst.shape}\")\n\n if not (src.device == dst.device and src.dtype == dst.dtype):\n raise AssertionError(\n f\"Expect `src` and `dst` to be in the same device (Got {src.dtype}, {dst.dtype}) \"\n f\"with the same dtype (Got {src.dtype}, {dst.dtype}).\"\n )\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n p = []\n\n # 000, 100, 110, 101, 011\n for i in [0, 1, 2, 5, 7]:\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'x'))\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'y'))\n p.append(_build_perspective_param3d(src[:, i], dst[:, i], 'z'))\n\n # A is Bx15x15\n A = stack(p, 1)\n\n # b is a Bx15x1\n b = stack(\n [\n dst[:, 0:1, 0],\n dst[:, 0:1, 1],\n dst[:, 0:1, 2],\n dst[:, 1:2, 0],\n dst[:, 1:2, 1],\n dst[:, 1:2, 2],\n dst[:, 2:3, 0],\n dst[:, 2:3, 1],\n dst[:, 2:3, 2],\n # dst[:, 3:4, 0], dst[:, 3:4, 1], dst[:, 3:4, 2],\n # dst[:, 4:5, 0], dst[:, 4:5, 1], dst[:, 4:5, 2],\n dst[:, 5:6, 0],\n dst[:, 5:6, 1],\n dst[:, 5:6, 2],\n # dst[:, 6:7, 0], dst[:, 6:7, 1], dst[:, 6:7, 2],\n dst[:, 7:8, 0],\n dst[:, 7:8, 1],\n dst[:, 7:8, 2],\n ],\n 1,\n )\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return\n batch_size: int = src.shape[0]\n M = torch.empty(batch_size, 16, device=src.device, dtype=src.dtype)\n M[..., :15] = X[..., 0]\n M[..., -1].fill_(1)\n\n return M.view(-1, 4, 4) # Bx4x4", "def myPerspectiveTransform(pts, H):\n\n # Clone and reshape the list of points\n new_pts = np.reshape(pts, (-1, 2))\n # Allocate a vector filled with one with size (-1, 1)\n one_vector = np.zeros((pts.shape[0], 1)) + 1\n # Concatenate the one vector to the list of points to form the homogenious coordiniate system\n new_pts = np.concatenate((new_pts, one_vector), axis=len(new_pts.shape)-1)\n\n # Perform transformation and transform results into the pixel coord. 
system\n # i.e., x' = x/w, and y' = y/w\n for i, pt in enumerate(new_pts):\n new_pts[i] = H.dot(pt.T)\n new_pts[i] /= new_pts[i, -1]\n\n # Return results with the same shape as the input has\n return new_pts[:, :-1].reshape(pts.shape)", "def _calculate_camera_pose(frame, K, d, corners, pattern_shape=(6, 4), grid_size=30): # noqa: E501\n img = frame.copy()\n axis = np.float32([[grid_size, 0, 0], [0, grid_size, 0],\n [0, 0, -grid_size]]).reshape(-1, 3)*2\n\n objp = np.zeros((np.prod(pattern_shape), 3), np.float32)\n objp[:, :2] = np.mgrid[0:pattern_shape[0],\n 0:pattern_shape[1]].T.reshape(-1, 2) * grid_size\n\n _, rvecs, tvecs = cv2.solvePnP(objp, corners, K, d)\n R, _ = cv2.Rodrigues(rvecs)\n # project 3D points onto image plane\n imgpts, _ = cv2.projectPoints(axis,\n rvecs, tvecs,\n K, d)\n\n canvas = computer_vision.draw_axis(img, corners, imgpts)\n return R, tvecs, canvas", "def calcul_point_plan_projection(cls,cx,cy,cz,spx,spy,axe_x,axe_y):\n projX=gs.Vector3(spx*axe_x.x,spx*axe_x.y,spx*axe_x.z)\n projY=gs.Vector3(spy*axe_y.x,spy*axe_y.y,spy*axe_y.z)\n point=gs.Vector3(projX+projY)+gs.Vector3(cx,cy,cz)\n return point", "def set_projection_from_camera(K, width, height):\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n \n fx = K[0,0]\n fy = K[1,1]\n fovy = 2*np.arctan(0.5*height/fy)*180/np.pi\n aspect = (width*fy)/(height*fx)\n\n # define the near and far clipping planes\n near = 0.1\n far = 100.0\n\n # set perspective\n # Need to apt-get install freeglut3 and freeglut3-dev\n # https://github.com/thecountoftuscany/PyTeapot-Quaternion-Euler-cube-rotation/issues/1\n gluPerspective(fovy,aspect,near,far)\n glViewport(0,0,width,height)", "def compute_perspective_transform(self, binary_image):\r\n transform_src = np.float32([[300, 309], [500, 315], [120, 381], [685, 392]])\r\n transform_dst = np.float32([ [0,0], [800, 0], [0,600], [800,600]])\r\n perspective_transform = cv2.getPerspectiveTransform(transform_src, transform_dst)\r\n inverse_perspective_transform = cv2.getPerspectiveTransform(transform_dst, transform_src)\r\n warped_image = cv2.warpPerspective(binary_image, perspective_transform, \r\n (binary_image.shape[1], binary_image.shape[0]), \r\n flags=cv2.INTER_NEAREST)\r\n\r\n return warped_image, inverse_perspective_transform", "def project(self, point):\n return np.round(project(self.camera.P, point)).astype(int)", "def map_to_matrix(x, y):\n x_pos = round(x * ((MATRIX_SIZE_X - 1)/(FRAME_W - 1)))\n y_pos = round(y * ((MATRIX_SIZE_Y - 1)/(FRAME_H - 1)))\n\n x_pos = (MATRIX_SIZE_X - 1) - x_pos #invert x direction (left and right) to account for camera perspective\n\n return x_pos, y_pos", "def project(self, win_width, win_height, fov, viewer_distance):\n\t\tfactor = fov / (viewer_distance + self.z)\n\t\tx = self.x * factor + win_width / 2\n\t\ty = -self.y * factor + win_height / 2\n\t\treturn Point3D(x, y, 1)", "def orthographic_projection_matrix(width, height, w=5, h=5, n=0.01, f=5):\n # Transformation that maps from orthographic vv to canonical vv.\n Morth = np.array([[2/w, 0, 0, 0],\n [ 0, 2/h, 0, 0],\n [ 0, 0, 2/(n-f), -(n+f)/(n-f)],\n [ 0, 0, 0, 1]])\n\n # Transform canonical vv to viewport.\n Mvp = np.array([[width/2, 0, 0, (width-1)/2],\n [ 0, height/2, 0, (height-1)/2],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1]])\n\n return Mvp.dot(Morth)", "def project_points(self, points_3d, camera):\n batch_size = points_3d.shape[0]\n device = points_3d.device\n cam_t = torch.stack([camera[:, 1], camera[:, 2], 2 * self.focal_length / (self.img_res * camera[:, 0] + 1e-09)], dim=-1)\n camera_center = 
camera.new_zeros([batch_size, 2])\n rot_t = torch.eye(3, device=device, dtype=points_3d.dtype).unsqueeze(0).expand(batch_size, -1, -1)\n joints_2d = perspective_projection(points_3d, rotation=rot_t, translation=cam_t, focal_length=self.focal_length, camera_center=camera_center)\n return joints_2d", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)", "def coord_proj(self):\r\n return self._coord_proj", "def projection(self):\n return self._map_projection", "def projection(self):\n return self._map_projection", "def build_perspective_camera(field_of_view=60.0,\n aspect_ratio=1.0,\n near_plane=0.01,\n far_plane=1000.0,\n position=(0.0, 0.0, 5.0),\n enable_zoom=False):\n context = build_context()\n camera = context.THREE.PerspectiveCamera.new_object(field_of_view,\n aspect_ratio, near_plane,\n far_plane)\n camera.position.set(*position)\n controls = context.THREE.OrbitControls.new_object(camera)\n controls.enableZoom = enable_zoom\n return camera", "def transform(self, R, t, scale = 1):\n\n # Build 4-by-4 projection matrix from args ----------------------------\n # This is what we are doing internally:\n # Proj = np.r_[ scale * np.c_[R, t], [[0, 0, 0, 1]] ]\n # InvProj = np.r_[ scale * np.c_[R.T, -np.dot(R.T, t)], [[0,0,0,scale]] ]\n Proj = tf_format.tf_format('4x4', R, t)\n Proj[:-1,:] *= scale\n InvProj = tf_format.tf_format('i4x4', R, t) * scale\n \n \n # Apply transformation to pts3D ---------------------------------------\n if self.pts3D is not None and self.pts3D.shape[1] > 0:\n # Use homogeneous coords\n pts3D = np.r_[self.pts3D, np.ones((1, self.pts3D.shape[1]))]\n pts3D = np.dot(Proj, pts3D)\n self.pts3D = pts3D[:3, :]\n\n # Apply transformation to cameras -------------------------------------\n # Camera poses are stored using camera-to-world transformations, we \n # need to invert the projection matrix for this to work --> \n # we use InvProj\n\n cposes = self.cam_poses\n for i in range(cposes.shape[1]):\n\n # Extract camera projection matrix\n p_cam = tf_format.tf_format('4x4', cposes[:, i])\n\n # Transform camera projection matrix\n new_p_cam = np.dot(p_cam, InvProj)\n \n # Make sure it's a true rotation!\n [u, s, vT] = np.linalg.svd(new_p_cam[:3,:3])\n cposes[:3, i] = tf_format.rodrigues( np.dot(u,vT) ).ravel()\n cposes[3:, i] = new_p_cam[:3, 3]\n\n self.cam_poses = cposes", "def project(self, win_width, win_height, fov, viewer_distance):\r\n factor = fov / (viewer_distance + self.z)\r\n x = self.x * factor + win_width / 2\r\n y = -self.y * factor + win_height / 2\r\n return Point3D(x, y, 1)", "def grid_proj(self):\r\n return _get_projection(self._grid_proj, self.longitude, self.latitude)", "def ExtractCameraPose(E):\n u, s, v = np.linalg.svd(E, full_matrices=True)\n w = np.array([[0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]]).reshape(3, 3)\n c1 = u[:, 2].reshape(3, 1)\n r1 = np.dot(np.dot(u, w), v).reshape(3, 3)\n c2 = -u[:, 2].reshape(3, 1)\n r2 = np.dot(np.dot(u, w), v).reshape(3, 3)\n c3 = u[:, 2].reshape(3, 1)\n r3 = np.dot(np.dot(u, w.T), v).reshape(3, 3)\n c4 = -u[:, 2].reshape(3, 1)\n r4 = np.dot(np.dot(u, w.T), v).reshape(3, 3)\n if np.linalg.det(r1) < 0:\n c1 = -c1\n r1 = -r1\n if np.linalg.det(r2) < 0:\n c2 = -c2\n r2 = -r2\n if np.linalg.det(r3) < 0:\n c3 = -c3\n r3 = -r3\n if np.linalg.det(r4) < 0:\n c4 = -c4\n r4 = -r4\n cam_center = np.array([c1, c2, c3, c4])\n cam_rotation = np.array([r1, r2, r3, r4])\n return 
cam_center, cam_rotation", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, self.z)", "def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n X_camera = np.matmul(R, X) + T\n X_camera = X_camera / X_camera[2, :] # Normalize\n\n if distortion_flag:\n radiusSq = (X_camera[0, :] * X_camera[0, :]) + (X_camera[1, :] * X_camera[1, :])\n X_camera = X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # X_camera = (X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # + (2 * distortion_params[2] * X_camera[0,:] * X_camera[1,:]) + distortion_params[3] * (radiusSq + (2 * X_camera * X_camera)))\n\n X_camera[2, :] = 1.0\n X_camera = np.matmul(K, X_camera)\n X_camera = X_camera[:2, :]\n\n return X_camera", "def world_projection(self, aspect):\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n if aspect < 1:\n gluOrtho2D(\n -self.scale,\n +self.scale,\n -self.scale / aspect,\n +self.scale / aspect)\n else:\n gluOrtho2D(\n -self.scale * aspect,\n +self.scale * aspect,\n -self.scale,\n +self.scale)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(\n self.x, self.y, +1.0,\n self.x, self.y, -1.0,\n sin(self.angle), cos(self.angle), 0.0)", "def projection(self, point):\n projected_point = self._iterate_over_factors(\"projection\", {\"point\": point})\n return projected_point", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n #to transform a point from Lidar framce to camera frame\n #reshape the flat line with 12 elements to 3X4 matrix\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n#print('velo2cam', velo_to_cam)\n inv_rect = np.linalg.inv(rect)\n #select all rows and only first three columns\n#print('velo_to_cam[:, :3]', velo_to_cam[:, :3])\n #select all rows and only first three columns\n inv_velo_to_cam = np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def project_point(self, point: Point3D) -> Point3D:\n x, y, z = point\n cam_x, cam_y, cam_z = self._pos\n x -= cam_x\n y -= cam_y\n z -= cam_z\n dx = self._cy*(self._sz*y + self._cz*x) - self._sy*z\n dy = self._sx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) + self._cx*(self._cz*y - self._sz*x)\n dz = self._cx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) - self._sx*(self._cz*y - self._sz*x)\n return self._scale * dx/dz, self._scale * dy/dz, dz", "def _projection(emb_e, proj_vec):\n proj_vec = F.normalize(proj_vec, p=2, dim=-1)\n\n # [b, k], [b, k]\n return emb_e - torch.sum(emb_e * proj_vec, dim=-1, keepdims=True) * proj_vec", 
"def transform(self, image):\n # e) use cv2.warpPerspective() to warp your image to a top-down view\n # Warp the image using OpenCV warpPerspective()\n w, h = image.shape[1], image.shape[0]\n return cv2.warpPerspective(image, self.p_mat, (w, h))", "def make_project_matrix(X):\n X = np.mat(X)\n return np.eye(X.shape[0]) - (X*(np.linalg.inv(X.T*X)*X.T))", "def Reproject(points,tm, rm):\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid", "def camera_position(self):\n return CameraPosition(\n scale_point(self.camera, self.camera.position, invert=True),\n scale_point(self.camera, self.camera.focal_point, invert=True),\n self.camera.up,\n )", "def __init__(self, at=(0, 0, 0), eye=(0, 0, -0.1), lens=None,\r\n is_3d=True, scale=1.0):\r\n super(Camera, self).__init__()\r\n\r\n self.at = at\r\n self.start_eye = eye # for reset with different lens settings\r\n self.eye = [eye[0], eye[1], eye[2]]\r\n if lens == None:\r\n from pi3d.Display import Display\r\n lens = [Display.INSTANCE.near, Display.INSTANCE.far, Display.INSTANCE.fov,\r\n Display.INSTANCE.width / float(Display.INSTANCE.height)]\r\n self.lens = lens\r\n self.view = _LookAtMatrix(at, eye, [0, 1, 0])\r\n if is_3d:\r\n self.projection = _ProjectionMatrix(lens[0], lens[1], lens[2] / scale, lens[3])\r\n else:\r\n self.projection = _OrthographicMatrix(scale=scale)\r\n self.model_view = dot(self.view, self.projection)\r\n # Apply transform/rotation first, then shift into perspective space.\r\n self.mtrx = array(self.model_view, copy=True)\r\n # self.L_reflect = _LookAtMatrix(at,eye,[0,1,0],reflect=True)\r\n self.rtn = [0.0, 0.0, 0.0]\r\n\r\n self.was_moved = True", "def intrinsic_matrix_from_camera(w, h, fov):\n (cx, cy), f = calc_focal_values(w, h, fov)\n return np.array([[f, 0, cx], [0, f, cy], [0, 0, 1]])", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()", "def orthographic_matrix(self) -> TransformationMatrixType:\n near, far = self._clipping[self.projection_mode.value]\n return orthographic_matrix(self.fov, self.aspect_ratio, near, far)", "def problem3():\n t = np.array([-27.1, -2.9, -3.2])\n principal_point = np.array([8, -10])\n focal_length = 8\n\n # model transformations\n T = gettranslation(t)\n Ry = getyrotation(135)\n Rx = getxrotation(-30)\n Rz = getzrotation(90)\n print(T)\n print(Ry)\n print(Rx)\n print(Rz)\n\n K = getcentralprojection(principal_point, focal_length)\n\n P,M = getfullprojection(T, Rx, Ry, Rz, K)\n print(P)\n print(M)\n\n points = loadpoints()\n displaypoints2d(points)\n\n z = loadz()\n Xt = invertprojection(K, points, z)\n\n Xh = inverttransformation(M, Xt)\n\n worldpoints = hom2cart(Xh)\n displaypoints3d(worldpoints)\n\n points2 = projectpoints(P, worldpoints)\n displaypoints2d(points2)\n\n plt.show()", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n 
self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def perspective_transform(image, src, sizex, sizey, rotate=True):\n src = np.float32(src)\n \n if rotate and np.sum((src[0] - src[2])**2) > np.sum((src[0] - src[1])**2):\n dst = np.float32([(0, sizey), (0, 0), (sizex, sizey), (sizex, 0)])\n else:\n dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])\n #if np.sum((src[0] - src[2])**2) <= np.sum((src[0] - src[1])**2):\n # dst = np.float32([(0, 0), (sizex, 0), (0, sizey), (sizex, sizey)])\n #else:\n \n M = cv.getPerspectiveTransform(src, dst)\n\n warped = cv.warpPerspective(image, M, (sizex, sizey))\n\n return warped", "def apply_perspective_correction(image, M, width, height):\n warped = cv.warpPerspective(image, M, (width, height))\n return warped", "def matrix_translate_3d(tx: float, ty: float, tz: float) -> np.matrix:\n return np.matrix([[1, 0, 0, tx], [0, 1, 0, ty], [0, 0, 1, tz], [0, 0, 0, 1]])", "def get_warp_perspective(transpose_image, h_matrix, dimension):\n warped_image = np.zeros((dimension[0], dimension[1], 3))\n for index1 in range(0, transpose_image.shape[0]):\n for index2 in range(0, transpose_image.shape[1]):\n new_vec = np.dot(h_matrix, [index1, index2, 1])\n new_row, new_col, _ = (new_vec / new_vec[2] + 0.4).astype(int)\n if 5 < new_row < (dimension[0] - 5):\n if 5 < new_col < (dimension[1] - 5):\n warped_image[new_row, new_col] = transpose_image[index1, index2]\n warped_image[new_row - 1, new_col - 1] = transpose_image[index1, index2]\n warped_image[new_row - 2, new_col - 2] = transpose_image[index1, index2]\n warped_image[new_row - 3, new_col - 3] = transpose_image[index1, index2]\n warped_image[new_row + 1, new_col + 1] = transpose_image[index1, index2]\n warped_image[new_row + 2, new_col + 2] = transpose_image[index1, index2]\n warped_image[new_row + 3, new_col + 3] = transpose_image[index1, index2]\n\n return np.array(warped_image, dtype=np.uint8)", "def Reproject(points, tm, rm):\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid", "def getProjections(self): \n x, y, z = self.XYZCoordinate\n origin = self.SkeletonPoints[0]\n self.coorOrigin = origin\n self.XYProjections = [GeometryToolBox.projected_point(p, origin, x, y) for p in self.SkeletonPoints]\n self.XZProjections = [GeometryToolBox.projected_point(p, origin, x, z) for p in self.SkeletonPoints]", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def displace_to_pose(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros', return_coordinates=False):\n check_sizes(img, 'img', 'B3HW')\n\n src_pixel_coords = get_displacement_pixel_transformation(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode=rotation_mode, padding_mode=padding_mode)\n projected_img = torch.nn.functional.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)\n if return_coordinates:\n return projected_img, src_pixel_coords\n else:\n return projected_img", "def 
get_2d_point(point_3d_homogeneous, projection_matrix):\r\n\r\n temp = np.matmul(projection_matrix, point_3d_homogeneous)\r\n\r\n return cv.convertPointsFromHomogeneous(np.array([temp], np.float))" ]
[ "0.82109255", "0.76279783", "0.7519905", "0.7222865", "0.72145", "0.7192118", "0.71746266", "0.71618414", "0.71552086", "0.7016673", "0.7000344", "0.69891137", "0.68717843", "0.6742718", "0.6740875", "0.6684819", "0.66156596", "0.66130567", "0.6575204", "0.6541649", "0.652216", "0.6493427", "0.6492008", "0.64603627", "0.64499253", "0.6415194", "0.64079314", "0.6397289", "0.637808", "0.6357317", "0.63477117", "0.63446766", "0.6335824", "0.6230407", "0.620823", "0.6203201", "0.61904764", "0.61875784", "0.61779153", "0.6171593", "0.6151573", "0.61455566", "0.613982", "0.6115348", "0.61113846", "0.6099551", "0.6073287", "0.60717607", "0.6048769", "0.60434073", "0.6042026", "0.6012604", "0.5999303", "0.5952651", "0.59166825", "0.5874261", "0.5860882", "0.5859171", "0.5851592", "0.5837961", "0.5837119", "0.5829105", "0.5809788", "0.5802787", "0.58022267", "0.58022267", "0.58003855", "0.57941717", "0.5791375", "0.5786267", "0.57694477", "0.5761482", "0.5761482", "0.5761482", "0.5761299", "0.5761218", "0.5746178", "0.57435536", "0.5743224", "0.5739762", "0.5736395", "0.5731719", "0.57177645", "0.57160026", "0.56994534", "0.5691431", "0.5680688", "0.5671422", "0.56669194", "0.5653704", "0.5647478", "0.5643635", "0.56431776", "0.56296587", "0.5628499", "0.5621221", "0.5620172", "0.5611373", "0.5611111", "0.5601019" ]
0.6639653
16
Return the x,y coordinates to be drawn onto the screen
def to_screen_space(vertex_coordinates):
    w = DISPLAY_WIDTH / 2
    h = DISPLAY_HEIGHT / 2
    screen_matrix = np.matrix([[w, 0, w], [0, -h, h], [0, 0, 1]])
    x, y = vertex_coordinates.item(0), vertex_coordinates.item(1)
    xy_matrix = np.array([[x], [y], [1]])
    new_coordinates = screen_matrix * xy_matrix
    return new_coordinates.item(0), new_coordinates.item(1)
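A minimal usage sketch of to_screen_space (an illustration under stated assumptions, not part of the record): DISPLAY_WIDTH and DISPLAY_HEIGHT are hypothetical module globals set to 800x600 here. The origin of normalized device space maps to the screen centre, and (-1, +1) maps to the top-left pixel, because the -h entry in the matrix flips y (screen y grows downward).

import numpy as np

DISPLAY_WIDTH, DISPLAY_HEIGHT = 800, 600  # assumed globals; the record does not define them

# Centre of normalized device space -> centre of the screen.
center = np.matrix([[0.0], [0.0], [1.0]])
print(to_screen_space(center))    # (400.0, 300.0)

# Top-left corner (x = -1, y = +1) -> pixel (0, 0).
top_left = np.matrix([[-1.0], [1.0], [1.0]])
print(to_screen_space(top_left))  # (0.0, 0.0)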
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) / (self.ec._win.height / 2.)\n return np.array([x, y])", "def position(self):\n return self.x, self.y", "def position(self):\n return self._x, self._y", "def pos(self, x, y):\n\n if isinstance(x, float):\n x = int(x)\n\n self.screen.write(colorama.Cursor.POS(x, y), ansi=True)\n self.x = x\n self.y = y\n\n return x, y", "def getXY(self):\n return (self.X,self.Y)", "def get_pos(self):\n return (self.x, self.y)", "def get_location(self):\r\n return self.__x, self.__y", "def display_coordinates(self) -> None:\n\n print('Y coordinate: ', self.player.row_position + 1)\n print('X coordinate: ', self.player.column_position + 1)", "def get_coords(self):\n return [self.x,self.y,self.w,self.h]", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def getPos(self):\n return self.Xpos,self.Ypos", "def get_position_coords(cls):\n row = math.floor(cls.position / cls.size)\n col = cls.position - row * cls.size\n return row, col", "def get_position(self):\n return (self.x_pos, self.y_pos)", "def xy(self) -> Tuple[int, int]:\n return self._x, self._y", "def get_xy(self):\r\n return self.board.get_xy()", "def _coord(self, x, y):\n gridEdge = 7 # originally 5\n y = gridEdge - y\n cx = 100 * (x - 1) + 50\n cy = 100 * (y - 1) + 50\n r = 20\n return (cx - r, cy - r, cx + r, cy + r)", "def coordinates(self) -> Tuple[int, int]:\n return self.x, self.y", "def get_point(self):\n return self._x, self._y", "def coordinates(self) -> Tuple[float, float, float, float, float]:\n return (self.x, self.y, self.x + self.width, self.y + self.height)", "def xy(self):\n return self.coords.xy", "def world_to_screen(self, x, y):\n return x-self.x, self.h-(y-self.y)", "def getCoord(self, i):\n _x = self.__xpts[i]\n _y = self.__ypts[i]\n return _x, _y", "def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)", "def get_pos(self, cx, cy):\n x = self.min_x + cx*(self.size+0.5)\n y = self.min_y + cy*(self.size+0.5)\n return (x,y)", "def _pos(self):\n sw = self.parent.winfo_screenwidth()\n sh = self.parent.winfo_screenheight()\n w = sw * 0.8\n h = sh * 0.8\n x = (sw - w) / 2\n y = (sh - h) / 2\n self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))", "def get_pos_in_pixels(self):\n pixelpos = Vector(self.pos.x * 32, -self.pos.y * 32)\n return pixelpos + self.offset", "def cartesianToScreen(x, y):\n screenx = xCenter + (x * xScale)\n screeny = yCenter + (y * yScale)\n screeny = HEIGHT - screeny\n\n return screenx, screeny", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def get_pix_pos(self):\r\n return vec((self.grid_pos[0]*self.app.cell_width)+TOP_BOTTOM_BUFFER//2+self.app.cell_width//2,\r\n (self.grid_pos[1]*self.app.cell_height) +\r\n TOP_BOTTOM_BUFFER//2+self.app.cell_height//2)\r\n # where Pac-Man starts relative to the board\r", "def screen_coordinates(pos):\n\n return [int((pos[0] % screen_width) / px), screen_height - int((pos[1] % screen_height) 
/ px)]", "def get_position(self):\n position = (self.position_x * SPRITE_SIZE, self.position_y * SPRITE_SIZE)\n return position", "def get_aa_pos_on_screen(self,position,frame):\n position=position*3+float(frame)-1\n x,y=self.get_base_pos_on_screen(position)\n y=y+20.0+float(frame)*15.0\n return x,y", "def get(self):\n return (self.x,self.y);", "def coordinates(self):\n return self.xy", "def _posToScreenCoords(self, pos):\n camLim_x = self.camCenter.x - self.camSize / 2\n camLim_y = self.camCenter.y - self.camSize / 2\n\n x = (self.screenSize[0] / self.camSize) * (pos.x - camLim_x)\n y = (self.screenSize[1] / self.camSize) * (pos.y - camLim_y)\n\n # Invert orientation of y\n y = self.screenSize[1] - y\n\n return int(x), int(y)", "def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m", "def getBallPos(self) -> (int,int):\n return self.x, self.y", "def img2widgetcoords(self, x,y):\n\t\tif self._i2w_matrix is None: self._calc_matrix()\n\t\treturn self._i2w_matrix.transform_point(x,y)", "def screenPos(self):\n return Point(self._screenPos)", "def screenPos(self):\n return Point(self._screenPos)", "def screenPos(self):\n return Point(self._screenPos)", "def compute_coordinates(self):\n self._x, self._y = self.board.index_to_coordinates(self.index)", "def dimscr(self):\n return (self.startx, self.starty, self.endx - self.startx, self.endy - self.starty)", "def drawOrigin():\n if xMin < 0 < xMax:\n if yMin < 0 < yMax:\n x, y = cartesianToScreen(0, 0)\n\n pygame.draw.line(display, WHITE, (x - 6, y),\n (x + 6, y), 3)\n\n pygame.draw.line(display, WHITE, (x, y - 6),\n (x, y + 6), 3)", "def get(self):\n return self.x, self.y", "def get_pos(self):\n return (self.x/3, 3**0.5*self.y/3, self.r/3)", "def mouse_coords(desktop=False):\n x, y = c_int(0), c_int(0)\n if desktop:\n mouse.SDL_GetGlobalMouseState(byref(x), byref(y))\n else:\n mouse.SDL_GetMouseState(byref(x), byref(y))\n return (int(x.value), int(y.value))", "def xy(self) -> Tuple[float, float]:\n return (self.x, self.y)", "def coords2D(self):\n return (self.x, self.y)", "def get_xy(self, x, y):\r\n\t\treturn self.grid[y, x]", "def draw(self):\n self.screen.blit(self.image, (self.x_pos1, self.y_pos))\n self.screen.blit(self.image, (self.x_pos2, self.y_pos))", "def get_pixels(self):\n\n # pygame board needs to be initialized the first time\n if not self.board:\n self.setup_display(render_gui=False)\n\n self.draw_window(draw_leaderboard=False)\n pixels = pygame.surfarray.array3d(self.window)\n return np.moveaxis(pixels, 1, 0)", "def draw(self, screen, offsets: tuple):\r\n pass", "def get_coords(self):\n xTK = int(jeu.coords(self.rectangle)[0]) # Coordonnées TKinter x1 et y1 du rectangle correspondant à la voiture\n yTK = int(jeu.coords(self.rectangle)[1])\n # On divise par la largeur d'une case et on renvoie les valeurs obtenues sous la forme d'un tuple\n X = xTK//100\n Y = yTK//100\n resultat = [X, Y]\n return resultat", "def get_xy_position(row, col):\n spacing_x = 86 + 11\n spacing_y = 98 + 8\n top_y = 50\n left_x = 50\n return left_x + col * spacing_x, top_y + row * spacing_y", "def draw(self, screen):", "def Canvas_onclick(event):\n global ix, iy\n ix, iy = event.xdata, event.ydata\n print 'x = %f -> i = %d, y = %f' % (ix,ix/0.5*fig.Fulllength, iy)\n\n global coords\n coords = [ix, iy]\n\n return coords", "def coordinates(self):\n return np.array([self.x, self.y])", "def xstagg_xy_coordinates(self):\n\n x_s = self.corner_grid.x0 + np.arange(self.nx+1) * self.dx\n y = self.center_grid.y0 + 
np.arange(self.ny) * self.dy\n return np.meshgrid(x_s, y)", "def calculate_window_position(self):\n self.x = SQUARE_SIZE * self.col + SQUARE_SIZE // 2\n self.y = SQUARE_SIZE * self.row + SQUARE_SIZE // 2", "def _drawOrigin(self):\n screen_coords = self._posToScreenCoords(Vec2())\n\n if not self._isInScreen(screen_coords):\n return\n\n pygame.draw.line(\n self.screen,\n (150, 150, 150),\n (screen_coords[0] - 3, screen_coords[1]),\n (screen_coords[0] + 3, screen_coords[1]),\n )\n pygame.draw.line(\n self.screen,\n (150, 150, 150),\n (screen_coords[0], screen_coords[1] - 3),\n (screen_coords[0], screen_coords[1] + 3),\n )", "def get_pixel_pos(self):\n\n c = self.get_center()\n\n return Tank.three_by_three(c[0],c[1])", "def xy(event):\n return map(int, event.get_coords())", "def __getxy(x1, y1, x2, y2):\n\t\treturn x1*27+y1*9+x2*3+y2", "def to_coords(self, px, py):\n if px not in range(self.SIZE**2) or py not in range(self.SIZE**2):\n raise IndexError\n return (px // self.SIZE, py // self.SIZE,\n px % self.SIZE, py % self.SIZE)", "def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, -1", "def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()", "def _get_display_coordinates(self) :\n \n return self._display_coordinates", "def getxy():\n if g.detectable_size:\n x, y = terminalsize.get_terminal_size()\n max_results = y - 4 if y < 54 else 50\n max_results = 1 if y <= 5 else max_results\n\n else:\n x, max_results = Config.CONSOLE_WIDTH.get, Config.MAX_RESULTS.get\n y = max_results + 4\n\n return XYTuple(x, y, max_results)", "def get_pick_position(self):\n x0 = int(self.GetPickX1())\n x1 = int(self.GetPickX2())\n y0 = int(self.GetPickY1())\n y1 = int(self.GetPickY2())\n return x0, y0, x1, y1", "def calculate_screen_position(self):\r\n\r\n character_select_start_y = 604\r\n character_select_end_y = 646\r\n\r\n if self.slotNumber <= 6:\r\n start_y = 585 # 595\r\n end_y = 627 # 637\r\n x_hero_number = self.slotNumber\r\n else:\r\n start_y = 300 # 290\r\n end_y = 342 # 332\r\n x_hero_number = self.slotNumber - 6\r\n\r\n start_x = 249 + (x_hero_number * 192)\r\n end_x = 326 + (x_hero_number * 192)\r\n\r\n self.screenPositionCharacterSelect = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": character_select_start_y,\r\n \"end_y\": character_select_end_y\r\n }\r\n self.screenPositionTab = {\r\n \"start_x\": start_x,\r\n \"end_x\": end_x,\r\n \"start_y\": start_y,\r\n \"end_y\": end_y\r\n }", "def event_to_x_y(self, event):\n\t\treturn (round(event.x / self.w_to_px), round((HEIGHT - event.y) / self.h_to_px))", "def coordinates(self):", "def tex_coord(x, y, n=4):\n m = 1.0 / n\n dx = x * m\n dy = y * m\n return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m", "def screen_status(self):\n height, width = self.stdscr.getmaxyx()\n i_start = self.y_start * width + self.x_start\n\n return i_start, height, width", "def tex_coord(x, y, n=16):\r\n m = 1.0 / n\r\n dx = x * m\r\n dy = y * m\r\n return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m", "def get_mouse_coordinate(self):\n pos = pygame.mouse.get_pos()\n mov = pygame.mouse.get_rel()\n row = pos[0] // (self.CELL_WIDTH + self.MARGIN)\n col = (pos[1] - self.PANEL_HEIGHT) // (self.CELL_WIDTH + self.MARGIN)\n if mov != (0, 0) and not self.env.not_in_grid(row, col):\n return (row, col)\n return self.markerPos", "def 
getMachineCoordinates(self):\n return (self.x, self.y, self.z)", "def toScreenCoordinate(self):\n return self.currentLevel.transformToScreenCoordinate(self.position)", "def draw( self ):\n\t\t\t\n\t\ttransposition = lambda point: (point[0] + WINDOW_X, WINDOW_Y - point[1])\n\t\t\t \n\t\tx, y = transposition( self.position.xy )\n\t\tpygame.draw.circle(self.screen, self.color, ( int(x + 0.5), int(y + 0.5) ), self.r)", "def display(self, canvas, x, y, width, height):\n pass", "def dest_xy(self) -> Tuple[int, int]:\n return self.entity.x + self.dx, self.entity.y + self.dy", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def pixel2coord(x, y,a,b,xoff,yoff,d,e):\n xp = a * x + b * y + xoff\n yp = d * x + e * y + yoff\n return(xp, yp)", "def draw(self, **kwargs):\r\n self.simulate()\r\n return self.positions.get_value(borrow=False)", "def display_image_coords_to_canvas_coords(display_image_yx_coords):\n\n return [(yx[1], yx[0]) for yx in display_image_yx_coords]", "def tex_coord(x, y, n=16):\n m = 1.0 / n\n dx = x * m\n dy = y * m\n return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m", "def get_grid_position(self):\n tile_size_x = constants.WINDOW_WIDTH / constants.GRID_TILE_LENGTH\n tile_size_y = constants.WINDOW_HEIGHT / constants.GRID_TILE_LENGTH\n grid_x = tile_size_x / self.host.x\n grid_y = tile_size_y / self.host.y\n return grid_x, grid_y", "def draw(self,x=0,y=0):\n\t\tself.center = x,y\n\t\tr\t\t= max(Person.WIDTH>>1,1)\n\t\tself.canvas.coords('node_'+self.identifier, x-r,y-r,x+r,y+r)", "def get_window_info (self):\n \n # g.trace(self.w,self.h,self.x,self.y)\n \n return self.w,self.h,self.x,self.y", "def getCellpos(self, event):\n e = event.widget\n cx, cy = cart(e.canvasx(event.x), e.canvasy(event.y))\n cellx = int(cx) // self.cell_width\n celly = int(cy) // self.cell_height\n return cellx, celly", "def x(self):\r\n return self.position.x", "def get_pos(self) -> tuple:\n return self.rect.center", "def widget2imgcoords(self, x,y):\n\t\tif self._w2i_matrix is None: self._calc_matrix()\n\t\treturn self._w2i_matrix.transform_point(x,y)", "def getCoords(self): # real signature unknown; restored from __doc__\r\n pass", "def getCoords(self):\r\n \r\n return self.coords", "def coords(self):\n return (self.x, self.y, self.z)", "def coords(self):\n return (self.x, self.y, self.z)", "def getPosicion(self):\r\n\t\treturn [self._x, self._y]" ]
[ "0.74338204", "0.7273958", "0.7184046", "0.71780956", "0.7090324", "0.7082188", "0.70618874", "0.7055416", "0.6984277", "0.6973121", "0.6973121", "0.69694185", "0.6959739", "0.69450396", "0.6911599", "0.69074255", "0.6878503", "0.68669933", "0.68637353", "0.6858139", "0.68471473", "0.6841321", "0.6793995", "0.67886984", "0.6783212", "0.67739314", "0.67708623", "0.6767633", "0.6762328", "0.67556316", "0.67504895", "0.67430735", "0.67381597", "0.67195356", "0.67162937", "0.6708801", "0.6697387", "0.6691903", "0.6691846", "0.6678166", "0.6667485", "0.6667485", "0.6667485", "0.6651717", "0.664867", "0.6645272", "0.6628877", "0.6623083", "0.661746", "0.6609742", "0.66091895", "0.6595035", "0.6594053", "0.6578291", "0.6544904", "0.6540029", "0.6533412", "0.65278035", "0.65068114", "0.64982986", "0.64809036", "0.6477005", "0.64538896", "0.6453805", "0.6436583", "0.6431938", "0.6419772", "0.6402823", "0.64003015", "0.6390502", "0.6389549", "0.6380345", "0.63703465", "0.63427705", "0.6341529", "0.6336933", "0.63319945", "0.63319016", "0.63254577", "0.6324565", "0.63238627", "0.63204694", "0.6314108", "0.6313604", "0.63039964", "0.63001597", "0.62974775", "0.62967587", "0.629053", "0.6286", "0.6285316", "0.62742233", "0.627252", "0.6266859", "0.62634075", "0.62612486", "0.62539285", "0.62483245", "0.62407994", "0.62407994", "0.6240312" ]
0.0
-1
Use push so that you can apply several transformations at the same time: 1. Push the matrix onto the stack 2. Translate to position 3. Rotate it 4. Draw it 5. Pop the matrix
def animate_car():
    car_lines = []
    tire_lines = []
    # move car along
    # glPushMatrix()
    offset = [car.position.x, car.position.y, car.position.z]
    transformation_matrix = push_translation_rotation_matrix(offset, 0)
    # glTranslated(car.position.x, car.position.y, car.position.z)
    car_lines.extend(draw_object(loadCar(), transformation_matrix))
    # draw car
    # drawCar()
    # translate and rotate tires
    for tire in car.tires:
        offset = [tire.x, tire.y, tire.z]
        transformation_matrix = push_translation_rotation_matrix(offset, car.tire_rotation, rot_type="z")
        tire_lines.extend(draw_object(loadTire(), transformation_matrix))
        pop_matrix()
    pop_matrix()
    return car_lines, tire_lines
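The commented-out glPushMatrix/glTranslated calls above point at the legacy fixed-function OpenGL idiom the query describes. A sketch of that idiom follows, assuming PyOpenGL; drawCar and drawTire are hypothetical draw routines, not defined in this record.

from OpenGL.GL import glPushMatrix, glPopMatrix, glTranslated, glRotated

def draw_car_fixed_function(car):
    glPushMatrix()                             # 1. push the current modelview matrix
    glTranslated(car.position.x, car.position.y, car.position.z)  # 2. translate to the car
    drawCar()                                  # 4. draw the body (hypothetical)
    for tire in car.tires:
        glPushMatrix()                         # nested push per tire
        glTranslated(tire.x, tire.y, tire.z)   # 2. translate to the wheel hub
        glRotated(car.tire_rotation, 0, 0, 1)  # 3. rotate about the z axis
        drawTire()                             # 4. draw the tire (hypothetical)
        glPopMatrix()                          # 5. pop the tire matrix
    glPopMatrix()                              # 5. pop the car matrix

Each matched push/pop restores the previous transform, so the tire rotations never leak into the car body or into whatever is drawn next.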
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push_back(self, *args):\n return _osgAnimation.vectorMatrixKeyframe_push_back(self, *args)", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def Translate(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Translate(*args, **kwargs)", "def op_rot(self, args):\n stack_level = 3\n if args != None:\n stack_level = int(args[0])\n if stack_level < 3:\n raise ValueError(\"ERROR: You can't rotate fewer than 3 items!\")\n self.require_stack(stack_level)\n val = self.stack.pop(-stack_level)\n self.stack.append(val)", "def __call__(self, *args):\n return _osgAnimation.RigTransformHardware___call__(self, *args)", "def drawMatrix(self,screen,pos):\n bp = pos # base position bp\n namesurface = myfont.render(self.parentname, False, self.parentcolour)\n screen.blit(namesurface,(bp[0]-50,bp[1] -60),) \n tempOffset = namesurface.get_width()\n namesurface = myfont.render(self.name, False, self.colour)\n screen.blit(namesurface,(bp[0]-40 ,bp[1]-30),) \n size = 10\n separationx = 12\n separationy = 20\n # Cycle through array of inputs\n for i in range(self.dimensions[0]):\n # Create red - green colour based on array\n temp_colour = (int((1-self.scan[i])*240),int(self.scan[i]*240),0)\n # Draw square that is slightly offset of previous square\n pygame.draw.rect(screen,temp_colour ,(bp[0] - separationx *int(i / len(self.inputdistance)),\n bp[1] - separationx*(i%len(self.inputdistance)) + 3*separationx,size,size))\n # Calculate intermediate decision array\n temp_vector = self.scan \n # Repeat\n for j, bs in enumerate(self.bias):\n temp_vector = np.add(temp_vector.dot(self.weights[j]), bs)\n for i in range(temp_vector.shape[0]):\n temp_colour = (int(max(min((1-temp_vector[i])*240,240),0)),int(max(min(temp_vector[i]*240,240),0)),0)\n pygame.draw.rect(screen,temp_colour ,(bp[0] + (j+1)*separationy,bp[1] + separationx*i,size,size))", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def compose_mat(rot):\n trans_mat = oMa.MTransformationMatrix()\n trans_mat.setRotation(rot)\n\n mat = trans_mat.asMatrix()\n\n return mat", "def rotate(Y,Mat):\r\n # Converting angle into radians \r\n Y = radians(Y)\r\n # RM is the Rotation Matrix (3 X 3)\r\n RM=[[cos(Y),-sin(Y),0],[sin(Y),cos(Y),0],[0,0,1]]\r\n Rotate_Matrix = Multiply(RM,Mat)\r\n # Rotate_Matrix[0][0] is the updated x coordinate\r\n # Rotate_Matrix[1][0] is the updated y coordinate\r\n return Rotate_Matrix[0][0],Rotate_Matrix[1][0],Rotate_Matrix[2][0]", "def push_up(self, event):\n self.transpose()\n self.stack()\n self.merge()\n self.transpose()\n\n if self.any_empty_tiles():\n self.add_two()\n\n self.update_grid()\n self.is_game_finished()", "def snapshot(self,):\n self.stack.append((self.pos,self.dataptr))", "def setTransform(self, first_move):\n #Helper function composes two functions\n def compose(f,g):\n return lambda x: f(g(x))\n \n x , y = self.getCoordinates(first_move)\n size = self.size\n \n parity = size+1 # For even numbered boards, vertical and horizontal\n midline = size/2 
- 0.5*(parity%2) # lines of symmetry are halfway between integer values\n        \n        transformations = []\n        if x > midline: # If position lies below horizontal\n            reflectX = lambda (x,y): (int(midline - (x - midline)), y)\n            x , y = reflectX( (x,y) )\n            transformations.append(reflectX)\n            \n        \n        if y > midline: # If position lies to the right of vertical\n            reflectY = lambda (x,y): (x, int(midline - (y - midline)) )\n            x , y = reflectY( (x,y) )\n            transformations.append(reflectY)\n        \n        if y > x: # If position lies below main 'positive' diagonal\n            reflectDiagonal = lambda (x,y): (y,x)\n            transformations.append(reflectDiagonal)\n        \n        for k, trans in enumerate(transformations):\n            self.transform = compose(trans, self.transform)\n            self.inverseTF = compose(transformations[-(k+1) ], self.inverseTF)", "def push(self):\n        self.aut_stack.append(self.aut_to_push.pop())", 
"def test_matrix22(gridsize=50):\n\n    v1 = vec2(3,0)\n    v2 = vec2(0,3)\n\n    #rotate 45 degrees \n    m22 = matrix22()\n    m22.from_euler(45)\n\n    # make a second matrix, also 45 degrees, should give us 90 total \n    m22_2 = matrix22()\n    m22_2.from_euler(45)\n    m22 = m22_2 * m22\n\n    # multiply a vector by the matrix \n    v3 = m22 * v2 \n\n    fb = pixel_op() \n    fb.create_buffer(800, 800)\n    fb.graticule(gridsize)\n    \n    pts = [ (0,0), (0,1), (2,1), (0,2) ]\n    #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n    vecs = [v2,v3]\n    bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n    #rotate the points by matrix multiplication \n    pts = m22.batch_mult_pts(pts) \n    bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n    fb.save('2d_rotation.png')", "def push_down(self, event):\n        self.transpose()\n        self.reverse()\n        self.stack()\n        self.merge()\n        self.reverse()\n        self.transpose()\n\n        if self.any_empty_tiles():\n            self.add_two()\n\n        self.update_grid()\n        self.is_game_finished()", "def rotate(self, matrix: list[list[int]]) -> None:", 
"def translate(self, x=0, y=0, z=0):\n\t\ttranslation = np.identity(4)\n\t\ttranslation[0, 3] += x\n\t\ttranslation[1, 3] += y\n\t\ttranslation[2, 3] += z\n\t\t\n\t\tself.matrix = np.matmul(self.matrix, translation)", "def rotate(self, matrix):\n        n = len(matrix)\n        #transpose\n        for i in range(n):\n            for j in range(i+1,n):\n                matrix[i][j],matrix[j][i] = matrix[j][i],matrix[i][j]\n        #mirror\n        mid = n//2\n        for i in range(n):\n            for j in range(mid):\n                matrix[i][j],matrix[i][n-j-1] = matrix[i][n-j-1],matrix[i][j]", "def BackTransform(self, *args):\n        return _itkTranslationTransformPython.itkTranslationTransformD3_BackTransform(self, *args)", "def BackTransform(self, *args):\n        return _itkTranslationTransformPython.itkTranslationTransformD2_BackTransform(self, *args)", "def __call__(self, *args):\n        return _osgAnimation.RigTransform___call__(self, *args)", "def rotate(self, matrix: list) -> None:\n        for i in range(len(matrix)):\n            for j in range(i):\n                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n        # matrix[i].reverse()\n        print(matrix)\n        for i in range(len(matrix)):\n            matrix[i].reverse()\n        print(matrix)", 
"def translate(dx,dy,Mat):\r\n    # MT is the Translation (3 X 3) Matrix\r\n    MT=[[1,0,dx],[0,1,dy],[0,0,1]]\r\n    Translated= Multiply(MT,Mat)\r\n    # Translated[0][0] is the updated x coordinate\r\n    # Translated[1][0] is the updated y coordinate\r\n    return Translated[0][0],Translated[1][0],Translated[2][0]", "def rotate(self, matrix):\n        # # Solution 1: use zip to unzip the matrix, regrouping it by columns\n        # for i, conlum in enumerate(zip(*matrix)):\n        #     matrix[i] = list(conlum)[::-1]\n        # return matrix\n\n\n        # Solution 2: a straightforward in-place swap\n        # First swap the row/column index of every element, i.e. flip along the top-left-to-bottom-right diagonal; reversing each row then gives the final result\n        for i in range(len(matrix[0])):\n            for j in range(i,len(matrix)):\n                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n        \n        # Reverse each row below; Python has a more concise way to do this\n        for i in range(len(matrix)):\n            matrix[i] = matrix[i][::-1]", "def rotate(self, matrix):\r\n        matrix[:] = [list(row)[::-1] for row in zip(*matrix)]\r\n        print(matrix)", "def rotate(mat,angle):\n    return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", 
"def transform(self, R, t, scale = 1):\n\n        # Build 4-by-4 projection matrix from args ----------------------------\n        # This is what we are doing internally:\n        # Proj = np.r_[ scale * np.c_[R, t], [[0, 0, 0, 1]] ]\n        # InvProj = np.r_[ scale * np.c_[R.T, -np.dot(R.T, t)], [[0,0,0,scale]] ]\n        Proj = tf_format.tf_format('4x4', R, t)\n        Proj[:-1,:] *= scale\n        InvProj = tf_format.tf_format('i4x4', R, t) * scale\n        \n        \n        # Apply transformation to pts3D ---------------------------------------\n        if self.pts3D is not None and self.pts3D.shape[1] > 0:\n            # Use homogeneous coords\n            pts3D = np.r_[self.pts3D, np.ones((1, self.pts3D.shape[1]))]\n            pts3D = np.dot(Proj, pts3D)\n            self.pts3D = pts3D[:3, :]\n\n        # Apply transformation to cameras -------------------------------------\n        # Camera poses are stored using camera-to-world transformations, we \n        # need to invert the projection matrix for this to work --> \n        # we use InvProj\n\n        cposes = self.cam_poses\n        for i in range(cposes.shape[1]):\n\n            # Extract camera projection matrix\n            p_cam = tf_format.tf_format('4x4', cposes[:, i])\n\n            # Transform camera projection matrix\n            new_p_cam = np.dot(p_cam, InvProj)\n            \n            # Make sure it's a true rotation!\n            [u, s, vT] = np.linalg.svd(new_p_cam[:3,:3])\n            cposes[:3, i] = tf_format.rodrigues( np.dot(u,vT) ).ravel()\n            cposes[3:, i] = new_p_cam[:3, 3]\n\n        self.cam_poses = cposes", 
"def rotate(self, matrix):\n        # matrix[:] = zip(*matrix[::-1])\n        n = len(matrix)\n        # horizontal flip\n        for i in range(n // 2):\n            for j in range(n):\n                matrix[i][j], matrix[n - i - 1][j] = matrix[n - i - 1][j], matrix[i][j]\n        # flip along the main diagonal\n        for i in range(n):\n            for j in range(i):\n                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]", "def rotate(self, matrix) -> None:\n        c = len(matrix)\n        matrix[:] = [[matrix[c-i-1][j] for i in range(c)] for j in range(c)]", "def botStack_x(self):\r\n        self.x_stack=self.img.shape[2]-1\r\n        \r\n        self.pixmap5=self.drawPixmap(\"yz\")\r\n        self.lbl5.setPixmap(self.pixmap5)\r\n        self.pixmap6= self.writeEdge(\"yz\")\r\n        self.lbl6.setPixmap(self.pixmap6)\r\n        self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def rotateZ(self, *args, **kwargs):\n        ...", "def calibrateStacks(self, x_stack, y_stack, z_stack):\r\n        self.x_stack= x_stack\r\n        self.y_stack=y_stack\r\n        self.z_stack=z_stack\r\n        \r\n        self.resetImages()", "def push(self, op):\n        self.top += 1\n        self.stack.append(op)", "def push_back(self, *args):\n        return _osgAnimation.vectorQuatKeyframe_push_back(self, *args)", 
"def botStack(self):\r\n\r\n        self.z_stack=self.img.shape[0]-1\r\n        #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaleds(500,500)\r\n        self.pixmap= self.drawPixmap(\"xy\")\r\n        self.lbl.setPixmap(self.pixmap)\r\n\r\n        self.pixmap2=self.writeEdge(\"xy\")\r\n        self.lbl2.setPixmap(self.pixmap2)\r\n        self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def push(self,transition):\n        \n        input_to_buffer = transpose_list(transition)\n \n        for item in input_to_buffer:\n            self.deque.append(item)", "def 
push(scene):\n if _stack:\n spyral.event.handle('director.scene.exit', _scene = _stack[-1])\n old = _stack[-1]\n spyral.sprite._switch_scene()\n _stack.append(scene)\n spyral.event.handle('director.scene.enter', _scene = scene)\n pygame.event.get()", "def stack(tensor_list, axis):\n return stack_op(tensor_list, axis)", "def quick_rot(line):\n\treturn zip(*reversed(create_matrix(line)))", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def push_right(self, event):\n self.reverse()\n self.stack()\n self.merge()\n self.reverse()\n\n if self.any_empty_tiles():\n self.add_two()\n\n self.update_grid()\n self.is_game_finished()", "def transform(self, mat: TxMatrix) -> None:\n self.rotation = self.rotation - int(0x10000 * mat.angle / math.pi / 2) & 0xFFFF\n if mat.flipped:\n self.flip_y = not self.flip_y\n self.rotation = -self.rotation & 0xFFFF", "def draw(self,matrix):\n\n # Create a PIL image from the matrix \n img = self._img.fromarray(np.uint8(matrix))\n\n # Draw it.\n self._canvas.SetImage(img,0,0)\n self._canvas = self._matrix.SwapOnVSync(self._canvas)", "def topStack(self):\r\n\r\n self.z_stack=0\r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def Reproject(points,tm, rm):\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid", "def run(self):\n\n # User input loop\n global vertices\n global C\n\n repeat = 1\n while repeat:\n # asking command\n command, parameters = matrix_transformation.input_command()\n result = vertices\n\n # calling translate function\n if \"translate\" in command:\n try:\n dx = parameters[0]\n except:\n dx = 0\n try:\n dy = parameters[1]\n except:\n dy = 0\n try:\n dz = parameters[2]\n except:\n dz = 0\n\n transformation = matrix_transformation.translate(dx=dx, dy=dy, dz=dz, dim=dimension)\n result = matrix_transformation.multiplication(transformation, vertices)\n\n # calling dilate function\n elif \"dilate\" in command:\n\n try:\n scale = parameters[0]\n except:\n scale = 1\n\n transformation = matrix_transformation.dilate(scale=scale, dim=dimension)\n result = matrix_transformation.multiplication(transformation, vertices)\n\n # calling rotate function\n elif \"rotate\" in command:\n\n try:\n degree = parameters[0]\n except:\n degree = 0\n try:\n pivot_x = parameters[1]\n except:\n pivot_x = 0\n try:\n pivot_y = parameters[2]\n except:\n pivot_y = 0\n try:\n pivot_z = parameters[3]\n except:\n pivot_z = 0\n\n try:\n axis = parameters[4]\n except:\n axis = 'x'\n\n transformation = matrix_transformation.rotate(degree=degree, pivot_x=pivot_x, pivot_y=pivot_y, pivot_z=pivot_z, dim=dimension, axis=axis)\n result = matrix_transformation.multiplication(transformation, vertices)\n\n # calling reflect function\n elif \"reflect\" in command:\n\n try:\n cond = parameters[0]\n except:\n cond = \"(0,0)\"\n\n transformation = matrix_transformation.reflect(cond=cond, dim=dimension)\n result = matrix_transformation.multiplication(transformation, vertices)\n\n # calling shear 
function\n elif \"shear\" in command:\n\n try:\n axis = parameters[0]\n except:\n axis = 'x'\n try:\n scale = parameters[1]\n except:\n scale = 1\n\n transformation = matrix_transformation.shear(axis=axis, scale=scale, dim=dimension)\n result = matrix_transformation.multiplication(transformation, vertices)\n\n # calling stretch function\n elif \"stretch\" in command:\n\n try:\n axis = parameters[0]\n except:\n axis = 'x'\n try:\n scale = parameters[1]\n except:\n scale = 1\n\n transformation = matrix_transformation.stretch(axis=axis, scale=scale, dim=dimension)\n result = matrix_transformation.multiplication(transformation, vertices)\n\n # calling custom function\n elif \"custom\" in command:\n try:\n value_list = parameters[0:]\n except:\n value_list = []\n\n transformation = matrix_transformation.custom(value_list, dim=dimension)\n result = matrix_transformation.multiplication(transformation, vertices)\n\n # calling multiple function\n elif \"multiple\" in command:\n try:\n repeat = int(parameters[0]) + repeat\n except:\n repeat = 1 + repeat\n print(\"please re input your command\")\n result = vertices\n\n # reset the vertices value\n elif \"reset\" in command:\n result = vertices_ori\n\n # exiting command\n elif \"exit\" in command:\n C = 0\n print(\"exitting...\")\n glutLeaveMainLoop() #terminating the window\n exit(1)\n\n repeat -= 1\n\n # animation drawing new vertices\n if \"multiple\" not in command:\n try:\n frame = 200\n delta = numpy.subtract(result, vertices)\n transformation_split = (1 / frame) * delta\n\n for _ in range(0, frame):\n vertices = vertices + transformation_split\n sleep((1/frame))\n except:\n pass\n\n print(\"Process successfully executed...\")", "def transform_mat(R, t):\n return torch.cat([F.pad(R, [0, 0, 0, 1]), F.pad(t, [0, 0, 0, 1], value=1)], dim=2)", "def transform_mat(R, t):\n return torch.cat([F.pad(R, [0, 0, 0, 1]), F.pad(t, [0, 0, 0, 1], value=1)], dim=2)", "def rotate(self):\n\n self.pins = self.pins[1:] + list(self.pins[0])\n self.mapping = self.mapping[1:] + list(self.mapping[0])", "def get_transformation_matrix(self, fromFrame, toFrame):\n fromIndex = self.frameNames.index(fromFrame)\n toIndex = self.frameNames.index(toFrame)\n #return get_transformation_matrix(self.frameStack, fromIndex, toIndex)\n return self._get_transformation_matrix_with_indices(fromIndex, toIndex)", "def extractMatrix(self,groups):\n a = float(groups[0])\n b = float(groups[1])\n c = float(groups[2])\n d = float(groups[3])\n e = float(groups[4])\n f = float(groups[5])\n self.matrix=[[a,c,e], [b,d,f]]\n self.translateX = e\n self.translateY = f\n self.scaleX = math.sqrt(a**2+c**2)\n self.scaleY = math.sqrt(b**2+d**2)\n self.rotate = math.atan2(b,d)", "def generate(self):\n inside = self.crystal.is_inside(self.x,self.y,self.z)\n X = np.vstack((self.x[inside],self.y[inside],self.z[inside]))\n return self.rot.rotate(X)", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n left_top = [0, 0]\n right_top = [0, n-1]\n left_down = [n-1, 0]\n right_down = [n-1, n-1]\n \n for level in range(n//2):\n for step in range(n-1-2*level):\n matrix[left_top[0]+level][left_top[1]+level+step], \\\n matrix[right_top[0]+level+step][right_top[1]-level], \\\n matrix[right_down[0]-level][right_down[1]-level-step], \\\n matrix[left_down[0]-level-step][left_down[1]+level] = \\\n matrix[left_down[0]-level-step][left_down[1]+level], \\\n matrix[left_top[0]+level][left_top[1]+level+step], \\\n matrix[right_top[0]+level+step][right_top[1]-level], \\\n 
matrix[right_down[0]-level][right_down[1]-level-step]", "def rotate(self, matrix: List[List[int]]) -> None:\n height=len(matrix)\n for h in range(math.ceil(height/2)):\n for i in range(h,height-h-1):\n # print((h,i), (height-i-1,h))\n temp=matrix[h][i]\n matrix[h][i] = matrix[height-i-1][h]\n matrix[height-i-1][h] = matrix[height-h-1][height-i-1]\n matrix[height-h-1][height-i-1] = matrix[i][height-h-1]\n matrix[i][height-h-1] = temp", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n // 2):\n top = matrix[i][i:n-i]\n\n right = []\n for j in range(i, n-i):\n right.append(matrix[j][n-i-1])\n\n bottom = [] \n for j in range(i, n-i):\n bottom.append(matrix[n-i-1][j])\n\n left = []\n for j in range(i, n-i):\n left.append(matrix[j][i])\n\n # print('i:', i, 'top:', top, 'right:', right, 'bottom:', bottom, 'left:', left)\n\n # top -> right\n for j in range(i, n-i):\n matrix[j][n-i-1] = top[j-i]\n # right -> bottom\n for j in range(i, n-i):\n matrix[n-i-1][j] = right[n-i-j-1]\n # bottom -> left\n for j in range(i, n-i):\n matrix[j][i] = bottom[j-i]\n # left -> top\n for j in range(i, n-i):\n matrix[i][j] = left[n-i-j-1]", "def rotate(self, matrix: List[List[int]]) -> None:\n flip(transpose(matrix))", "def rotate_matrix(self, mat):\r\n N=3\r\n for x in range(0, int(N / 2)):\r\n for y in range(x, N-x-1):\r\n temp = mat[x][y]\r\n mat[x][y] = mat[y][N-1-x]\r\n mat[y][N-1-x] = mat[N-1-x][N-1-y]\r\n mat[N-1-x][N-1-y] = mat[N-1-y][x]\r\n mat[N-1-y][x] = temp\r\n return mat", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def rotate_matrix(matrix):\n \n n = len(matrix)\n if n == 0 or len(matrix[0]) != n:\n return False\n\n for layer in range(n // 2):\n first = layer\n last = n - 1 - layer\n for i in range(first, last):\n offset = i - first\n \n #save top\n top = matrix[first][i]\n\n # left -> top\n matrix[first][i] = matrix[last-offset][first]\n\n #bottom -> left\n matrix[last-offset][first] = matrix[last][last-offset]\n\n #right -> bottom\n matrix[last][last-offset] = matrix[i][last]\n\n #top -> right\n matrix[i][last] = top\n\n return True", "def extractTranslate(self,groups):\n self.translateX = float(groups[0])\n self.translateY = float(groups[0])\n if len(groups) == 2 and groups[1]:\n self.translateY = float(groups[1])\n self.matrix = [[1.0, 0.0, self.translateX], \\\n [0.0, 1.0, self.translateY]]", "def Translate(shift_x, shift_y): \n shifted = numpy.matrix([[1.0, 0.0, 1.0 ],\n [0.0, 1.0, 1.0 ],\n [shift_x, shift_y, 1.0 ]]) \n return shifted", "def preTransform(self, R: Rotation) -> None:\n for i,m in enumerate(self.milestones):\n assert len(m) == 18\n mq = m[:9]\n mv = m[9:]\n self.milestones[i] = so3.mul(R,mq) + so3.mul(R,mv)", "def on_draw(self):\n # Clearing the buffers\n self.clear()\n self.set3d()\n # Makes it so color can be added\n glColor3d(1, 1, 1)\n\n self.push(self.player.pos, self.player.rot)\n self.model.draw()\n glPopMatrix()\n self.model.process_queue_slowly()\n\n # Draws the crosshairs on the screen\n self.set2d()\n self.draw_position_label()\n self.draw_reticle()", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def 
rebuildMatrixCache(self):\n self.converterYUR = Mat4.convertMat(CSYupRight, self.lens.getCoordinateSystem()) * self.lens.getProjectionMat()", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def rotate(self, matrix: List[List[int]]) -> None:\n \n n = len(matrix) \n \n def rotateLayer(n,scaler):\n \n for i in range(n-1):\n \n r0 = 0 +scaler\n c0 = 0 +i +scaler\n\n r1 = 0 +i +scaler\n c1 = n-1 +scaler\n\n r2 = n-1 +scaler\n c2 = n-1 -i +scaler\n\n r3 = n-1 -i +scaler\n c3 = 0 +scaler\n \n temp = matrix[r3][c3]\n matrix[r3][c3] = matrix[r2][c2]\n matrix[r2][c2] = matrix[r1][c1]\n matrix[r1][c1] = matrix[r0][c0]\n matrix[r0][c0] = temp\n\n scaler = 0\n \n for i in range(n,1,-2):\n \n rotateLayer(i,scaler)\n scaler += 1", "def topStack_x(self):\r\n self.x_stack=0\r\n \r\n self.pixmap5=self.drawPixmap(\"yz\")\r\n self.lbl5.setPixmap(self.pixmap5)\r\n self.pixmap6= self.writeEdge(\"yz\")\r\n self.lbl6.setPixmap(self.pixmap6)\r\n self.x_stack_lbl.setText(str(self.x_stack+1) + '/' + str(self.img.shape[2]))", "def draw_next_transform(self) -> List[Callable]:\n # Sample parameters for each transformation\n angle = random.randint(-self.max_angle, self.max_angle)\n x_shift = random.uniform(-self.max_x_shift, self.max_x_shift)\n y_shift = random.uniform(-self.max_y_shift, self.max_y_shift)\n contrast = random.uniform(self.min_constrast, self.max_constrast)\n brightness = random.uniform(self.min_brightness, self.max_brightness)\n horizontal_flip = ImageTransformationBase._toss_fair_coin()\n # Returns the corresponding operations\n if random.random() < self.probability_transformation:\n ops = [self.rotate(angle),\n self.translateX(x_shift),\n self.translateY(y_shift)]\n if horizontal_flip:\n ops.append(self.horizontal_flip())\n if self.for_segmentation_input_maps:\n return ops\n ops.extend([self.adjust_contrast(contrast),\n self.adjust_brightness(brightness)])\n else:\n ops = []\n return ops", "def push_down (grid):\n grid_new=[]\n #copy grid into new grid\n grid_new=copy.deepcopy(grid)\n #invert the original grid to get a new grid\n grid_new.reverse()\n #use push function to merge the rows\n grid_2=push_up (grid_new)\n #reverse new grid\n grid_2.reverse()\n return grid_2", "def push_transform(self, data, *args, **kwargs):\n lazy_eval = kwargs.get(\"lazy\", False)\n transform_info = self.get_transform_info()\n do_transform = transform_info.get(TraceKeys.DO_TRANSFORM, True)\n kwargs = kwargs or {}\n replace = kwargs.pop(\"replace\", False) # whether to rewrite the most recently pushed transform info\n if replace and get_track_meta() and isinstance(data, MetaTensor):\n if not lazy_eval:\n xform = self.pop_transform(data, check=False) if do_transform else {}\n meta_obj = self.push_transform(data, orig_size=xform.get(TraceKeys.ORIG_SIZE), extra_info=xform)\n return data.copy_meta_from(meta_obj)\n if do_transform:\n xform = data.pending_operations.pop()\n extra = xform.copy()\n xform.update(transform_info)\n else: # lazy, replace=True, do_transform=False\n xform, extra = transform_info, {}\n 
meta_obj = self.push_transform(data, transform_info=xform, lazy=True, extra_info=extra)\n return data.copy_meta_from(meta_obj)\n kwargs[\"lazy\"] = lazy_eval\n if \"transform_info\" in kwargs and isinstance(kwargs[\"transform_info\"], dict):\n kwargs[\"transform_info\"].update(transform_info)\n else:\n kwargs[\"transform_info\"] = transform_info\n meta_obj = TraceableTransform.track_transform_meta(data, *args, **kwargs)\n return data.copy_meta_from(meta_obj) if isinstance(data, MetaTensor) else data", "def preTransform(self,R: Rotation) -> None:\n for i,m in enumerate(self.milestones):\n self.milestones[i] = so3.mul(R,m)", "def rotateXOut(self):\n MV = self.MV\n MV[:3, 2] = 1, 0, 0 # 3rd col is normal vector, make it point along x axis\n # set top left and top middle values to zero:\n MV[0, 0] = 0\n MV[0, 1] = 0\n b = MV[2, 0] # grab bottom left value\n a = np.sqrt(1 - b**2) # calc new complementary value to get normalized vectors\n #if MV[1, 0] < 0:\n # a = -a # keep a -ve, reduce jumping around of axes\n MV[1, 0] = a\n MV[2, 1] = a\n MV[1, 1] = -b # needs to be -ve of MV[2, 0]\n self.MV = MV", "def rotateXOut(self):\n MV = self.MV\n MV[:3, 2] = 1, 0, 0 # 3rd col is normal vector, make it point along x axis\n # set top left and top middle values to zero:\n MV[0, 0] = 0\n MV[0, 1] = 0\n b = MV[2, 0] # grab bottom left value\n a = np.sqrt(1 - b**2) # calc new complementary value to get normalized vectors\n #if MV[1, 0] < 0:\n # a = -a # keep a -ve, reduce jumping around of axes\n MV[1, 0] = a\n MV[2, 1] = a\n MV[1, 1] = -b # needs to be -ve of MV[2, 0]\n self.MV = MV", "def matrix_rotate(matrix, N):\n\n for layer in range(N/2):\n last_col = N - 1 - layer\n for col_idx in range(layer, last_col):\n\t # copy the top value\n\t top = matrix[layer][col_idx]\n\n\t # rotate the left to the top\n matrix[layer][col_idx] = matrix[N-1-col_idx][layer]\n\n\t # rotate the bottom to the left\n\t matrix[N-1-col_idx][layer] = matrix[N-1-layer][N-1-col_idx]\n\n\t # rotate the right to the bottom\n\t matrix[N-1-layer][N-1-col_idx] = matrix[col_idx][N-1-layer]\n\n\t # rotate the top to the right\n\t matrix[col_idx][N-1-layer] = top\n\n return matrix", "def rotate(self, matrix: List[List[int]]) -> None:\n for r in range(len(matrix)):\n for c in range(r):\n matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]\n for row in matrix:\n row.reverse()", "def __call__(self, *args):\n return _osgAnimation.RigTransformSoftware___call__(self, *args)", "def rotateZUp(self):\n MV = self.MV\n MV[:3, 1] = 0, 0, 1 # 2nd col is up vector, make it point along z axis\n # set bottom left and bottom right z values to zero:\n MV[2, 0] = 0\n MV[2, 2] = 0\n a = MV[0, 0] # grab top left value\n b = np.sqrt(1 - a**2) # calc new complementary value to get normalized vectors\n if MV[1, 0] < 0:\n b = -b # keep b -ve, reduce jumping around of axes\n MV[1, 0] = b\n MV[0, 2] = b\n MV[1, 2] = -a # needs to be -ve of MV[0, 0]\n self.MV = MV", "def rotateZUp(self):\n MV = self.MV\n MV[:3, 1] = 0, 0, 1 # 2nd col is up vector, make it point along z axis\n # set bottom left and bottom right z values to zero:\n MV[2, 0] = 0\n MV[2, 2] = 0\n a = MV[0, 0] # grab top left value\n b = np.sqrt(1 - a**2) # calc new complementary value to get normalized vectors\n if MV[1, 0] < 0:\n b = -b # keep b -ve, reduce jumping around of axes\n MV[1, 0] = b\n MV[0, 2] = b\n MV[1, 2] = -a # needs to be -ve of MV[0, 0]\n self.MV = MV", "def push(self, data):\n self.STACK.appendleft(data)\n self.SP = self.STACK[0]", "def rotate(self, matrix: List[List[int]]) -> 
None:\n n = len(matrix) # 行\n\n # 以x=y为轴翻转\n # [[1,2,3],\n # [4,5,6],\n # [7,8,9]]\n # 变为\n # [1 4 7]\n # [2 5 8]\n # [3 6 9]\n for i in range(n):\n for j in range(i, n):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # 以中点为轴翻转\n for i in range(n):\n for j in range(n // 2):\n matrix[i][j], matrix[i][n - j - 1] = matrix[i][n - j - 1], \\\n matrix[i][j]\n\n # 非原地修改写法,先上下翻转,再以x=y为轴复制对应数字\n # n = len(matrix)\n # r = list(zip(*matrix[::-1]))\n # for i in range(n):\n # for j in range(n):\n # matrix[i][j] = r[i][j]", "def reset_position(self):\n self.translate_to_point_O()\n\n # inverse rotation:\n rotation_matrix = np.stack(\n (self.pcs.i_hat, self.pcs.j_hat, self.pcs.k_hat), axis=0\n )\n\n self.rotate(rotation_matrix)", "def get_transformation_matrix(theta=45):\n\n theta = theta/360 * 2 * np.pi # in radians\n hx = np.cos(theta)\n sy = np.sin(theta)\n\n S = np.array([[1, hx, 0],\n [0, sy, 0],\n [0, 0, 1]])\n #S_inv = np.linalg.inv(S)\n #old_coords = np.array([[2, 2, 1], [6, 6, 1]]).T\n #new_coords = np.matmul(S, old_coords)\n #recovered_coords = np.matmul(S_inv, new_coords)\n #print('new coords: ', new_coords)\n #print('recovered coords: ', recovered_coords)\n return S", "def rotate(self, matrix: List[List[int]]) -> None:\n length = len(matrix)\n for row in range(length//2):\n for col in range(row, length-row-1):\n # matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row], matrix[row][col] = matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n return", "def rotate(self, matrix: List[List[int]]) -> None:\n # 对角线对称\n num_row = len(matrix)\n num_col = len(matrix[0])\n for i in range(num_row):\n for j in range(i, num_col):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n # 列对称\n for i in range(num_row):\n for j in range(num_col // 2):\n matrix[i][j], matrix[i][num_col-1-j] = matrix[i][num_col-1-j], matrix[i][j]\n return matrix", "def rotateElement(element):\n # Make sure that the rotation will not place the tetromino out of map at the top\n for sq in element:\n if sq.pos[1] <= 0:\n return\n \n for sq in element:\n screen.blit(background, sq.pos, (150, 0, 26, 26))\n \n # Also, make sure that element stays within the map after rotation\n tmpArr = []\n for sq in element:\n sq.rotate()\n tmpArr.append(sq.pos.left)\n\n leftmostElement = min(tmpArr)\n rightmostElement = max(tmpArr)\n if leftmostElement < 150:\n xShift = 150 - leftmostElement\n elif rightmostElement > 425:\n xShift = 425 - rightmostElement\n else:\n xShift = 0\n \n for sq in element:\n sq.pos.left += xShift\n screen.blit(sq.image, sq.pos)", "def convert_stack(g, op, blcok):\n\n x = op.input(\"X\")\n all_inputs = []\n for inp in x:\n all_inputs.append(g.get_node(inp))\n axis = op.attr(\"axis\")\n out = _op.stack(all_inputs, axis)\n g.add_node(op.output(\"Y\")[0], out)", "def grow_stack_arm(top):\n if top is not None and top.name in ['sandwichtop', 'sandwichtop_no_label']:\n _bot = find_sandwich_bottom(top)\n if _bot is None:\n return\n if top.ey > 0:\n top.reset_y()\n _ty = top.spr.get_xy()[1]\n _th = top.spr.get_dimensions()[1]\n _by = _bot.spr.get_xy()[1]\n _dy = _by - (_ty + _th)\n if _dy > 0:\n top.expand_in_y(_dy / top.scale)\n top.refresh()", "def push_right (grid): \r\n \r\n for row in range (4):\r\n section = []\r\n for col in range (4):\r\n section.append(grid[row][3-col])\r\n add(section) \r\n for i in range 
(4):\r\n grid[row][i] = section[3-i]", "def Reproject(points, tm, rm):\n proj_cuboid = np.array(points)\n\n rmat = np.identity(3)\n rmat[0:2] = rm\n tmat = np.identity(3)\n tmat[0:2] = tm\n\n new_cuboid = np.matmul(\n rmat, np.vstack((proj_cuboid.T, np.ones(len(points)))))\n new_cuboid = np.matmul(tmat, new_cuboid)\n new_cuboid = new_cuboid[0:2].T\n\n return new_cuboid", "def push_up (grid):\r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0\r\n #joining like numbers \r\n for i in range(3): \r\n for j in range(4): \r\n if grid[i][j]==grid[i+1][j]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i+1][j]=0\r\n #pafter adding the numbers continue to move them \r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0", "def set_matrix(self):\n theta1 = -90\n theta2 = 105\n theta3 = 180\n\n if self.number > 8:\n theta2 = 75\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glPushMatrix()\n glRotatef(theta2, 0.0, 1.0, 0.0)\n glRotatef(theta1, 1.0, 0.0, 0.0)\n glRotatef(theta3, 0.0, 0.0, 1.0)\n matrix = glGetDoublev(GL_MODELVIEW_MATRIX)\n glPopMatrix()\n glPopMatrix()\n return matrix", "def push(self,x,y):\n\t\tself.x_sum += np.sum(x,axis=0)[:,np.newaxis]\n\t\tself.y_sum += np.sum(y,axis=0)[:,np.newaxis]\n\t\tself.xy_sum += np.matmul(np.transpose(x),y)\n\t\tself.xx_sum += np.matmul(np.transpose(x),x)\n\t\tself.yy_sum += np.matmul(np.transpose(y),y)\n\t\tself.n += np.shape(x)[0]", "def enfaceStack(stack):\n enface=np.swapaxes(stack,0,1)\n enface_downsize=np.empty((enface.shape[0],256,256))\n # writeText('\\n')\n for i, frame in enumerate(enface):\n enface_downsize[i] = transform.resize(frame,(256,256),order=3,mode='reflect')\n print('\\rResizing: {:.2f} % done'.format((100.0*((i+1)/enface.shape[0]))), end='', flush=True)\n print('\\n')\n mask=np.any(enface_downsize!=0,axis=(1,2))\n enface_cleaned = enface_downsize[mask]\n\n return enface_cleaned", "def rotate(self, matrix: List[List[int]]) -> None:\n # Step 1 flip row in reverse order\n top, down = 0, len(matrix) - 1\n while top < down:\n temp = matrix[top]\n matrix[top] = matrix[down]\n matrix[down] = temp\n top += 1\n down -= 1\n\n # Step 2 flip (i,j) -> (j, i)\n for i in range(len(matrix)):\n for j in range(i+1, len(matrix[i])):\n temp = matrix[i][j]\n matrix[i][j] = matrix[j][i]\n matrix[j][i] = temp", "def postTransform(self, R:Rotation) -> None:\n for i,m in enumerate(self.milestones):\n assert len(m) == 18\n mq = m[:9]\n mv = m[9:]\n self.milestones[i] = so3.mul(mq,R) + so3.mul(mv,R)", "def _transform(self, matrix):\n for x in list(self.keys()):\n ar = self[x]\n if len(ar.shape) == 2 and ar.shape[1] == 3:\n self[x] = np.dot(matrix, ar.transpose()).transpose()", "def attrTransform(self, matrix, transform):\n for ttype, targs in self.reTransformFind.findall(transform):\n targs = list(map(lambda x: float(x), self.reNumberFind.findall(targs)))\n if ttype == 'matrix':\n newmatrix = [ targs[0], targs[1],\n targs[2], targs[3],\n targs[4], targs[5] ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'translate':\n tx = targs[0]\n ty = targs[1] if len(targs) > 1 else 0\n newmatrix = [ 1, 0, 0, 1, tx, ty ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'scale':\n sx = targs[0]\n sy = targs[1] if len(targs) > 1 else sx\n newmatrix = [ sx, 0, 0, sy, 0, 0 ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'rotate':\n if len(targs) == 1:\n alpha = 
targs[0]\n newmatrix = [ math.cos(alpha), math.sin(alpha),\n -math.sin(alpha), math.cos(alpha),\n 0, 0]\n self.matrixMul(matrix, newmatrix)\n else:\n alpha = targs[0]\n newmatrix = [ 1, 0, 0, 1, targs[1], targs[2] ]\n self.matrixMul(matrix, newmatrix)\n newmatrix = [ math.cos(alpha), math.sin(alpha),\n -math.sin(alpha), math.cos(alpha),\n 0, 0]\n self.matrixMul(matrix, newmatrix)\n newmatrix = [ 1, 0, 0, 1, -targs[1], -targs[2] ]\n self.matrixMul(matrix, newmatrix)\n elif ttype == 'skewX' or ttype == 'skewY':\n self.alert(\"skewX and skewY transformations are not supported\", elem)\n else:\n print('unknown transform type: ', ttype)\n return matrix", "def rotate(self, matrix):\n n = len(matrix)\n \n for circle in range(n//2):\n r_circle = n - circle - 1\n for i in range(circle, n - circle - 1):\n a = matrix[circle][i]\n b, matrix[i][r_circle] = matrix[i][r_circle], a\n c, matrix[r_circle][n - i - 1] = matrix[r_circle][n - i - 1], b\n d, matrix[n - i - 1][circle] = matrix[n - i - 1][circle], c\n matrix[circle][i] = d", "def PrePush(self, image):\n pass" ]
[ "0.5731514", "0.55714095", "0.5549019", "0.5508666", "0.54710567", "0.5439961", "0.54302526", "0.54240656", "0.54170287", "0.538028", "0.5372096", "0.53379256", "0.5318458", "0.52991873", "0.5291928", "0.5278679", "0.52785045", "0.5269717", "0.5255685", "0.5236553", "0.52279913", "0.52268034", "0.5202192", "0.52016306", "0.51966643", "0.51938325", "0.51860833", "0.5184506", "0.51844615", "0.5174152", "0.5169716", "0.51490206", "0.5146067", "0.51458573", "0.513794", "0.5133375", "0.5129512", "0.5124836", "0.51085097", "0.51067555", "0.5100176", "0.50974107", "0.5086993", "0.50816137", "0.5078077", "0.5075892", "0.5067279", "0.5067279", "0.5060273", "0.50544065", "0.50466853", "0.5046685", "0.5038683", "0.5037523", "0.5029422", "0.50228703", "0.50140536", "0.50082994", "0.50044847", "0.5002273", "0.4999277", "0.49990225", "0.49971864", "0.49942997", "0.4993988", "0.4985935", "0.49848026", "0.4982673", "0.49803537", "0.49781498", "0.4977233", "0.49768037", "0.4973579", "0.4971762", "0.4971762", "0.49616498", "0.4959685", "0.4955107", "0.49527502", "0.49527502", "0.4951048", "0.494951", "0.4946985", "0.49451414", "0.49425393", "0.49416482", "0.4940179", "0.49376196", "0.49337026", "0.49335894", "0.4929493", "0.49281973", "0.49276865", "0.4923777", "0.49226168", "0.4920851", "0.4918474", "0.49156833", "0.4911148", "0.49110729", "0.49060464" ]
0.0
-1
Return escaped replacement text
def esc(matchObj):
    if matchObj.group(1) == None:    # no tags found
        return u'<xsl:text>%s</xsl:text>' % \
               escape(matchObj.group(3), escDict)
    if matchObj.group(1):            # leading text and tag
        return u'<xsl:text>%s</xsl:text>%s' % \
               (escape(matchObj.group(1), escDict), matchObj.group(2))
    return matchObj.group(2)         # tag only
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def escape_tex(value):\n newval = value\n for pattern, replacement in LATEX_SUBS:\n newval = pattern.sub(replacement, newval)\n return newval", "def escape_tex(value):\n # This code, and the code that call this is courtesy of Clemens Kaposi\n # http://flask.pocoo.org/snippets/55/\n\n LATEX_SUBS = (\n (re.compile(r'\\\\'), r'\\\\textbackslash'),\n (re.compile(r'([{}_#%&$])'), r'\\\\\\1'),\n (re.compile(r'~'), r'\\~{}'),\n (re.compile(r'\\^'), r'\\^{}'),\n (re.compile(r'\"'), r\"''\"),\n (re.compile(r'\\.\\.\\.+'), r'\\\\ldots'),\n )\n\n newval = value\n for pattern, replacement in LATEX_SUBS:\n newval = pattern.sub(replacement, newval)\n return newval", "def replace_escaped_characters(data: Text) -> Text:\n return re.sub(r'\\\\(.)', r'\\1', data)", "def escape(cls, text):\n\n pattern = re.compile(r\"([^\\\\]?)<\", re.DOTALL | re.IGNORECASE);\n return pattern.sub(r\"\\1\\<\", text);", "def replace_func(match_obj):\r\n # Extract quote content.\r\n quote = match_obj.group(1)\r\n # Implement paragraphs with vspace and w/o indentation.\r\n quote = quote.replace(\r\n \"\\n\\n\", \"\\n\\n\\\\noindent\\n\")\r\n # Implement LaTeX command.\r\n result = \"\\\\enquote{%s}\" % quote\r\n return result", "def escape(orig):\n return '\"{}\"'.format(orig.replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"'))", "def escape_string(text):\n return escape(text)", "def escape(text: str) -> str:\n\n def replace(match_obj):\n \"\"\"\n Returns the match text prefixed with backslash\n\n :param re.match match_obj: The match.\n\n :rtype: str\n \"\"\"\n return '\\\\' + match_obj.group(0)\n\n return re.sub(r'[\\\\{}]', replace, text)", "def escape_tex(value, linebreaks=False):\n latex_subs = [\n (re.compile(r'\\\\'), r'\\\\textbackslash'),\n (re.compile(r'([{}_#%&$])'), r'\\\\\\1'),\n (re.compile(r'~'), r'\\~{}'),\n (re.compile(r'\\^'), r'\\^{}'),\n (re.compile(r'\"'), r\"''\"),\n ]\n if linebreaks:\n latex_subs.append((re.compile(r'\\n'), r'\\\\\\\\'))\n\n result = str(value)\n for pattern, replacement in latex_subs:\n result = pattern.sub(replacement, result)\n return result", "def texify(value):\n for k, v in REPLACEMENTS.items():\n value = value.replace(k, v)\n return mark_safe(value)", "def html_escape(text):\r\n\treturn \"\".join(html_escape_table.get(c,c) for c in text)", "def encode(self, text):\n if self.verbatim:\n return text\n # compile the regexps once. do it here so one can see them.\n #\n # first the braces.\n if not self.__dict__.has_key('encode_re_braces'):\n self.encode_re_braces = re.compile(r'([{}])')\n text = self.encode_re_braces.sub(r'{\\\\\\1}',text)\n if not self.__dict__.has_key('encode_re_bslash'):\n # find backslash: except in the form '{\\{}' or '{\\}}'.\n self.encode_re_bslash = re.compile(r'(?<!{)(\\\\)(?![{}]})')\n # then the backslash: except in the form from line above:\n # either '{\\{}' or '{\\}}'.\n text = self.encode_re_bslash.sub(r'{\\\\textbackslash}', text)\n\n # then dollar\n text = text.replace(\"$\", '{\\\\$}')\n # then all that needs math mode\n text = text.replace(\"<\", '{$<$}')\n text = text.replace(\">\", '{$>$}')\n # then\n text = text.replace(\"&\", '{\\\\&}')\n text = text.replace(\"_\", '{\\\\_}')\n # the ^:\n # * verb|^| does not work in mbox.\n # * mathmode has wedge. hat{~} would also work.\n text = text.replace(\"^\", '{\\\\ensuremath{^\\\\wedge}}')\n text = text.replace(\"%\", '{\\\\%}')\n text = text.replace(\"#\", '{\\\\#}')\n text = text.replace(\"~\", '{\\\\~{}}')\n if self.insert_newline:\n # HACK: insert a blank before the newline, to avoid \n # ! 
LaTeX Error: There's no line here to end.\n text = text.replace(\"\\n\", '~\\\\\\\\\\n')\n elif self.mbox_newline:\n text = text.replace(\"\\n\", '}\\\\\\\\\\n\\\\mbox{')\n if self.insert_none_breaking_blanks:\n text = text.replace(' ', '~')\n # unicode !!! \n text = text.replace(u'\\u2020', '{$\\\\dagger$}')\n return text", "def escape(text):\n return text.replace('&', '&amp;'). \\\n replace('<', '&lt;'). \\\n replace('>', '&gt;').replace('\"', '&quot;'). \\\n replace(\"'\", '&#39;')", "def html_escape(text):\n return \"\".join(html_escape_table.get(c,c) for c in text)", "def html_escape(text):\n return \"\".join(html_escape_table.get(c,c) for c in text)", "def html_escape(text):\n return escape(text, escape_table)", "def escape(self, text, escape_chars):\n _bs = \"\\\\\"\n # backslash is always escaped\n text = text.replace(_bs, _bs * 2)\n for _el in escape_chars:\n assert _el != _bs, \"Backslash has been already escaped\"\n text = text.replace(_el, _bs + _el)\n return text", "def escape(text):\n return text_type(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')", "def _escape_backticks(text: str, escape_with='\\u200b'):\r\n return text.replace('`', '`'+escape_with)", "def _safe(text):\n return text.replace(\"'\", \"''\").replace(\"\\\\\", \"\\\\\\\\\")", "def encode(self, text):\n if self.verbatim:\n return text\n # compile the regexps once. do it here so one can see them.\n #\n # first the braces.\n if not self.__dict__.has_key('encode_re_braces'):\n self.encode_re_braces = re.compile(r'([{}])')\n text = self.encode_re_braces.sub(r'{\\\\\\1}',text)\n if not self.__dict__.has_key('encode_re_bslash'):\n # find backslash: except in the form '{\\{}' or '{\\}}'.\n self.encode_re_bslash = re.compile(r'(?<!{)(\\\\)(?![{}]})')\n # then the backslash: except in the form from line above:\n # either '{\\{}' or '{\\}}'.\n text = self.encode_re_bslash.sub(r'{\\\\textbackslash}', text)\n\n # then dollar\n text = text.replace(\"$\", '{\\\\$}')\n if not ( self.literal_block or self.literal or self.mathmode ):\n # the vertical bar: in mathmode |,\\vert or \\mid\n # in textmode \\textbar\n text = text.replace(\"|\", '{\\\\textbar}')\n text = text.replace(\"<\", '{\\\\textless}')\n text = text.replace(\">\", '{\\\\textgreater}')\n # then\n text = text.replace(\"&\", '{\\\\&}')\n # the ^:\n # * verb|^| does not work in mbox.\n # * mathmode has wedge. hat{~} would also work.\n # text = text.replace(\"^\", '{\\\\ensuremath{^\\\\wedge}}')\n text = text.replace(\"^\", '{\\\\textasciicircum}')\n text = text.replace(\"%\", '{\\\\%}')\n text = text.replace(\"#\", '{\\\\#}')\n text = text.replace(\"~\", '{\\\\textasciitilde}')\n # Separate compound characters, e.g. \"--\" to \"-{}-\". 
(The\n # actual separation is done later; see below.)\n separate_chars = '-'\n if self.literal_block or self.literal:\n # In monospace-font, we also separate \",,\", \"``\" and \"''\"\n # and some other characters which can't occur in\n # non-literal text.\n separate_chars += ',`\\'\"<>'\n # pdflatex does not produce doublequotes for ngerman.\n text = self.babel.double_quotes_in_tt(text)\n if self.font_encoding == 'OT1':\n # We're using OT1 font-encoding and have to replace\n # underscore by underlined blank, because this has\n # correct width.\n text = text.replace('_', '{\\\\underline{ }}')\n # And the tt-backslash doesn't work in OT1, so we use\n # a mirrored slash.\n text = text.replace('\\\\textbackslash', '\\\\reflectbox{/}')\n else:\n text = text.replace('_', '{\\\\_}')\n else:\n text = self.babel.quote_quotes(text)\n text = text.replace(\"_\", '{\\\\_}')\n for char in separate_chars * 2:\n # Do it twice (\"* 2\") becaues otherwise we would replace\n # \"---\" by \"-{}--\".\n text = text.replace(char + char, char + '{}' + char)\n if self.insert_newline or self.literal_block:\n # Insert a blank before the newline, to avoid\n # ! LaTeX Error: There's no line here to end.\n text = text.replace(\"\\n\", '~\\\\\\\\\\n')\n elif self.mbox_newline:\n if self.literal_block:\n closings = \"}\" * len(self.literal_block_stack)\n openings = \"\".join(self.literal_block_stack)\n else:\n closings = \"\"\n openings = \"\"\n text = text.replace(\"\\n\", \"%s}\\\\\\\\\\n\\\\mbox{%s\" % (closings,openings))\n # lines starting with \"[\" give errors.\n text = text.replace('[', '{[}')\n if self.insert_none_breaking_blanks:\n text = text.replace(' ', '~')\n if self.latex_encoding != 'utf8':\n text = self.unicode_to_latex(text)\n return text", "def HtmlEscape(text):\n return escape(text, _HTML_ESCAPE_TABLE)", "def replace(match_obj):\n return '\\\\' + match_obj.group(0)", "def refined_text(text):\n import re\n text = text.replace('<e1>','')\n text = text.replace('</e1>','')\n text = text.replace('<e2>','')\n text = text.replace('</e2>','')\n\n text = text[1:-1] # trim quotes\n # text = text.replace('\"','')\n # text = text.replace(',','')\n # text = text.replace('.','')\n # text = text.replace(';','')\n # text = text.replace('`','')\n # text = text.replace('\\'','')\n # text = text.replace('(','')\n # text = text.replace(')','')\n # text = text.replace('/','')\n\n return text", "def addslashes(val):\n return re.escape(val)", "def dummyOutEscapeCharacters(self, text):\n \n return re.sub(\"\\\\\\\\.\", \"\\$\", text)\n \n #escape = False\n #escapedText = text\n \n #for i in range(len(text)):\n #if escape:\n #escapedText = escapedText[:i] + self.DUMMY_CHAR + escapedText[i+1:]\n #escape = False\n #elif text[i] == \"\\\\\":\n #escape = True\n #return escapedText", "def _as_inline_code(text):\n escaped = text.replace(\"`\", r\"\\`\")\n return f\"`{escaped}`\"", "def replace_special(text):\r\n text = text.replace('\\r\\n', ' ')\r\n text = text.replace('\\n', ' ')\r\n text = text.replace('``', \"''\")\r\n text = text.replace('`', \"'\")\r\n text = text.replace('“', '\"')\r\n text = text.replace('”', '\"')\r\n text = text.replace('’', \"'\")\r\n text = text.replace('‘', \"'\")\r\n text = text.replace(\"'\", \"'\")\r\n text = text.replace('–', \"-\")\r\n text = text.replace('\\\"', '\"')\r\n text = text.replace(\"\\'\", \"'\")\r\n return text", "def escape(self, text):\n\t\tif not self.escape_html or text is None:\n\t\t\treturn text\n\n\t\treturn (\n\t\t\ttext.replace('&', '&amp;').replace('<', 
'&lt;')\n\t\t\t.replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')\n\t\t)", "def _escape(self,text):\n\t\tif not text: return \"_\" # escape empty string\n\t\ttext = text.replace(\"_\",\"__\") # escape underscores\n\t\ttext = text.replace(\" \",\"_\") # escape spaces\n\t\ttext = text.replace(\"-\",\"--\") # escape dashes\n\t\ttext = text.replace(\"''\",'\"') # escape double quote\n\t\ttext = text.replace(\"?\",\"~q\") # escape question marks\n\t\ttext = text.replace(\"%\",\"~p\") # escape question marks\n\t\ttext = text.replace(\"#\",\"~h\") # escape question marks\n\t\ttext = text.replace(\"/\",\"~s\") # escape question marks\n\t\treturn text", "def htmlEscape(text):\n try:\n result = '\"\".join(html_escape_table.get(c,c) for c in text)'\n except SyntaxError:\n print \"HTML mode not supported prior to Python 2.4\"\n sys.exit(1)\n return result", "def escape(txt):\n txt = sax_escape(txt, entities=ENTITIES)\n return mark_safe(txt)", "def _escapeSpecialCharacters(text):\n text.replace('\\\\', '\\\\\\\\')\n escape = ['~', '#', '&', '%', '_']\n for c in escape:\n text = text.replace(c, '\\\\' + c )\n return text", "def escape_quote(text):\n return text_type(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;')", "def escape_triple_quotes(text):\n return text.replace(u'\"\"\"', u'\\\\\"\\\\\"\\\\\"')", "def shQuote(text):\n\treturn \"'%s'\" % text.replace(\"'\", r\"'\\''\")", "def escape_django_tags(txt):\n for source, dest in ENTITIES.iteritems():\n txt = txt.replace(source, dest)\n return txt", "def quotemeta(text):\n return re.sub(\"(\\W)\", r\"\\\\\\1\", text)", "def escape_replace(s, old, new, escape='\\\\'):\n newstr = []\n if len(old) == 0:\n return\n\n for i, c in enumerate(s):\n if old[0] == c and s[i-1] != escape:\n newstr.extend(new)\n else:\n newstr.append(c)\n return ''.join(newstr)", "def _replace(match):\n match = match.groups()[0]\n if match in _html_escapes:\n ret = _html_escapes[match]\n else:\n ret = unicode(chr(int(match[1:])), 'latin-1')\n return ret", "def _escape(s):\n assert isinstance(s, str), \\\n \"expected %s but got %s; value=%s\" % (type(str), type(s), s)\n s = s.replace(\"\\\\\", \"\\\\\\\\\")\n s = s.replace(\"\\n\", \"\\\\n\")\n s = s.replace(\"\\t\", \"\\\\t\")\n s = s.replace(\",\", \"\\t\")\n return s", "def html_escape(text):\n L=[]\n for c in text:\n L.append(html_escape_table.get(c,c))\n return \"\".join(L)", "def escape(text):\n if text is None:\n return\n else:\n return cgi.escape(text).encode('ascii', 'xmlcharrefreplace')", "def addpoemslashes(value):\n return value.replace(\"\\r\", \"\").replace(\"\\n\", ' / ')", "def escape_latex_characters(line):\n line = line.replace('\\\\', '\\\\textbackslash')\n line = line.replace('&', '\\&')\n line = line.replace('%', '\\%')\n line = line.replace('$', '\\$')\n line = line.replace('#', '\\#')\n line = line.replace('_', '\\_')\n line = line.replace('{', '\\{')\n line = line.replace('}', '\\}')\n line = line.replace('~', '\\\\textasciitilde')\n line = line.replace('^', '\\\\textasciicircum')\n line = line.replace('<', '\\\\textless')\n line = line.replace('>', '\\\\textgreater')\n return line", "def escape(self, value):\n return re.sub(r\"\\$\", \"$$\", value)", "def substitute(line:str) -> str:\n to_replace=( \n (r'\\$', '$$'), # replace $ with $$ to prevent interpretation by conky\n (r'#', '\\#'), # replace # with \\# \n (r'\\x1b\\[([0-9;]*)m', csi_to_conky),# ESC[(x;y;z;...)m => CSI code to convert\n )\n for (pattern, repl) in to_replace:\n 
line=re.sub(pattern, repl, line)\n \n return line", "def escape_string(value: str) -> str:\n\n def replace(match: Match) -> str:\n return ESCAPE_DCT[match.group(0)]\n\n return ESCAPE.sub(replace, value)", "def escape(self):\n pass", "def initial_quotes(self, text):\n\n quote_finder = re.compile(r\"\"\"\n ( # Start group capture\n (\"|&ldquo;|&\\#8220;) # A double quote\n | # Or\n ('|&lsquo;|&\\#8216;) # A single quote\n ) # End group capture\n \"\"\", re.VERBOSE)\n\n replace_function = lambda match: \"\"\"<span class=\"%s\">%s</span>\"\"\"\\\n % ('dquo' if match.group(2) else 'quo', match.group(1))\n text = quote_finder.sub(replace_function, text, 1) \n \n return text", "def escape_cell(cell):\n cell = cell.replace(u'\\\\', u'\\\\\\\\')\n cell = cell.replace(u'\\n', u'\\\\n')\n cell = cell.replace(u'|', u'\\\\|')\n return cell", "def esc_quotes(strng):\n\n return strng.replace('\"','\\\\\"').replace(\"'\",\"\\\\'\")", "def escape(s):\r\n return str(s).replace('<', '&lt;').replace('>', '&gt;')", "def html_escape(text): \n html_escape_table = {\n \"&\": \"&amp;\",\n '\"': \"&quot;\",\n \"'\": \"&apos;\",\n \">\": \"&gt;\",\n \"<\": \"&lt;\",\n }\n return \"\".join(html_escape_table.get(c,c) for c in text)", "def replace(text,pattern,replace=\"\"):\n\n thisFunc = inspect.currentframe().f_code.co_name\n result = re.sub(pattern,replace,text)\n return result", "def escape(s, format=HTML):\r\n #Note: If you have to make sure that every character gets replaced\r\n # only once (and if you cannot achieve this with the following code),\r\n # use something like u\"\".join([replacedict.get(c,c) for c in s])\r\n # which is about 2-3 times slower (but maybe needs less memory).\r\n #Note: This is one of the most time-consuming parts of the template.\r\n # So maybe speed this up.\r\n\r\n if format is None or format == NONE:\r\n pass\r\n elif format == HTML:\r\n s = s.replace(u\"&\", u\"&amp;\") # must be done first!\r\n s = s.replace(u\"<\", u\"&lt;\")\r\n s = s.replace(u\">\", u\"&gt;\")\r\n s = s.replace(u'\"', u\"&quot;\")\r\n s = s.replace(u\"'\", u\"&#39;\")\r\n elif format == LATEX:\r\n #TODO: which are the \"reserved\" characters for LaTeX?\r\n # are there more than these?\r\n s = s.replace(\"\\\\\", u\"\\\\backslash{}\") #must be done first!\r\n s = s.replace(\"#\", u\"\\\\#\")\r\n s = s.replace(\"$\", u\"\\\\$\")\r\n s = s.replace(\"%\", u\"\\\\%\")\r\n s = s.replace(\"&\", u\"\\\\&\")\r\n s = s.replace(\"_\", u\"\\\\_\")\r\n s = s.replace(\"{\", u\"\\\\{\")\r\n s = s.replace(\"}\", u\"\\\\}\")\r\n else:\r\n raise ValueError('Invalid format (only None, HTML and LATEX are supported).')\r\n return unicode(s)", "def _escaped_text_from_text(text, escapes=\"eol\"):\n #TODO:\n # - Add 'c-string' style.\n # - Add _escaped_html_from_text() with a similar call sig.\n import re\n\n if isinstance(escapes, base_string_type):\n if escapes == \"eol\":\n escapes = {'\\r\\n': \"\\\\r\\\\n\\r\\n\", '\\n': \"\\\\n\\n\", '\\r': \"\\\\r\\r\"}\n elif escapes == \"whitespace\":\n escapes = {'\\r\\n': \"\\\\r\\\\n\\r\\n\", '\\n': \"\\\\n\\n\", '\\r': \"\\\\r\\r\",\n '\\t': \"\\\\t\", ' ': \".\"}\n elif escapes == \"eol-one-line\":\n escapes = {'\\n': \"\\\\n\", '\\r': \"\\\\r\"}\n elif escapes == \"whitespace-one-line\":\n escapes = {'\\n': \"\\\\n\", '\\r': \"\\\\r\", '\\t': \"\\\\t\", ' ': '.'}\n else:\n raise ValueError(\"unknown text escape style: %r\" % escapes)\n\n # Sort longer replacements first to allow, e.g. 
'\\r\\n' to beat '\\r' and\n # '\\n'.\n escapes_keys = list(escapes.keys())\n try:\n escapes_keys.sort(key=lambda a: len(a), reverse=True)\n except TypeError:\n # Python 2.3 support: sort() takes no keyword arguments\n escapes_keys.sort(lambda a,b: cmp(len(a), len(b)))\n escapes_keys.reverse()\n def repl(match):\n val = escapes[match.group(0)]\n return val\n escaped = re.sub(\"(%s)\" % '|'.join([re.escape(k) for k in escapes_keys]),\n repl,\n text)\n\n return escaped", "def escape(t):\n return (t\n .replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\n .replace(\"'\", \"&#39;\").replace('\"', \"&quot;\")\n )", "def unescape(self, text):\r\n try:\r\n stash = self.markdown.treeprocessors['inline'].stashed_nodes\r\n except KeyError:\r\n return text\r\n def get_stash(m):\r\n id = m.group(1)\r\n value = stash.get(id)\r\n if value is not None:\r\n try:\r\n return self.markdown.serializer(value)\r\n except:\r\n return '\\%s' % value\r\n \r\n return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)", "def QuotedEscaped (s):\n return repr(s)", "def escape_em(text):\n return re.sub(r\"''(.*?)''\",\n r'<em><font color=\"#39499b\">\\1</font></em>',\n text)", "def markdown_escape(text):\n return text.translate(MARDOWN_TRANS)", "def fix(text):\n\n text = text.replace(\"\\\\\", \"\\\\\\\\\")\n text = text.replace(\"{\", \"\\\\{\").replace(\"}\", \"\\\\}\")\n text = _nonAsciiPattern.sub(_replace, text)\n return text", "def _html_esc(string):\n repls = {\n '<': 'lt',\n '>': 'gt',\n '&': 'amp',\n '\"': 'quot',\n }\n\n def repl(matchobj):\n return \"&%s;\" % repls[matchobj.group(0)]\n\n regex = \"([%s])\" % ''.join(repls.keys())\n return re.sub(regex, repl, string)", "def escape(text):\n if isinstance(text, list):\n for i, t in enumerate(text):\n t = t.replace(r'\\&', r'&amp;')\n t = t.replace(r'<', r'&lt;')\n t = t.replace(r'>', r'&gt;')\n text[i] = t\n else:\n text = text.replace(r'\\&', r'&amp;')\n text = text.replace(r'<', r'&lt;')\n text = text.replace(r'>', r'&gt;')\n return text", "def xhtml_escape(value):\r\n return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)],\r\n to_basestring(value))", "def escape(cls, html):\n return (\"%s\" % (html)).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def fix_output(text: str) -> str:\n\n text = text.replace(\" n't\", \"n't\")\n return text", "def escape(t):\r\n return (t\r\n .replace(\"&\", \"&amp;\").replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")\r\n .replace(\"´\", \"&#39;\").replace('\"', \"&quot;\").replace(\"'\",'&apos;')\r\n )", "def escape(s, pattern=r'(\\W)'):\n r = re.compile(pattern)\n return r.subn(r'\\\\\\1', s)[0]", "def sanitize(text):\n #text = re.sub(r'[*]',r'\\*',text) \n text = re.sub(r'~',r'\\~',text) \n #text = re.sub(r'<',r'\\textless',text) \n #text = re.sub(r'>',r'\\textgreater',text) \n text = re.sub(r'\\|',r'\\|',text) \n text = re.sub(r'_',r'\\\\_',text) \n return text", "def replace_symbol(text, replacement_text=\"\"):\n\n return __RE_SYMBOL.sub(replacement_text, text)", "def htmlquote(text):\r\n text = text.replace(\"&\", \"&amp;\") # Must be done first!\r\n text = text.replace(\"<\", \"&lt;\")\r\n text = text.replace(\">\", \"&gt;\")\r\n text = text.replace(\"'\", \"&#39;\")\r\n text = text.replace('\"', \"&quot;\")\r\n return text", "def encodeText(text):\r\n#\treturn repr( quote_plus(text.replace(\"'\", '\"')) )\r\n\ttry:\r\n\t\treturn repr( quote_plus(text.replace(\"'\", '\"').encode('utf-8')) 
)\r\n\texcept:\r\n\t\tlogError(\"encodeText()\")\r\n\treturn repr(text.replace(\"'\", '\"'))", "def convert_text(s):\n for d in config.repl: # loaded from config.py\n if \"flags\" in d:\n s = re.sub(d[\"ptrn\"], d[\"repl\"], s, flags=d[\"flags\"])\n else:\n s = re.sub(d[\"ptrn\"], d[\"repl\"], s)\n return s", "def md_escape(raw):\n md_unsafe = (\n ('[', '&#91;'),\n (']', '&#93;'),\n )\n escaped = escape(raw)\n for unsafe, safer in md_unsafe:\n escaped = escaped.replace(unsafe, safer)\n return escaped", "def escape_perl_string(v):\n s = str(v).replace(\"$\", \"\\\\$\").replace(\"\\\"\", \"\\\\\\\"\").replace(\"@\", \"\\\\@\")\n return re.sub('[\\n\\t\\r]+', ' ', s)", "def escape_single_quote(unescaped):\n\t# requirements = re\n\treturn re.sub(r'(\\'|\\\\)', r'\\\\\\1', unescaped)", "def _escape(var):\n return f\"({re.escape(var)})\"", "def ansi_escape(text: object) -> str:\n return str(text).replace(\"\\x1b\", \"?\").replace(\"\\b\", \"?\")", "def quote_slashes(text):\r\n return re.sub(ur'[;/]', _quote_slashes, text)", "def _escape(html):\n return encoding.force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def re_sub(pattern, replacement, txt):\n # for some reason the java security managar does not accept module 're'\n # return re.sub(\"[ \\\\n]+\", \" \", txt)\n pattern = regex.Pattern.compile(pattern)\n matcher = pattern.matcher(txt)\n return matcher.replaceAll(replacement)", "def defang_text(text):\n text = text.replace(\"'\", \"''\")\n text = text.replace('\"', '\"\"')\n return text", "def escape_lg(s):\n return s.replace('>','\\>').replace('<','\\<')", "def escape_meme_text(text):\n replacements = {\n \" \": \"_\",\n \"?\": \"~q\",\n \"%\": \"~p\",\n \"#\": \"~h\",\n \"/\": \"~s\",\n \"''\": \"\\\"\",\n }\n\n for r in replacements.keys():\n text = text.replace(r, replacements[r])\n\n return text", "def test_symlit_escape():\n return \"\\\"=\\\"\"", "def substitute_macros(text):\n f_text = text\n for (pattern,replacement) in context.environment.items():\n replacement = replacement.replace(os.path.sep,'/')\n f_text = f_text.replace('$(%s)' % pattern.upper(), replacement)\n return f_text", "def escape(raw_string): \n return ''.join(\n [_caret_escapes_for_unprintables.get(c, c) for c in raw_string])", "def escape_newlines(self, the_string):\n the_string = the_string.replace('\\n', r'\"\"\\n\"\"')\n return the_string", "def EscapeBackslashes(string: Text) -> Text:\n precondition.AssertType(string, Text)\n return string.replace(\"\\\\\", \"\\\\\\\\\")", "def escape_latex(s):\n return \"\".join(LATEX_CHARS.get(c, c) for c in s)", "def escape(text):\n if (isinstance(text, basestring)):\n try: text = encode(text)\n except: text = copy(text)\n text = text.replace(\"&\", \"&amp;\")\n text = text.replace(\"<\", \"&lt;\")\n text = text.replace(\">\", \"&gt;\")\n return text", "def _latex_(self):\n return \"\\\\textnormal{Extended code coming from %s}\" % self.original_code()", "def escape(input):\n # first correct the HTML\n output=html(input)\n # print \"HTML is: %s\" % output\n # then escape it\n output=atpic.cleaner_escape.escape(output)\n # print \"ESCAPD is: %s\" % output\n return output", "def md_inline_code(raw_text):\n return '`%s`' % md_escape(raw_text, characters='`')", "def substitution(plainText, key):\n return plainText", "def escapeLaTeX(self,string):\n string_to_escape = \"{(&$#%)}\" # fixed the problem of producing \\[ \\] math environment\n new_str_list = map(lambda x: \"\\\\\" + x if x in 
string_to_escape else x,\n string)\n new_symbolfied_list = map(lambda x: symbols.unicode_to_latex_dict[x] if x in symbols.unicode_to_latex_dict else x, \n new_str_list)\n return ''.join(new_symbolfied_list)", "def REGEXREPLACE(text, regular_expression, replacement):\n return re.sub(regular_expression, replacement, text)", "def replacement(self):\n assert (self.src or self.inline) and not (self.src and self.inline)\n if self.src:\n return '<script async type=\"text/javascript\" src=\"%s\"></script>' % urllib.quote(self.src)\n else:\n return '<script>\\n%s\\n</script>' % self.inline" ]
[ "0.7457513", "0.72451824", "0.70088613", "0.7005204", "0.69938093", "0.69180393", "0.6908549", "0.6808867", "0.67575586", "0.6719324", "0.6687371", "0.66694325", "0.66232425", "0.6604365", "0.6604365", "0.65418816", "0.65407073", "0.6530385", "0.6503265", "0.64981174", "0.6381249", "0.6344406", "0.6343246", "0.6279259", "0.6275575", "0.6255139", "0.62501997", "0.6249956", "0.62471217", "0.62390524", "0.62199146", "0.61937577", "0.6189551", "0.6177561", "0.61755675", "0.61661625", "0.61605906", "0.6159079", "0.6154964", "0.6153839", "0.6107416", "0.6106963", "0.6086809", "0.6079646", "0.60761374", "0.60710883", "0.6065726", "0.60630274", "0.60553664", "0.60429543", "0.6040348", "0.6031483", "0.60259837", "0.60134673", "0.5999098", "0.59959084", "0.59743905", "0.59706193", "0.59638643", "0.5950558", "0.5941718", "0.5939941", "0.59339625", "0.5906462", "0.5905818", "0.5902308", "0.5900619", "0.58999527", "0.5899127", "0.5898337", "0.5896697", "0.58752704", "0.58731073", "0.5872923", "0.58669335", "0.58665425", "0.58644617", "0.58596486", "0.5846205", "0.58403414", "0.58400404", "0.5833482", "0.5833067", "0.58311653", "0.5829118", "0.5824201", "0.58171654", "0.58095425", "0.5802473", "0.5793698", "0.579341", "0.57927364", "0.57876945", "0.57803315", "0.5772333", "0.57703185", "0.5743068", "0.5742994", "0.5723974", "0.5711092" ]
0.6266012
25
Any prefix, suffix, html info in attrs dict
def __init__(self, name, attrs={}):
    self.name = name
    self.enName = ''    # used only by fileFormat field for i18n
    self.format = attrs.get(u'format', self.defaultFormat)
    self.prefix = attrs.get(u'prefix', '')
    self.suffix = attrs.get(u'suffix', '')
    # defaults to no html (line breaks preserved)
    self.html = attrs.get(u'html', '').startswith('y') and True or False
    self.isRequired = attrs.get(u'required', '').startswith('y') and \
                      True or False
    self.hidden = attrs.get(u'hidden', '').startswith('y') and \
                  True or False
    try:
        self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines)))
    except ValueError:
        self.numLines = 1
    self.initDefault = attrs.get(u'init', '')
    self.linkAltField = attrs.get(u'linkalt', '')
    self.parentLevel = 0
    self.useFileInfo = False
    self.showInDialog = True
    self.initFormat()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):\n\n # Merge the attr dict and kw dict into a single attributes\n # dictionary (rewriting any attribute names, extracting\n # namespaces, and merging some values like css classes).\n attributes = {} # dict of key=(namespace,name): value=attribute_value\n if attr:\n for a, v in attr.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n if kw:\n for a, v in kw.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n\n # Add title attribute if missing, but it has an alt.\n if ('html', 'alt') in attributes and ('html', 'title') not in attributes:\n attributes[('html', 'title')] = attributes[('html', 'alt')]\n\n # Force both lang and xml:lang to be present and identical if\n # either exists. The lang takes precedence over xml:lang if\n # both exist.\n #if ('html', 'lang') in attributes:\n # attributes[('xml', 'lang')] = attributes[('html', 'lang')]\n #elif ('xml', 'lang') in attributes:\n # attributes[('html', 'lang')] = attributes[('xml', 'lang')]\n\n # Check all the HTML attributes to see if they are known and\n # allowed. Ignore attributes if in non-HTML namespaces.\n if allowed_attrs:\n for name in [key[1] for key in attributes if key[0] == 'html']:\n if name in _common_attributes or name in allowed_attrs:\n pass\n elif name.startswith('on'):\n pass # Too many event handlers to enumerate, just let them all pass.\n else:\n # Unknown or unallowed attribute.\n err = 'Illegal HTML attribute \"%s\" passed to formatter' % name\n raise ValueError(err)\n\n # Finally, format them all as a single string.\n if attributes:\n # Construct a formatted string containing all attributes\n # with their values escaped. Any html:* namespace\n # attributes drop the namespace prefix. We build this by\n # separating the attributes into three categories:\n #\n # * Those without any namespace (should only be xmlns attributes)\n # * Those in the HTML namespace (we drop the html: prefix for these)\n # * Those in any other non-HTML namespace, including xml:\n\n xmlnslist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if not k[0]]\n htmllist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] == 'html']\n otherlist = ['%s:%s=\"%s\"' % (k[0], k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] and k[0] != 'html']\n\n # Join all these lists together in a space-separated string. 
Also\n # prefix the whole thing with a space too.\n htmllist.sort()\n otherlist.sort()\n all = [''] + xmlnslist + htmllist + otherlist\n return ' '.join(all)\n return ''", "def back_to_tag(tag, attrs):\n sol = '<' + tag\n for (prop, val) in attrs:\n sol += ' ' + prop + '=\"' + val + '\"'\n sol += '>'\n return sol", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def extend_attribute_dictionary(attributedict, ns, name, value):\n\n key = ns, name\n if value is None:\n if key in attributedict:\n del attributedict[key]\n else:\n if ns == 'html' and key in attributedict:\n if name == 'class':\n # CSS classes are appended by space-separated list\n value = attributedict[key] + ' ' + value\n elif name == 'style':\n # CSS styles are appended by semicolon-separated rules list\n value = attributedict[key] + '; ' + value\n elif name in _html_attribute_boolflags:\n # All attributes must have a value. According to XHTML those\n # traditionally used as flags should have their value set to\n # the same as the attribute name.\n value = name\n attributedict[key] = value", "def add_attrs(value, arg):\n try:\n # Split list on comma\n kv_pairs = arg.split(\",\")\n except ValueError:\n raise template.TemplateSyntaxError(\n \"add_attrs requires as an argument a string in the format 'key:value, key1:value1, key2:value2...'\"\n )\n\n\n # Create dictionary\n html_attrs = dict()\n\n # Clean items and add attribute pairs to dictionary\n for item in kv_pairs:\n item = item.strip()\n k, v = item.split(\":\")\n html_attrs.update({k.strip():v.strip()})\n\n return value.as_widget(attrs=html_attrs)", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = 
attr['value']\r\n\r\n return format", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def attrs(**kwds):\n\n def decorate(f):\n for k in kwds:\n setattr(f, k, kwds[k])\n return f\n\n return decorate", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def _attrs(self, element, attrs):\n for attr, val in list(attrs.items()):\n element.setAttribute(attr, val)\n return element", "def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def fix_attributes(string):\n defs = re.compile('<dl class=\"attribute\">(?P<descrip>.*?)</dl>',flags=re.DOTALL)\n name = re.compile('<code class=\"descclassname\">(?P<name>[^<]*)</code>')\n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefsub = ''\n remnsub = remain[match.start(1):match.end(1)]\n descrip = name.search(remnsub)\n if descrip:\n prefix += remnsub[:descrip.start()]\n prefix += remnsub[descrip.end():]\n prefix += remain[match.end(1):match.end(0)]\n else:\n prefix += remain[match.start(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n 
attrs.update(extra_attrs)\n return attrs", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc = '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def prepare_node_attrs(self):", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def attkey_to_SVG_attribs(self,k):\n atts= k.split('@')\n o= ''\n acodes= {'C':'stroke','W':'stroke-width','S':'stroke-dasharray','O':'stroke-opacity'}\n for a in atts:\n if a[0] in acodes:\n o+= '%s=\"%s\" ' % (acodes[a[0]],a[1:])\n# elif a[0] == 'S': # Maybe do something special like this.\n# o+= 'stroke-dasharray=\"%\" ' % a[1:]\n return o", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def handle_starttag(self, tag, attrs):\n forbidden_tags = ['data-srcset', 'srcset']\n if tag != 'a':\n attr = dict(attrs)\n self.links_text.append(attr)\n else:\n if tag not in forbidden_tags:\n attr = dict(attrs)\n self.links.append(attr)", "def parse_tag_attrs(tag_str, options_d=None, font_d=None, case=\"\", **kwargs):\n attr_b = kwargs.pop(\"attr\", \"\")\n auto_b = kwargs.pop(\"auto\", False)\n font_d = kwargs.pop(\"font_d\", font_d or {})\n options_d = kwargs.pop(\"options_d\", options_d or {})\n case = kwargs.pop(\"case\", case)\n widget = 
kwargs.pop(\"widget\", None)\n text_w = kwargs.pop(text_s, None)\n bad_opts = []\n # INTs: height repeatdelay repeatinterval underline width; size fun fov\n for keyval in split_attrs(tag_str):\n if \"=\" in keyval:\n key, val = keyval.split(\"=\")\n val = unquote(val)\n elif keyval:\n key, val = keyval, None\n else:\n continue\n key = key.lower()\n key2, key3, key4 = key[:2], key[:3], key[:4]\n lowval = val.lower() if val else val\n key = unalias(key)\n kalias = alias(key)\n if val == \"None\": # in ('False', 'None') #\n pass\n elif key3 in (\n bg_s,\n background_s[:3],\n fg_s,\n foreground_s[:3],\n ) or kalias in (bg_s, fg_s):\n options_d.update(**{key: val})\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n options_d.update(**{key: val})\n if auto_b and compound_s not in options_d:\n options_d.update(compound=tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],) or kalias == bd_s:\n options_d.update(borderwidth=val)\n elif key4 in (command_s[:4], compound_s[:4],) or kalias in (\n command_as,\n compound_as,\n ):\n options_d.update(**{key: val})\n elif (\n key2 in (height_s[:2], width_s[:2])\n or key3 in (repeatdelay_s[:3], repeatinterval_s[:3])\n or kalias\n in (height_as, width_as, repeatdelay_as, repeatinterval_as)\n ):\n options_d.update(**{key: int(val)})\n elif (\n key2 in (cursor_s[:2],)\n or key3 == font_s[:3]\n or kalias in (cursor_as, font_as)\n ):\n options_d.update(**{key: val})\n elif key2 in (\"r\", relief_s[:2],) or kalias == relief_as:\n options_d.update(relief=val)\n if auto_b and borderwidth_s not in options_d and val != tk.FLAT:\n options_d.update(borderwidth=str(1))\n elif key2 == underline_s[:2] or kalias == underline_as:\n options_d.update(underline=-1 if val is None else int(val))\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ) or kalias in (selectbackground_as, selectforeground_as):\n options_d.update(**{key: val})\n # special for fonts\n elif key2 in (family_s[:2],) or kalias == family_as:\n font_d[family_s] = val\n elif key2 in (size_s[:2],) or kalias == size_as:\n try:\n font_d[size_s] = int(val)\n except ValueError:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: ERROR Setting Font Size to %r\" % val,\n Raise=True,\n )\n elif key3 in (bold_as, tk_font.BOLD[:3]) or kalias == bold_as:\n font_d[weight_s] = (\n tk_font.BOLD\n if str(val) not in (\"0\", \"False\",)\n else tk_font.NORMAL\n )\n elif key2 in (weight_s[:2],) or kalias == weight_as:\n font_d[weight_s] = val\n elif key2 in (italic_as, tk_font.ITALIC[:2]) or kalias == italic_as:\n font_d[slant_s] = (\n tk_font.ITALIC\n if str(val) not in (\"0\", \"False\",)\n else tk_font.ROMAN\n )\n elif key2 in (slant_s[:2],) or kalias == slant_as:\n font_d[slant_s] = val\n elif (\n key3 in (funderline_as, funderline_s[:3])\n or kalias == funderline_as\n ):\n font_d[underline_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n elif (\n key3 in (foverstrike_as, foverstrike_s[:3])\n or kalias == foverstrike_as\n ):\n font_d[overstrike_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n # special \"case\" implementation\n elif key3 in (case_s[:3],) or kalias == case_as:\n for s in (upper_s, capitalize_s, lower_s, title_s, swapcase_s):\n if s.startswith(lowval):\n case = s if s != capitalize_s else upper_s\n break\n elif (\n key2 == upper_s[:2]\n or key3 in (capitalize_s[:3],)\n or kalias in (upper_as, capitalize_as)\n ):\n if str(val) not in (\"0\", \"False\",):\n case = upper_s\n elif key2 in (lower_s[:2],) 
or kalias == lower_as:\n if str(val) not in (\"0\", \"False\",):\n case = lower_s\n elif key2 == title_s[:2] or kalias == title_as:\n if str(val) not in (\"0\", \"False\",):\n case = title_s\n elif key2 == swapcase_s[:2] or kalias == swapcase_as:\n if str(val) not in (\"0\", \"False\",):\n case = swapcase_s\n elif key in ():\n bad_opts.append((key, val))\n else:\n options_d.update(**{key: val})\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n if attr_b:\n return (\n case\n if attr_b == case_s\n else options_d.get(attr_b, font_d.get(attr_b))\n )\n return options_d, font_d, case", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def get_attributes(self) -> Dict[str, str]:\n pass", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def replace_tag_attributes(code_attrs, tag, tag_attrs):\n\n new_attrs = code_attrs.copy()\n for key, value in tag_attrs.items():\n if key in new_attrs:\n new_attrs[key] = new_attrs[key].replace(tag, value)\n\n return new_attrs", "def extensible_attributes():\n return 'extensibleattributedef?'", "def unknown_starttag(self, tag, attrs):\n starttrs = \"\".join(['%s=\"%s\"' % (key, value) for key, value in attrs])\n self.pieces.append(\"<%(tag)s %(starttrs)s>\" % locals())", "def date_attrs(name):\n attrs = battrs(name)\n attrs.update({'class': 'form-control datepicker'})\n return attrs", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in 
kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def read_attribs(self):\n\n attribs = {}\n while self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def assign_attrs(elem, attrs):\n for k, v in attrs:\n # assign attr k with v\n # override class\n elem.set(sanitize_name(k), v)", "def extract_request(self, attrs):\n try:\n _base = self.attributes()\n _new_dict = {}\n for key in attrs:\n if key in _base:\n _new_dict[key] = attrs[key]\n return _new_dict\n except Exception as e:\n print(e)", "def transform(attrs: dict) -> dict:\n\n pass", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def gen_tag_attrs(widget=None, options_d=None, font=None, case=None, **kwargs):\n auto_b = kwargs.get(\"auto\", False)\n case = kwargs.get(case_s, case)\n extend_b = kwargs.get(\"extend\", False)\n font = kwargs.pop(\"font\", font or {})\n index_i = kwargs.pop(\"index\", None)\n kmode_s = kwargs.get(\"kmode\", \"\") # a=alias, o=options, ''=unchanged\n options_d = kwargs.pop(\"options\", options_d or {})\n pare_b = kwargs.get(\"pare\", True)\n widget = kwargs.pop(\"widget\", widget)\n text_w = kwargs.get(text_s, None)\n recurse_b = kwargs.pop(\"recurse\", widget and isinstance(widget, TTWidget))\n fmt_s = \"\"\n font_d = {}\n w_font_d, w_options_d = {}, {}\n if index_i is not None and widget is None:\n raise Exception(\"Cannot set 'index' when 'widget' is None\")\n if widget: # and isinstance(widget, TTWidget): #\n excludes_t = () if widget.emulation_b else ()\n w_options_d = {\n k: v[-1]\n for k, v in widget.config().items()\n if len(v) == 5 and str(v[-1]) != str(v[-2]) and k not in excludes_t\n }\n try:\n w_options_d[case_s] = widget.case\n except AttributeError:\n pass\n w_font = widget.cget(font_s) # w_options_d.pop(font_s, None)\n w_font_d = get_font_dict(w_font) if w_font else {}\n if pare_b and w_font_d:\n def_w_font = widget.config(font_s)[-2]\n def_w_font_d = get_font_dict(def_w_font)\n w_font_d = pare_dict(w_font_d, def_w_font_d)\n if font:\n if isinstance(font, str):\n try:\n font = tk_font.nametofont(font)\n except tk.TclError:\n pass\n elif type(font) in 
(list, tuple):\n font = tk_font.Font(font=font)\n if isinstance(font, tk_font.Font):\n font = font.actual()\n if isinstance(font, dict):\n font_d = font\n if case: # is not None:\n options_d = _merge_dicts(options_d, dict(case=case))\n d = _merge_dicts(\n w_options_d,\n convert_font_dict_to_ttoptions_dict(w_font_d),\n options_d,\n convert_font_dict_to_ttoptions_dict(font_d),\n kwargs,\n )\n bad_opts = []\n for key, val in d.items():\n key = key.lower()\n if key in (\"auto\", \"extend\", \"kmode\", \"pare\",): # text_s, ): #\n continue\n key2, key3, key4 = key[:2], key[:3], key[:4]\n kalias = alias(key)\n koption = unalias(key)\n if kmode_s:\n if kmode_s[0] == \"a\": # alias\n keyout = kalias\n kfunc = alias\n auto_cpd, auto_bd = compound_as, bd_s\n elif kmode_s[0] == \"o\": # option\n keyout = koption\n kfunc = unalias\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n else:\n keyout = key\n kfunc = str\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n if val:\n val = quote(val)\n if (\n key3 in (bg_s, background_s[:3], fg_s, foreground_s[:3])\n or key2 == underline_s[:2]\n or kalias in (bg_s, fg_s, underline_as)\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_cpd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_cpd, tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],):\n if \"%s=%s \" % (auto_bd, 1) in fmt_s:\n if val != 1:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_bd, 1), \"%s=%s \" % (keyout, val)\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key4 in (compound_s[:4],) or kalias == compound_as:\n if \"%s=%s \" % (auto_cpd, tk.CENTER) in fmt_s:\n if val != tk.CENTER:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_cpd, tk.CENTER),\n \"%s=%s \" % (keyout, val),\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == cursor_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == font_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, get_named_font(val))\n elif key2 in (relief_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_bd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_bd, 1)\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sbd_s,\n selectborderwidth_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n # special for fonts\n elif key2 in (family_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (size_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (weight_s[:2],):\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.BOLD),\n 1\n if isinstance(val, str) and val.lower() == tk_font.BOLD\n else 0,\n )\n elif key2 == slant_s[:2]:\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.ITALIC),\n 1\n if isinstance(val, str) and val.lower() == tk_font.ITALIC\n else 0,\n )\n elif key3 in (funderline_as, funderline_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(funderline_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n elif key3 in (foverstrike_as, foverstrike_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(foverstrike_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n # special \"case\" implementation\n elif key3 == case_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(case_s), val)\n elif key2 == upper_s[:2] or key3 == capitalize_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(upper_s), val)\n elif key2 in (lower_s[:2], title_s[:2], swapcase_s[:2]):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key in ():\n bad_opts.append((key, val))\n elif key 
in (text_s, text_as):\n if extend_b or widget:\n fmt_s += \"%s=%s \" % (keyout, val)\n else:\n # bad_opts.append((key, val))\n fmt_s += \"%s=%s \" % (keyout, val)\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n fmt = fmt_s.strip()\n if widget and isinstance(widget, TTWidget) and recurse_b:\n fmt = [\n fmt,\n ]\n for _, gathering in widget._get_kids(items=True):\n child = gathering[\"label\"]\n case = gathering.get(case_s, \"\")\n kid_options = {\n k: v[-1]\n for k, v in child.config().items()\n if len(v) == 5\n and str(v[-1]) != str(v[-2])\n and (k, v[-1]) not in w_options_d.items()\n and not (k in label_override_d and str(v[-1]) == \"0\")\n } #\n cf = kid_options.pop(font_s, None)\n cdf = child.config(font_s)[-2]\n if cf != cdf:\n c_font_d = pare_dict(get_font_dict(cf), get_font_dict(cdf))\n else:\n c_font_d = {}\n if case:\n kid_options.update(case=case)\n fmt.append(\n gen_tag_attrs(options=kid_options, font=c_font_d, **kwargs)\n )\n return fmt if index_i is None else fmt[index_i]", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def parse_tag_attrs(\n self, tags_str, options_d=None, font_d=None, case=\"\", **kwargs\n ):\n return parse_tag_attrs(\n tags_str,\n options_d,\n font_d,\n case,\n widget=self,\n text=getattr(self, \"debug_text\", None),\n **kwargs\n )", "def set_attrs(self, username, attrs):\n pass", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}", "def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)", "def get_html_element_attributes(self):\n html_element_attributes = {\n 'class': self.css_classes or False, # Fall back to false to avoid class=\"\"\n }\n if self.should_render_as_link():\n html_element_attributes['href'] = self.url\n return html_element_attributes", "def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n 
transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def get_attributes(doc):\n\treturn doc.keys()", "def gen_tag_attrs(self, *a, **kw):\n if kw.get(\"widget\", sentinel) is not None:\n raise Exception(\n \"TTToolTip.gen_tag_attrs(): 'widget' keyword must be set\"\n \" to None\"\n )\n return gen_tag_attrs(None, *a, **kw)", "def modify_html(html):\n def modify_attr(attr):\n nonlocal html\n html = re.sub(\n r'(?<=' + attr + r'=\")(?!/)',\n '/static/bundle/',\n html\n )\n modify_attr('src')\n modify_attr('href')\n return html", "def bind_attrs(cls, params: Dict, metadata: XmlMeta, attrs: Dict, ns_map: Dict):\n\n if not attrs:\n return\n\n for qname, value in attrs.items():\n var = metadata.find_attribute(qname)\n if var and var.name not in params:\n cls.bind_attr(params, var, value, ns_map)\n else:\n var = metadata.find_any_attributes(qname)\n if var:\n cls.bind_any_attr(params, var, qname, value, ns_map)", "def applyAttrs(data, attrs):\n\tassert(len(data[0]) == len(attrs) + 1)\n\tnum_attrs = len(attrs)\n\tnum_instances = len(data)\n\n\tout = [None] * len(data)\n\tfor row in range(num_instances):\n\t\tinstance = data[row]\n\t\tout[row] = [instance[0]] + ['?' if instance[i+1] == '?' 
else attrs[i]['vals'][int(instance[i+1])] for i in range(num_attrs)]\n\n\treturn out", "def split_attrs(s, *a, **kw):\n return split_attrs(s, *a, **kw)", "def attrs(self):\n return list(name for name in self.__dict__\n if not name.startswith(\"_\"))", "def _build_kwargs(element, plugin):\n lookup_table = PrefixLookupDict(plugin['args'])\n kwargs = {}\n for attr in element.attributes:\n if attr.name in lookup_table:\n kwargs[lookup_table[attr.name]] = attr.value\n element.removeAttribute(attr.name)\n return kwargs", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def catch_unquoted_attrs(self, text, attrlist):\n for tup in attrlist:\n (an, av) = tup\n rgx = \"%s\\s*=\\s*\" % (an) \\\n + \"['\" \\\n + '\"]%s[\"' % (re.escape(av)) \\\n + \"']\"\n q = re.search(rgx, self.unescape(text))\n if q == None:\n self.errmsg(\"unquoted attribute in '%s'\" % (text))", "def _process_attrs(attrs):\n new_attrs = OrderedDict()\n for attr in attrs:\n col = attr\n if isinstance(attr, tuple):\n col, attr = attr\n # special cases\n if attr == 'class_name':\n attr = '__class__.__name__'\n if attr == 'repr':\n attr = repr\n new_attrs[col] = attr\n\n return new_attrs", "def init_attrs(self):\n raise NotImplementedError", "def extra_from_record(self, record):\n return {\n attr_name: record.__dict__[attr_name]\n for attr_name in record.__dict__\n if attr_name not in BUILTIN_ATTRS\n }", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def parse_any_attribute(cls, value: str, ns_map: Dict) -> str:\n prefix, suffix = text.split(value)\n if prefix and prefix in ns_map and not suffix.startswith(\"//\"):\n value = build_qname(ns_map[prefix], suffix)\n\n return value", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def _parse_attr(self, attr_proto):\n attrs = {}\n for key, value in attr_proto.items():\n attrs[key] = self._get_attr(value)\n\n return attrs", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict" ]
[ "0.7300376", "0.6683452", "0.6659504", "0.64727515", "0.64727515", "0.62650347", "0.6180911", "0.6167985", "0.6159097", "0.61425793", "0.60942423", "0.60043186", "0.59867054", "0.5983518", "0.5977403", "0.59542763", "0.5950838", "0.59435284", "0.593862", "0.5918919", "0.5918368", "0.59109366", "0.5907472", "0.59048265", "0.590289", "0.58816016", "0.5822607", "0.58213097", "0.58151597", "0.58151597", "0.58102083", "0.5772015", "0.5729163", "0.5698077", "0.56733686", "0.56685674", "0.56532747", "0.56465954", "0.56446564", "0.5634013", "0.56172764", "0.56169415", "0.5576985", "0.55390626", "0.5528054", "0.5517172", "0.55077755", "0.5504294", "0.5486108", "0.5481757", "0.54804087", "0.54701954", "0.5464042", "0.545664", "0.5456021", "0.5441045", "0.54372007", "0.5434729", "0.5433024", "0.5427747", "0.54269344", "0.540738", "0.5404263", "0.53995836", "0.53959", "0.5379427", "0.53749627", "0.53729516", "0.53649145", "0.53607893", "0.53446513", "0.5334334", "0.5331949", "0.5330269", "0.5327214", "0.5325136", "0.5323124", "0.53130394", "0.5312749", "0.53097475", "0.5296279", "0.5292913", "0.52825433", "0.5277434", "0.5275497", "0.5268138", "0.5265727", "0.5264679", "0.52634853", "0.5256518", "0.5256518", "0.5238555", "0.5236359", "0.52302825", "0.5229071", "0.5229068", "0.52253085", "0.5222722", "0.5218228", "0.5216617", "0.5216027" ]
0.0
-1
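The negatives in the record above are variations on one theme: serializing an attribute mapping into an HTML/tag attribute string (`string_for_attrs`, `attr`, `attrsToString`, `format_attributes`, ...). A minimal self-contained sketch of that shared pattern — the function name and escaping choice here are illustrative, not taken from any single negative:

import html

def render_tag(tag, attrs):
    # join attrs as key="value" pairs with escaped values, as several
    # of the negatives above do with '%s="%s"' formatting
    parts = [f'{key}="{html.escape(str(val))}"' for key, val in attrs.items()]
    return f"<{tag} {' '.join(parts)}>" if parts else f"<{tag}>"

print(render_tag('a', {'href': '/home', 'class': 'nav'}))  # <a href="/home" class="nav">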
Called by base init, after class change or format text change
def initFormat(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, text):\n\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self):\n self.text = ''", "def set_text(self):\n pass", "def post_init(self):\n\t\tpass", "def init_widget(self):\n super(UiKitTextView, self).init_widget()\n self.init_text()", "def __init__(self,\n text: str) -> None:\n\n super().__init__(text)", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.formatList = []", "def _post_init(self):\n pass", "def __post_init__(self):\n pass", "def initWidgets(self):\n self.lambdtext.setText(str(self.lambd))\n self.ptext.setText(str(self.p))", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def _init_display(self):\n raise NotImplementedError", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_objectapp_signals()", "def __init__(self, font='mediumbold'):\n\tself.set_font(font)", "def __init__(self):\r\n self.label = \"Bulk Layout Text Replace\"\r\n self.alias = \" Jake's Toolbox Alias Property True\"\r\n self.description = \"\"\r\n self.canRunInBackground = False", "def __post_init__(self):\n super().__post_init__()", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_gstudio_signals()", "def after_parsing(self):", "def __init__(self, as_text=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.as_text = as_text", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()", "def after_init(self) -> None:\n if self.options.format.lower() != \"default_notebook\":\n self.error_format = self.options.format\n if not hasattr(self, \"color\"):\n self.color = True", "def afterInit(self):", "def post_init(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self, name, time, text):\n pass", "def init(self):", "def init(self):", "def __init__(self,txt=u'',unicodeEncoding='utf-8',verbose=False,tagID=0):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup,self).__init__(__txt=None,__rawTxt=txt,\n __SCOPEUPDATED=False,__VERBOSE=verbose,\n __tagID=tagID,\n __unicodeEncoding=unicodeEncoding)\n self.__cleanText()", "def __init__(self):\n\t\t# Setup fonts\n\t\tself.large_font = self._get_font(1,Annotator.THICK)\n\t\tself.large_font_outline = self._get_font(1,Annotator.THICK + Annotator.BORDER)\n\t\t\n\t\tself.small_font = self._get_font(0.5,Annotator.THIN)\n\t\tself.small_font_outline = self._get_font(0.5,Annotator.THIN + Annotator.BORDER)\n\t\t\n\t\t# Text colour\n\t\tself.colour = Annotator.COLOUR_BUSY\n\t\t\n\t\tself.forehead = (0,0,1,1)\n\t\tself.face = (0,0,1,1)", "def onInit(self):\n pass", "def _afterInit(self):\n pass", "def __init__(self, **kwargs):\n # We set it to True so that starting empty lines are\n # not counting as separators\n self.last_line_was_empty = True", "def _post_init(self) -> None:\n return", "def _init(self):", "def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )", "def __init__(self, text=\"\", widget=None):\n self._label_text = text\n self._widget = widget\n self._widget.on_change = self._update\n super().__init__(text=f\"{text} {widget.value}\")", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self): \r\n pass", "def init_widget(self):", "def __init__(self):\n ## Global initialization\n self.default_initialization()\n ## Initial function set\n self.selfdriven = False\n self._format_default_functions()\n ## Check descriptormodel\n self._assert_correctness()", "def __init__(self):\n self.content = \"\"", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def initWidgets(self):\n self.loctext.setText(\"{0:g}\".format(self.loc))\n self.scaletext.setText(\"{0:g}\".format(self.scale))", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def initDocTagText(self):\n self.doc, self.tag, self.text = Doc().tagtext()", "def __init__(\n self,\n type,\n text):\n self.type = type\n self.text = text", "def _init(self):\n pass", "def _initialize(self):\n \n self.view.lineEdit_3.setText(\"C,H,N,O,P,S\")\n self.view.spin_hit.setValue(20)\n self.view.lineEdit_2.setValue(10.)\n self.view.checkBox_8.setChecked(True)", "def format(self):\n ...", "def init(self) -> None:", "def update_editor ( self ):\n super( TextFontEditor, self ).update_editor()\n set_font( self )", "def __init__(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self):\n\t\tprint(\"Class initilised\")", "def __init__(self, text='', **kwargs):\n Control.__init__(self, text=text, **kwargs)", "def on_transEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()", "def __init__(self, text, idx):\n self.text = text\n self.idx = idx", "def __init__(self):\n self.update_state()", "def set_initial_values(self):\n #Stores each line of the text file in a list\n self.text = []\n \n #Scrolling distance\n self.scroll = 0\n\n #Zooming level (font size) \n self.zoom = 12\n\n #Factor by which is decrement self.zoom\n self.factor = 0\n\n #Number of tabs spaces before a line\n self.indent = 0\n\n #Flag to only set up pango descriptions only once \n self.set_pc = 1\n\n #list of indetation level of all lines\n self.tab_index = []\n\n #Total line count\n self.line_count = 0\n\n #line number of line rendered off top of window \n self.min_text = 0\n #line number of line rendered off bottom of window \n self.max_text = 50\n\n #y position for cairo for the text at the top\n self.min_cairo = 20\n\n #y position for text at bottom\n self.max_cairo = 20\n\n #x positiong for indented text\n self.tab_cairo = 20", "def __init__(self):\n fmt = \"%(message)s\"\n super().__init__(fmt=fmt)\n\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def __init__(self, text):\n self.text = text\n self.letters = [letters[c] for c in self.text]\n self.width = sum(let.width + 1 for let in self.letters)\n self._offset = width\n self.is_done = False", "def __init__(self, text=None, settings=None, style='General', language='en'):\n\n self._text = None\n self._settings = None\n self._style = None\n self._language = None\n\n self.text = text\n self.settings = settings\n self.style = style\n self.language = language", "def init(self) -> None:\n self.started = False\n self.lines = []\n self.text = ''\n self.graphics = ''\n self.ids = {}\n self.first_line_added = False\n\n self.used_fonts = set()\n self.current_line_used_fonts = set()\n self.current_height = 0\n self.lines = []\n\n line_width = self.width - (self.indent if self.is_first_line else 0)\n self.current_line = PDFTextLine(\n self.fonts, line_width, self.text_align, self.line_height\n )\n\n self.last_indent = 0\n self.last_state = self.last_factor = self.last_fill = None\n self.last_color = self.last_stroke_width = None\n\n self.y_ = 0", "def _settext(self, textEntered):\n if textEntered.strip() == '':\n textEntered=self.data['initialtext']\n self.entry.enterText(textEntered)\n else:\n if callable(self.data['callback']): self.data['callback'](textEntered)\n if self.data['autoexit'] and callable(self.data['exit']):\n # NOTE not safe to call here user callback...\n taskMgr.doMethodLater(.5, self.data['exit'], '_ntryxt')", "def __init__(self, edit: QtWidgets.QTextEdit, out=None, color=None):\n self.edit = edit\n self.out = out\n self.color = color", "def on_load(self):\n self.__init__()", "def __init__():", "def __init__(self) -> None:\n str.__init__(self)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._ansi_escape_codes = True", "def do_init(self):\n\n pass", "def initialize(self):\n\t\tpass", "def run_init(self):\n InitEditor(self.root, self)", "def __init(self):\n 
print(\"Welkam tu mobail lejen\")", "def __init__(self, text, tag, start ,end):\n\n self.text = six.text_type(text)\n self.tag = copy.copy(tag)\n self.end = end\n self.start = start" ]
[ "0.70883477", "0.6957401", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6801035", "0.67764556", "0.67764556", "0.6772573", "0.67218834", "0.6665987", "0.6530844", "0.6495981", "0.6494592", "0.6494592", "0.6490198", "0.6401653", "0.6355695", "0.63224435", "0.627716", "0.627716", "0.62600374", "0.6241324", "0.6241043", "0.6223984", "0.6216441", "0.6214059", "0.62072545", "0.6179023", "0.61773074", "0.6165903", "0.6150355", "0.61494476", "0.6145963", "0.6123563", "0.6106276", "0.6106276", "0.61052555", "0.6075407", "0.606871", "0.60595924", "0.6050179", "0.6039118", "0.6025508", "0.60182106", "0.60180503", "0.5996569", "0.5996569", "0.5996569", "0.5996569", "0.5993615", "0.5956698", "0.59549457", "0.59410423", "0.5936671", "0.5926797", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.5922803", "0.59159535", "0.59074825", "0.59036523", "0.59019417", "0.5898051", "0.58926487", "0.5887501", "0.5887218", "0.58803314", "0.5877826", "0.5868464", "0.58638364", "0.5862526", "0.58605254", "0.5853759", "0.5833662", "0.58296865", "0.5820315", "0.5815491", "0.58068454", "0.579537", "0.57909584", "0.57830495", "0.5776756", "0.5769101", "0.5765869", "0.5761965", "0.5755533", "0.57552737" ]
0.7095915
0
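`initFormat` in the record above is an empty hook: the `changeType` record further down swaps the instance's class at runtime and then re-runs `initFormat` so the new class can rebuild format-derived state (several negatives do exactly that, e.g. `self.formatList = self.splitText(self.format)`). A minimal sketch of the pattern — class and attribute names here are assumed for illustration:

class Field:
    defaultFormat = ''

    def __init__(self, name):
        self.name = name
        self.format = self.defaultFormat
        self.initFormat()  # hook: run after init and after any class/format change

    def initFormat(self):
        pass  # subclasses rebuild format-derived state here


class ChoiceFormat(Field):
    defaultFormat = 'yes/no'

    def initFormat(self):
        self.formatList = self.format.split('/')  # derived state


field = ChoiceFormat('Answer')
print(field.formatList)  # ['yes', 'no']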
Assign other field's parameters to this field
def duplicateSettings(self, otherField):
    self.name = otherField.name
    self.enName = otherField.enName
    self.format = otherField.format
    self.prefix = otherField.prefix
    self.suffix = otherField.suffix
    self.html = otherField.html
    self.isRequired = otherField.isRequired
    self.hidden = otherField.hidden
    self.numLines = otherField.numLines
    self.initDefault = otherField.initDefault
    self.linkAltField = otherField.linkAltField
    self.parentLevel = otherField.parentLevel
    self.useFileInfo = otherField.useFileInfo
    self.showInDialog = otherField.showInDialog
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _assign_fields_to_params(cls, fields, params):\n if fields is None:\n fields = cls.get_default_read_fields()\n if fields:\n params['fields'] = ','.join(fields)", "def set(self, other):\n if self.id is None:\n self._values = other\n else:\n if type(other) is float:\n self._set_val(other)\n if type(other) is _np.ndarray:\n self._set_array(other)\n if _com.isField(other):\n self._set_array(other.get())", "def populate(self, **kw):\n for name, field in self:\n if name in kw:\n field.__set__(self, kw[name])", "def set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def copy_values(self, another):\n\n # Copy all value, uncertainty, and source information from the other\n # ExoParameter object.\n if isinstance(another, ExoParameter):\n self.reference = another.reference\n self.uncertainty = another.uncertainty\n self.uncertainty_lower = another.uncertainty_lower\n self.uncertainty_upper = another.uncertainty_upper\n self.units = another.units\n self.url = another.url\n self.value = another.value\n else:\n raise TypeError(\"Cannot copy values from a non-ExoParameter obj!\")", "def fill(self, other):\n if self.stream_id is None:\n self.stream_id = other.stream_id\n\n if self.type is None:\n self.type = other.type\n\n if self.length is None:\n self.length = other.length\n\n if self.timestamp is None:\n self.timestamp = other.timestamp\n\n assert self.stream_id is not None\n assert self.type is not None\n assert self.length is not None\n assert self.timestamp is not None\n assert self.object_id is not None", "def set_parameter_values(self, c1, c2):\n self.c1 = c1\n self.c2 = c2", "def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])", "def hard_update(self,target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\t\ttarget_param.data.copy_(param.data)", "def set_field( self, data ):\n self.val[:] = data[:]\n return", "def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)", "def update_params(self, other):\n if isinstance(other, Params):\n found = False\n for key, param in other._src.items():\n if key in self._src:\n self._src[key] = param\n found = True\n\n if not found:\n raise RuntimeError(\n \"Tried to set parameters which do not exist in the target model.\"\n )\n else:\n raise RuntimeError(\"Attempt to stream non-parameter list to parameter list.\")", "def _set_params(self,x):\r\n self.k1._set_params(x[:self.k1.num_params])\r\n self.k2._set_params(x[self.k1.num_params:])", "def _set_params(self,x):\r\n self.k1._set_params(x[:self.k1.num_params])\r\n self.k2._set_params(x[self.k1.num_params:])", "def updateFromContext(self, other):\n value = self.valueType.set(self.value, other.value)\n self.set(value)\n self.origins.extend(other.origins)", "def set(self, **kwargs):\n field_names = self.get_field_names()\n for name, value in kwargs.iteritems():\n if name in field_names:\n setattr(self, name, value)", "def part(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)", "def _populate(self, fields):\n schema = self.schema\n for k, v in fields.items():\n fields[k] = schema.fields[k].iget(self, v)\n\n self.modify(fields)\n self.reset_modified()", "def set_params(self, **kwargs):\n ...", "def set_params(self):\r\n pass", "def setValue(self,val):\n for f,v in zip(self.fields,val):\n f.setValue(v)", "def set_params(self, params):", "def update_params(self):\n pass", "def _set_attributes(self):", "def update(self, other):\n fields = None\n if 
isinstance(other, dict):\n fields = other\n elif isinstance(other, Torrent):\n fields = other.fields\n else:\n raise ValueError('Cannot update with supplied data')\n for k, v in fields.iteritems():\n self.fields[k.replace('-', '_')] = v", "def set_fields(self, fields: FieldDict):\n super().set_fields(fields)\n # bind fields to attrs\n for attr in ('a', 'b'):\n setattr(self, f'field_{attr}', self.fields[getattr(self, attr)])\n # get error messages\n dump_error = self.error_cls(self.get_error_message(\n self.op, a=self.field_a.dump_source, b=self.field_b.dump_source))\n load_error = self.error_cls(self.get_error_message(\n self.op, a=self.field_a.load_source, b=self.field_b.load_source))\n # set partial arguments for `validate`\n self.validate_dump = partial(\n self.validate,\n a_key=self.field_a.dump_target,\n b_key=self.field_b.dump_target,\n error=dump_error)\n self.validate_load = partial(\n self.validate,\n a_key=self.field_a.load_target,\n b_key=self.field_b.load_target,\n error=load_error)", "def _copy(self, p):\n ASParameters._copy(self, p)\n if isinstance(p, self._get_class()):\n if self._has(\"theta\"):\n p._.theta = self._.theta\n if self._has(\"omega\"):\n p._.omega = copy(self._.omega)\n elif self._has(\"omega\"):\n if isinstance(p, PolyASParameters):\n p._.omega = self._.omega.transpose()\n elif not p._has(\"P\") and not p._has(\"Q\"):\n p._.P = self.eigenmatrix()\n p._.antipodal = self._.antipodal\n p._.bipartite = self._.bipartite\n if self._has(\"r\"):\n p._.r = self._.r\n if self._has(\"antipodal_subscheme\"):\n p._.antipodal_subscheme = self._.antipodal_subscheme\n if self._has(\"bipartite_subscheme\"):\n p._.bipartite_subscheme = self._.bipartite_subscheme", "def paramodulant(self, other):\n return _coconut_tail_call((sub_once), other, {self.a: self.b, self.b: self.a})", "def set_params(self):\n raise NotImplementedError", "def __init__(self, **variables):\n vars(self).update(variables)", "def set_params(self, *arg):\n pass", "def update(self, other):\n if isinstance(other, ParameterDict):\n for key, value in other.items():\n self._type_converter[key] = other._type_converter[key]\n self._dict[key] = value\n else:\n for key, value in other.items():\n self[key] = value", "def _set_data(self, new_data):\n for name, field in self._get_fields().items():\n if name in new_data:\n try:\n setattr(self, f\"__{name}\", field.from_raw(new_data[name]))\n except (fields.ValidationError, ValueError):\n # should at least log validation and value errors\n # this can happen in case of e.g. 
fields type change\n pass", "def define_parameters(self):", "def field(self, field):\n\n self._field = field", "def set_related_params(self,request,responsedata):\n pass", "def set_field(self,Hext):\n self.raw_parameters[\"Hext\"] = Hext\n self.parameters = NormalizedParameters(self.raw_parameters)\n self._load()", "def _set_model_field(self):\n self._field_value = hutils.format_json(self._memory_data)\n setattr(self._model, self._field, self._field_value)", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def __post_init__(self):\n # Only do this if source_data already exists (not during its own initialization)\n if \"SOURCE_DATA\" in globals():\n for data_field in fields(self):\n setattr(self, data_field.name, getattr(SOURCE_DATA, data_field.name))", "def _update_params(self):\n pass", "def fields(self, fields):\n\n self._fields = fields", "def field_values(self, field_values):\n\n self._field_values = field_values", "def _initializeRequestField(self,field,referenceField):\n\t\tvaluesDict = referenceField.values\n\t\tfield.initialize_values(valuesDict)\n\t\t\n\t\tpass", "def fill(self, **kwargs):\r\n for name in kwargs.keys():\r\n setattr(self, name, kwargs[name])\r\n return self", "def full_init_self(self, db, field_name, model):\n if not self.db:\n self.__class__.db = db\n\n self.field_name = field_name\n self.model = model # property", "def set_params(self, **params):\n return super().set_params(**params)", "def PopulateCommonFieldValues(self, field, mojom_field):\n field.name = mojom_field.decl_data.short_name\n field.kind = self.KindFromMojom(mojom_field.type)\n field.attributes = self.AttributesFromMojom(mojom_field)", "def merge(self, other):\n self._moments = merge_pqc([self, other])._moments\n self._parameters = sp.symarray(self.parameter_symbol, len(self.symbols))\n if self.flatten_circuit:\n self.flatten()", "def hard_update(target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def _set_params(self,x):\r\n self.k._set_params(x)", "def update(self, other):\n\n fields = None\n if isinstance(other, dict):\n fields = other\n elif isinstance(other, Session):\n fields = other.fields\n else:\n raise ValueError('Cannot update with supplied data')\n\n for k, v in fields.iteritems():\n self.fields[k.replace('-', '_')] = v", "def intialize_from_fields(self):\n raise NotImplementedError", "def set_field( self, data ):\n super( UnsteadyField1D, self ).set_field( data )\n self.history[:] = self.val[:]\n return", "def __radd__(self, other: 'ModelParameters') -> 'ModelParameters':\n return self.__add__(other)", "def __add__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec + other.elec\n p.magn[:] = self.magn + other.magn\n return p\n else:\n raise DataError(\"Type error: cannot add %s to %s\" % (type(other), type(self)))", "def set_params(self, **params):\n\n return super().set_params(**params)", "def _setVals(self, *args, **kwargs):\n pass", "def updateFromFields(self, fields, data):\n self._fields = fields\n data = [d if d is not None else '' for d in data]\n for field,val in zip(fields, data):\n setattr(self, field, val)", "def set(self, request, _object):\n\n value = request._get_parameter_value(self)\n value.object = _object", "def __add__(self, other):\n return self.__class__(\n {\n name:\n self.__getattribute__(name) + other.__getattribute__(name)\n 
for name in self._fields\n }\n )", "def updateParameters(self):\n\n return", "def copy_fields(self, entity, all_fields=False):\n\n if all_fields:\n fields = self.get_all_fields()\n else:\n fields = self.get_non_pk_fields()\n\n for field in fields.keys():\n setattr(self, field, getattr(entity, field, None))", "def relate(self, other):\n ...", "def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()", "def set_params(self, w, b):\n self.w = w\n self.b = b\n return", "def _update(self, other):\n # NOTE: detail map properties should NEVER be overridden. NEVER. EVER. kthx.\n if other.use_alpha:\n self.use_alpha = True\n if other.mipmap:\n self.mipmap = True", "def copy_from_other(self, other):\n self.data = other.data\n self.url = other.url\n self.container_factory = other.container_factory", "def __add__(self, other):\n self.__dict__.update(other)\n return self", "def updateParameters(self, parameters):\n # if parameters[0].altered:\n # parameters[1].value = arcpy.ValidateFieldName(parameters[1].value,\n # parameters[0].value)\n return", "def update(self, **kwargs):\r\n self.reset() # delete old calculations, before updating parameters\r\n\r\n # First, clone an object, then update remaining parameters\r\n if 'clone' in kwargs:\r\n if kwargs['clone'] is not None: self.clone = kwargs['clone']\r\n del kwargs['clone']\r\n\r\n for K, v in kwargs.items():\r\n if v is not None: setattr(self, K, v)\r\n\r\n return self", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def updateParameters(self, parameters):", "def __init__(self, **kwargs):\n fields = get_fields(type(self))\n fields = dict((field.field_name, field) for field in fields)\n for name, value in kwargs.items():\n object.__setattr__(self, name, value)\n \n # Get the default values\n if kwargs:\n for name, field in fields.items():\n if not field.auto_increment and not name in kwargs:\n default = field.default\n if default is None:\n raise ValueError(\"the field {} of model {} has no \" \\\n \"default value\".format(field.field_name,\n type(self)))\n elif callable(default):\n default = default(self)\n\n object.__setattr__(self, name, default)\n \n # If named parameters were specified, save the object\n if kwargs and Model.data_connector:\n with Model.data_connector.u_lock:\n Model.data_connector.add_object(self)", "def set_fields(self, fields: FieldDict):\n super().set_fields(fields)\n nested_field: NestedField = self.fields[self.nested]\n if not isinstance(nested_field, NestedField):\n raise TypeError(\n f'The field \"{self.nested}\" must be a NestedField instance, not \"{nested_field}\".')\n if nested_field.many:\n raise ValueError(f'The field \"{self.nested}\" can not be set as \"many=True\".')\n self.nested_field = nested_field\n # create partial methods\n self._do_dump = partial(\n getattr(self, self.dump_method),\n target=nested_field.dump_target,\n method=nested_field.dump,\n )\n self._do_load = partial(\n getattr(self, self.load_method),\n target=nested_field.load_target,\n method=nested_field.load,\n )", "def __finalize__(self, other, method=None, **kwargs):\n self = super().__finalize__(other, method=method, **kwargs)\n # merge operation: using metadata of the left object\n if method == \"merge\":\n for name in self._metadata:\n print(\"self\", name, self.au_columns, other.left.au_columns)\n object.__setattr__(self, name, getattr(other.left, name, None))\n # concat operation: using 
metadata of the first object\n elif method == \"concat\":\n for name in self._metadata:\n object.__setattr__(self, name, getattr(other.objs[0], name, None))\n return self", "def additional_fields(self, additional_fields):\n\n self._additional_fields = additional_fields", "def __set__(self, instance, value):\n # Run process for the nested field type for each value in list\n instance._values[self.name] = [self.field.process(v) for v in value]", "def __init__(self, **attributes):\n self.set(**attributes)", "def update_params(self, extra_params):\n self._params.update(extra_params)\n return self", "def _replace_fields(self):\n for name, value in self._cleaned_data.items():\n setattr(self, name, value)", "def update(self, other=[], **kwargs):\n if ismapping(other):\n other = other.items()\n\n for key, value in other:\n self[key] = value\n\n for key, value in kwargs.items():\n self[key] = value", "def set_attributes(self):\n s = _setter(oself=self, e1=NameError, e2=AttributeError)\n\n s('oself.coef_ = oself.model.coef_')\n s('oself.intercept_ = oself.model.intercept_')\n\n self.time_prepare = None\n s('oself.time_prepare = oself.model.time_prepare')\n self.time_upload_data = None\n s('oself.time_upload_data = oself.model.time_upload_data')\n self.time_fitonly = None\n s('oself.time_fitonly = oself.model.time_fitonly')", "def __init__(self, p1, p2):\n self.p1 = p1\n self.p2 = p2", "def fill_admin_fieldparam(params, p, value=False):\n params[p.id] = {'param_id': p.id, 'placeholder': p.placeholder,\n 'inputtype': p.paramtype.typename, 'title': p.title}\n params[p.id]['model'] = value if value else ''", "def assign(self, other):\n\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n ops = []\n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign(other_var))\n return tf.group(*ops, name=\"assign_\"+self.name)", "def _extend(self, other_field, memo) -> None:\n if other_field.data.ndim != self.data.ndim:\n raise ValueError(\n f\"Field '{self.name}' cannot be extended. Dimensions must be equal. 
({other_field.data.ndim} != {self.data.ndim})\"\n )\n\n old_id = id(self.data)\n if self.data.dtype < other_field.data.dtype:\n # Increase size of self.data.dtype before inserting\n new_data = np.insert(self.data.astype(other_field.data.dtype), self.num_obs, other_field.data, axis=0)\n else:\n new_data = np.insert(self.data, self.num_obs, other_field.data, axis=0)\n memo[old_id] = (self.data, new_data)\n self.data = new_data", "def set_field(self, x:int, y:int, field:Field) -> None:\r\n self.fields[x][y] = field", "def __set__(self,obj,val):\n assert len(val) == len(self.attribs),\"Compound parameter '%s' got the wrong number of values (needed %d, but got %d).\" % (self._attrib_name,len(self.attribs),len(val))\n \n if not obj:\n for a,v in zip(self.attribs,val):\n setattr(self.objtype,a,v)\n else:\n for a,v in zip(self.attribs,val):\n setattr(obj,a,v)", "def set(self, params, relink=None):\n # Fast path 'set(get())'-like\n if params is self._params:\n return\n # Assignment\n if (self._config.relink if relink is None else relink):\n tools.relink(self._model.parameters(), params)\n self._params = params\n else:\n self._params.copy_(params, non_blocking=self._config[\"non_blocking\"])", "def set(self, **parameters):\r\n for name in parameters:\r\n if name in self.prm:\r\n self.prm[name] = parameters[name]\r\n else:\r\n self._illegal_parameter(name)", "def set_edge_param(self, key_a, key_b, **kwargs):\n for param_key, param_value in kwargs.items():\n self.vertices[key_a].edges_out[key_b].params[param_key] = param_value", "def set_params(self, dG=None, dH=None, dCp=None ):\n\n\t\tif len(self.dG) > 0 and dG != None:\n\t\t\tassert( len(self.dG) == len(dG) )\n\t\tif len(self.dG) > 0 and dH != None:\n\t\t\tassert( len(self.dG) == len(dH) )\n\t\tif len(self.dG) > 0 and dCp != None:\n\t\t\tassert( len(self.dG) == len(dCp) )\n\n\t\t# note, dG & dH values are always at ref temp!\n\t\tif dG != None:\n\t\t\tself.dG\t\t=\tdG\n\t\tif dH != None:\n\t\t\tself.dH\t\t=\tdH\n\t\tif dCp != None:\n\t\t\tself.dCp\t=\tdCp\n\n\t\treturn", "def _initFields(self):\n pass", "def new_data(first: dict, second: dict, changeables: tuple):\n for name, field in first.items():\n if name not in changeables:\n second[name] = field", "def update(self, instance, validated_data):\n validated_data.pop(\"assignment\", None)\n return super().update(instance, validated_data)", "def __post_init__(self) -> None:\n self.ufp_required_field = split_tuple(self.ufp_required_field)\n self.ufp_value = split_tuple(self.ufp_value)\n self.ufp_enabled = split_tuple(self.ufp_enabled)", "def setValues(self, fields: str = \"\"):\n pass", "def update(self, **params):\n self.parameters.update(params)", "def update(self, other):\n for name, value in other.items():\n self.__setitem__(name, value)" ]
[ "0.65146065", "0.6420237", "0.6390991", "0.6363737", "0.6293439", "0.6233246", "0.6145043", "0.6098568", "0.6090508", "0.6078845", "0.6076885", "0.60532725", "0.60103554", "0.60103554", "0.60075897", "0.5986646", "0.5981647", "0.59416527", "0.5925342", "0.5911154", "0.5895669", "0.5879643", "0.58679235", "0.58413583", "0.5822643", "0.58163774", "0.5797945", "0.5792444", "0.5773239", "0.5763542", "0.5755185", "0.5749947", "0.5749894", "0.574198", "0.5729263", "0.5722895", "0.57223", "0.57178915", "0.571474", "0.5706432", "0.569124", "0.5686111", "0.5682939", "0.56829315", "0.56715834", "0.5671166", "0.5655228", "0.5648084", "0.5647816", "0.5646961", "0.5636595", "0.56345093", "0.5628923", "0.56095374", "0.5608483", "0.56035674", "0.5591514", "0.55912685", "0.55874217", "0.5580611", "0.5579315", "0.5577286", "0.55664104", "0.55663556", "0.55642337", "0.55623424", "0.5553414", "0.5542033", "0.55411667", "0.55376935", "0.55342335", "0.5531797", "0.553054", "0.55257696", "0.55240154", "0.550881", "0.55071557", "0.55048156", "0.5494949", "0.5494876", "0.54828846", "0.54809195", "0.548072", "0.54797715", "0.5475976", "0.5473468", "0.54640865", "0.5458352", "0.5456236", "0.544915", "0.5445617", "0.54453623", "0.54380417", "0.5429116", "0.5424446", "0.5422211", "0.54207414", "0.54194206", "0.54167145", "0.54098773" ]
0.6253937
5
Change this field's type to newType with default format
def changeType(self, newType):
    self.__class__ = globals()[newType + 'Format']
    self.format = self.defaultFormat
    self.initFormat()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def field_type_converter(self, old_type):\n\n if old_type == 'String':\n new_type = 'Text'\n elif old_type == 'Integer':\n new_type = 'Short'\n elif old_type == 'Date':\n new_type = 'Date'\n elif old_type == 'GlobalID':\n new_type = 'GUID'\n else:\n new_type = 'Double'\n return new_type", "def reformat(self, newformat):\n # check whether the column is defined\n if self._defined:\n # get the appropriate null-format\n nullformat = self._get_nullformat(newformat)\n # set the new formats\n self._format = [newformat, nullformat]\n else:\n # first the column type must be defined\n raise Exception('The data type of this column is not yet defined!')", "def convert_type(self, value, schema_type, **kwargs):", "def setType(self,newtype):\n\t\tself.type = newtype;", "def _convert_field_type(row):\n return row", "def _make_serializable(self, field):\n if isinstance(field, datetime):\n return str(field)\n elif isinstance(field, Decimal):\n return float(field)\n else:\n return field", "def field_type(self):\n return \"\"", "def set_type(self, new_value):\n\n self.vax_type = new_value\n self.save()", "def restore_type(field_type, value):\n field_types = {\n 'BooleanField': string_to_bool,\n 'CharField': str,\n 'FloatField': float,\n 'IntegerField': int,\n }\n return_val = lambda x: x\n recast = field_types.get(field_type, return_val)\n return recast(value)", "def change_object_type(obj, new_type):\n # type: (Union[str, SupportsInt, SupportsFloat], str) -> Union[str, int, float]\n if new_type == 'str':\n return str(obj)\n elif new_type == 'int':\n return int(obj)\n elif new_type == 'float':\n return float(obj)\n else:\n raise IOError('expected_type \"{}\" is not supported in this function.'.format(new_type))", "def convert_format(self, new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format", "def _assign_type(self, type):\n if self.is_input:\n return 'data'\n else:\n return type", "def format_field(model, name, value):\n if value is None: return value\n t = type( getattr(model,name) )\n if t == datetime:\n return value.replace('T',' ')\n return value", "def update_column_format(self):\n pass", "def _change_column_type(self, t_trans, value):\n # create an element object\n val = ForElement(value)\n\n # if not set the type and the define flagg\n self._format = val.get_fvalue()\n\n # set the type to the one in the transformator object\n self._type = t_trans.higher_type\n\n # go over all data\n for index in range(len(self._data)):\n if self._data[index] != None:\n # transform all non-Null entries\n self._data[index] = t_trans.to_higher_type(self._data[index])", "def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")", "def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n 
except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field", "def change_type(self, col_name, str_type):\n if self[col_name] is not None:\n self[col_name] = self[col_name].astype(str_type)", "def __update_custom_fieldtype_settings(self,\n eachfield, #field etree\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n fieldtype = eachfield.attrib.get(TYPE)\n field_property = self.custom_fieldtype_properties.get(fieldtype, {})\n\n cust_fieldtype = fieldtype_property.get(\"fieldtype\", None)\n cust_readable = fieldtype_property.get(\"readable\", None)\n cust_writable = fieldtype_property.get(\"writable\", None)\n cust_label = fieldtype_property.get(\"label\", None)\n cust_hint = fieldtype_property.get(\"hint\", None)\n cust_default = fieldtype_property.get(\"default\", None)\n cust_lines = fieldtype_property.get(\"lines\", None)\n cust_boxes = fieldtype_property.get(\"boxes\", None)\n cust_has_options = fieldtype_property.get(\"has_options\", None)\n cust_options = fieldtype_property.get(\"options\", None)\n \n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def _update_input_type(self):\n pass", "def set_format_by_type(self, value, format):\n self.set_render_func_by_type(value, format.format)", "def renameFields(self, nameDict):\n for format in self.values():\n if format.genericType in nameDict:\n nameDict[format.name] = nameDict[format.genericType]\n for item in globalref.docRef.root.descendantGen():\n for oldName, newName in nameDict.get(item.formatName, []):\n if oldName in item.data:\n item.data[newName] = item.data[oldName]\n del item.data[oldName]", "def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif 
\"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)", "def _to_base_type(self, value):\n if value is None:\n return ''\n else:\n return value.to_json()", "def output_field(self):\n Field = self.original_field.__class__\n if isinstance(self.original_field, fields.CharField):\n return Field(max_length=self.original_field.max_length)\n\n return Field()", "def api_field_from_django_field(cls, f, default=CharField):\n if isinstance(f, JSONField):\n return JSONApiField\n \n return super(PandaModelResource, cls).api_field_from_django_field(f, default)", "def replace_typeval(self, combined, replacement):\n raise NotImplementedError(\"This is an abstract method.\")", "def convert_type(cls, prop_obj, column_name, specific_type, empty_value):\n for key, item in enumerate(prop_obj):\n if item[column_name]:\n prop_obj[key][column_name] = specific_type(item[column_name])\n else:\n prop_obj[key][column_name] = empty_value", "def _new_field(self):\n field = self.domain.new_field()\n return field", "def convert(self, format):\n cloned = self.clone()\n cloned.format = format\n return cloned", "def mongo_to_python_type(field, data):\n if isinstance(field, ObjectIdField):\n return str(data)\n elif isinstance(field, DecimalField):\n return data\n elif isinstance(field, BooleanField):\n return data\n else:\n return str(data)", "def mask_custom_field(self, custom_field, doc_type):\n\t\tcustom_field.fields.update({\n\t\t\t'doctype': 'DocField',\n\t\t\t'parent': doc_type,\n\t\t\t'parentfield': 'fields',\n\t\t\t'parenttype': 'DocType',\n\t\t})", "def set_dataframe_format(self, new_format):\n self.sig_option_changed.emit('dataframe_format', new_format)\n self.model().dataframe_format = new_format", "def setType(self, newType):\n self._itemType = newType", "def setTransformType(self, val): # real signature unknown; restored from __doc__\n pass", "def _set_data(self, new_data):\n for name, field in self._get_fields().items():\n if name in new_data:\n try:\n setattr(self, f\"__{name}\", field.from_raw(new_data[name]))\n except (fields.ValidationError, ValueError):\n # should at least log validation and value errors\n # this can happen in case of e.g. 
fields type change\n pass", "def _set_model_field(self):\n self._field_value = hutils.format_json(self._memory_data)\n setattr(self._model, self._field, self._field_value)", "def normalise_field_value(value):\n if isinstance(value, datetime):\n return make_timezone_naive(value)\n elif isinstance(value, Decimal):\n return decimal_to_string(value)\n return value", "def set_field_by_schema(self, header, field):\n if header not in self.schema.keys():\n if settings._DISABLE_SCHEMA_MATCH:\n return\n else:\n raise InvalidRecordProperty('Record schema does not have the property \"%s\"' % header)\n\n data_type = self.schema[header]['type'].lower()\n\n if data_type == 'string':\n if Record.is_empty_str(field):\n self.fields[header] = None\n else:\n self.fields[header] = field\n return\n\n if data_type == 'integer':\n if Record.could_be_int(field):\n self.fields[header] = int(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'datetime':\n datetime_format = self.schema[header]['datetime_format'];\n if datetime_format == None:\n datetime_format = settings._STRFTIME_FORMAT\n if Record.could_be_datetime(field, datetime_format):\n self.fields[header] = datetime.strptime(field, datetime_format)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'number':\n if Record.could_be_number(field):\n self.fields[header] = float(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'float':\n if Record.could_be_float(field):\n self.fields[header] = float(field)\n else:\n self.fields[header] = None\n return\n\n if data_type == 'boolean':\n self.fields[header] = Record.parse_boolean(field)\n return", "def default(self, value):\n if isinstance(value, datetime):\n return value.isoformat()\n\n elif isinstance(value, UUID):\n return str(value)\n\n else:\n # Let the base class raise the TypeError\n return super().default(value)", "def _set_field_feature_dtype(self, field_path, field_feature_dtype):\n feature_dtype_str = json.dumps(field_feature_dtype.descr)\n # check if the field_feature_dtype is already set\n if field_path in self.field_feature_dtypes:\n # check that the dtype was previously saved as \"None\" as we\n # won't overwrite anything else\n if self.field_feature_dtypes[field_path] is None:\n full_path = '{}/{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR, field_path)\n # we have to delete the old data and set new data\n del self.h5[full_path]\n self.h5.create_dataset(full_path, data=feature_dtype_str)\n else:\n raise AttributeError(\n \"Cannot overwrite feature dtype for {} with {} because it is {} not \".format(\n field_path, field_feature_dtype, self.field_feature_dtypes[field_path],\n NONE_STR))\n # it was not previously set so we must create then save it\n else:\n self._add_field_feature_dtype(field_path, field_feature_dtype)", "def createField(schemaName, field):\n# print(field.domain)\n# print(field.name, field.domain if isinstance(field.domain, str) else field.domain.type)\n# print(field.__dict__)\n return \"\\\"{name}\\\" {type_}\".format(\n name = field.name,\n type_ = '\"' + schemaName + '\".\"' + field.domain + '\"' if isinstance(field.domain, str) else getType(field.domain)\n )", "def set_generic_fields(self):\n self.constant_fields[\"admver\"] = 9.1\n self.constant_fields[\"datatype\"] = 'raw'\n self.constant_fields[\"dfo\"] = '//'\n self.constant_fields[\"enterdate\"] = time.strftime(\"%m/%d/%Y\")", "def set_type(self, _new_type):\n # Check to see if type is changing\n if _new_type == self._type:\n return\n # Move from current boid set to 
boid set for new type\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[self._grid][_new_type].add(self)\n # Update type\n self._type = _new_type", "def createField(selected_layer, newFieldName, newFieldType):\r\n field = ogr.FieldDefn(newFieldName, newFieldType)\r\n selected_layer.CreateField(field)", "def set_ledType(self, newval):\n rest_val = str(newval)\n return self._setAttr(\"ledType\", rest_val)", "def test_fixed_type():\n name = \"a_fixed_field\"\n namespace = \"md5\"\n aliases = [\"md5\", \"hash\"]\n default = types.Fixed(16, namespace=namespace, aliases=aliases)\n python_type = types.Fixed\n field = fields.AvroField(name, python_type, default)\n\n expected = {\n \"name\": name,\n \"type\": {\n \"type\": \"fixed\",\n \"name\": name,\n \"size\": default.size,\n \"namespace\": namespace,\n \"aliases\": aliases,\n },\n }\n\n assert expected == field.to_dict()", "def experiment_type(self, new_type: str) -> None:\n self._db_data.experiment_type = new_type", "def _cast_field(self, cast_to, value):\n if cast_to in (int, long, str):\n return cast_to(value)\n elif cast_to == unicode:\n try:\n value = value.decode(self.charset, self.errors)\n except UnicodeEncodeError, e:\n raise InvalidData(\"Error encoding unicode value '%s': %s\" % (repr(value), e))\n\n return value\n elif cast_to in (any, bytes):\n return value\n else:\n raise TypeError(\"Invalid field type %s\" % (cast_to))", "def makeField(self,field_name,field_type,field_precision,field_scale,field_length):\n \n new_field = self.GP.CreateObject(\"field\")\n new_field.Name = field_name\n new_field.Type = field_type\n new_field.Precision = field_precision\n new_field.Scale = field_scale\n new_field.Length = field_length\n new_field.IsNullable = True\n \n return new_field", "def setField(self, field):\n\n # Set the new property to container\n key = (field.getFieldID(), field.getTime())\n self.fields.set_value(key, field)", "def api_field_from_mongo_field(cls, f, default=tastypie_fields.CharField):\r\n\r\n result = default\r\n\r\n if isinstance(f, (mongoengine.ComplexDateTimeField, mongoengine.DateTimeField)):\r\n result = tastypie_fields.DateTimeField\r\n elif isinstance(f, mongoengine.BooleanField):\r\n result = tastypie_fields.BooleanField\r\n elif isinstance(f, mongoengine.FloatField):\r\n result = tastypie_fields.FloatField\r\n elif isinstance(f, mongoengine.DecimalField):\r\n result = tastypie_fields.DecimalField\r\n elif isinstance(f, mongoengine.IntField):\r\n result = tastypie_fields.IntegerField\r\n elif isinstance(f, (mongoengine.FileField, mongoengine.BinaryField)):\r\n result = tastypie_fields.FileField\r\n elif isinstance(f, mongoengine.DictField):\r\n result = tastypie_fields.DictField\r\n elif isinstance(f, mongoengine.ListField):\r\n result = tastypie_fields.ListField\r\n elif isinstance(f, mongoengine.GeoPointField):\r\n result = tastypie_fields.ListField\r\n elif isinstance(f, mongoengine.ObjectIdField):\r\n result = tastypie_mongoengine_fields.ObjectId\r\n\r\n return result", "def type(self):\n return self._field.type", "def setValue(self, newValue, valueType):\n self.value = str(newValue)\n self.valueType = valueType.value", "def SetFieldValue(fielddef, dobj, addr, value):\n\n format_ = GetFieldDef(fielddef, fields='format_')\n formatcnt = GetFormatCount(format_)\n singletype, bitsize = GetFormatType(format_)\n if debug(args) >= 2:\n print(\"SetFieldValue(): fielddef {}, addr 0x{:04x} value {} formatcnt {} singletype {} bitsize {} 
\".format(fielddef,addr,value,formatcnt,singletype,bitsize), file=sys.stderr)\n if not format_[-1:].lower() in ['s','p']:\n addr += (bitsize // 8) * formatcnt\n for _ in range(0, formatcnt):\n addr -= (bitsize // 8)\n maxunsigned = ((2**bitsize) - 1)\n maxsigned = ((2**bitsize)>>1)-1\n val = value & maxunsigned\n if isinstance(value,int) and value < 0 and val > maxsigned:\n val = ((maxunsigned+1)-val) * (-1)\n if debug(args) >= 3:\n print(\"SetFieldValue(): Single type - fielddef {}, addr 0x{:04x} value {} singletype {} bitsize {}\".format(fielddef,addr,val,singletype,bitsize), file=sys.stderr)\n try:\n struct.pack_into(singletype, dobj, addr, val)\n except struct.error as e:\n exit(ExitCode.RESTORE_DATA_ERROR,\n \"Single type {} [fielddef={}, addr=0x{:04x}, value={}] - skipped!\".format(e,fielddef,addr,val),\n type_=LogType.WARNING,\n doexit=not args.ignorewarning,\n line=inspect.getlineno(inspect.currentframe()))\n pass\n value >>= bitsize\n else:\n if debug(args) >= 3:\n print(\"SetFieldValue(): String type - fielddef {}, addr 0x{:04x} value {} format_ {}\".format(fielddef,addr,value,format_), file=sys.stderr)\n try:\n struct.pack_into(format_, dobj, addr, value)\n except struct.error as e:\n exit(ExitCode.RESTORE_DATA_ERROR,\n \"String type {} [fielddef={}, addr=0x{:04x}, value={} - skipped!\".format(e,fielddef,addr,value),\n type_=LogType.WARNING,\n doexit=not args.ignorewarning,\n line=inspect.getlineno(inspect.currentframe()))\n pass\n\n return dobj", "def force_field():\n ff = get_native_force_field('martini22')\n nter = ff.modifications['N-ter'].copy()\n nter.name = (nter.name, )\n cter = ff.modifications['C-ter'].copy()\n cter.name = (cter.name, )\n ff.modifications['N-ter'] = nter\n ff.modifications['C-ter'] = cter\n return ff", "def default(self, obj):\n if isinstance(obj, Decimal):\n return float(obj)\n if isinstance(obj, self._datetypes):\n return obj.strftime('%Y-%m-%dT%H:%M:%SZ')\n return super(JSONEncoder, self).default(obj)", "def extend(self, fieldname, valuefactory):\n names = {}\n values = {}\n typename = self._type.__doc__.split('(')[0]\n newtype = collections.namedtuple( typename, list(self._type._fields) + [ fieldname ] )\n for number, value in self._values.items():\n value = newtype( *(list(value) + [ valuefactory(value) ]) )\n names[value.name] = value\n values[number] = value\n \n self._type = newtype\n self._names = names\n self._values = values", "def convert(self, name, to):\n valid_types = ['int', 'float', 'single', 'delimited set', 'string']\n if not to in valid_types:\n raise TypeError(\"Cannot convert to type {}!\".format(to))\n if to == 'int':\n self._as_int(name)\n elif to == 'float':\n self._as_float(name)\n elif to == 'single':\n self._as_single(name)\n elif to == 'delimited set':\n self._as_delimited_set(name)\n elif to == 'string':\n self._as_string(name)\n if self._is_array_item(name):\n self._meta['masks'][self._maskname_from_item(name)]['subtype'] = to\n return None", "def DefaultsDataclassField(feature_type: str):\n\n\n class DefaultMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid defaults config from the feature_registry\n and creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n input_feature_class = input_mixin_registry[feature_type]\n output_feature_class = output_mixin_registry.get(feature_type, None)\n try:\n input_schema = 
input_feature_class.Schema().load(value)\n if output_feature_class:\n output_schema = output_feature_class.Schema().load(value)\n combined = input_schema + output_schema\n else:\n combined = input_schema\n return combined\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid params: {value}, see `{attr}` definition. Error: {error}')\n raise ValidationError(f'Invalid params: {value}')\n\n @staticmethod\n def _jsonschema_type_mapping():\n input_feature_cls = input_mixin_registry.get(feature_type)\n output_feature_cls = output_mixin_registry.get(feature_type, None)\n input_props = schema_utils.unload_jsonschema_from_marshmallow_class(input_feature_cls)['properties']\n if output_feature_cls:\n output_props = schema_utils.unload_jsonschema_from_marshmallow_class(output_feature_cls)['properties']\n combined_props = {**output_props, **input_props}\n else:\n combined_props = input_props\n return {'type': 'object', 'properties': combined_props, 'additionalProperties': False, 'title': 'defaults_options'}\n try:\n input_cls = input_mixin_registry[feature_type]\n output_cls = output_mixin_registry.get(feature_type, None)\n dump_default = input_cls.Schema().dump({'type': feature_type})\n if output_cls:\n output_dump = output_cls.Schema().dump({'type': feature_type})\n dump_default = {**output_dump, **dump_default}\n load_default = input_cls.Schema().load({'type': feature_type})\n if output_cls:\n output_load = output_cls.Schema().load({'type': feature_type})\n for k in dump_default.keys():\n if getattr(load_default, k, -1) == -1:\n setattr(load_default, k, getattr(output_load, k))\n return field(metadata={'marshmallow_field': DefaultMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported feature type: {feature_type}. See input_type_registry. Details: {e}')", "def _get_field_type_converter(pipeline_builder):\n converter_config = [\n {\n 'fields': ['/id'],\n 'targetType': 'LONG',\n 'dataLocale': 'en,US'\n }\n ]\n field_type_converter = pipeline_builder.add_stage('Field Type Converter')\n field_type_converter.set_attributes(conversion_method='BY_FIELD',\n field_type_converter_configs=converter_config)\n return field_type_converter, pipeline_builder", "def get_format_type(self):\n raise Unimplemented()", "def _type_convert(self, value):\n if value is None:\n return value\n\n try:\n return datetime.datetime.strptime(value, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n pass\n\n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n if _parser(value.strip().replace(\"_\", \"\")):\n return decimal.Decimal(value)\n except decimal.InvalidOperation:\n pass\n\n return value", "def to_field(self, **kwargs):\n if self.regex:\n if not 'regex' in self.field_args:\n self.field_args = self.field_args + ('regex', )\n self.field_klass = forms.RegexField\n return super(StringSetting, self).to_field(**kwargs)", "def __init__(self, field):\n super().__init__()\n self.field = str(field)", "def to_python(self, value):\n # Composite types are serialized as JSON blobs. 
If BaseField.to_python\n # is called with a string, assume it was produced by value_to_string\n # and decode it\n if isinstance(value, str):\n try:\n value = json.loads(value)\n except ValueError as exc:\n raise ValidationError(\n self.error_messages[\"bad_json\"],\n code=\"bad_json\",\n ) from exc\n\n return self.Meta.model(\n **{\n name: field.to_python(value.get(name))\n for name, field in self.Meta.fields\n }\n )\n\n return super().to_python(value)", "def to_legacy(self) -> object:\n pass", "def asformat(self, format):", "def _add_type_specific_repr_fields(self, repr_parts):", "def refine_type(self, new_type):\n if new_type is NodeType.UNKNOWN or new_type is self.var_type:\n return\n elif self.var_type is NodeType.UNKNOWN:\n self.var_type = new_type\n else:\n raise TigerTypeError, self._name", "def EncoderDataclassField(feature_type: str, default: str):\n\n\n class EncoderMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid encoder config from the encoder_registry\n and creates a corresponding `oneOf` JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n if TYPE in value and value[TYPE] in get_encoder_classes(feature_type):\n enc = get_encoder_cls(feature_type, value[TYPE])\n try:\n return enc.Schema().load(value)\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid encoder params: {value}, see `{enc}` definition. Error: {error}')\n raise ValidationError(f'Invalid params for encoder: {value}, expect dict with at least a valid `type` attribute.')\n raise ValidationError('Field should be None or dict')\n\n @staticmethod\n def _jsonschema_type_mapping():\n encoder_classes = list(get_encoder_classes(feature_type).keys())\n return {'type': 'object', 'properties': {'type': {'type': 'string', 'enum': encoder_classes, 'default': default}}, 'title': 'encoder_options', 'allOf': get_encoder_conds(feature_type)}\n try:\n encoder = get_encoder_cls(feature_type, default)\n load_default = encoder.Schema().load({'type': default})\n dump_default = encoder.Schema().dump({'type': default})\n return field(metadata={'marshmallow_field': EncoderMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported encoder type: {default}. See encoder_registry. 
Details: {e}')", "def type(self, value):\n return value", "def overwrite_type(self, param_type: str) -> \"Param\":\n if param_type is None or param_type == self.typestr():\n return self\n\n if self.type_frozen:\n raise PyParamTypeError(\n f\"Type of argument {self.namestr()!r} \" \"is not overwritable\"\n )\n logger.warning(\n \"Type changed from %r to %r for argument %r\",\n self.typestr(),\n param_type,\n self.namestr(),\n )\n return self.to(param_type)", "def _transtype(self, systype=None):\n if systype is None:\n systype = self.get_meta(CAMSYS_TYPE, None)\n if systype == \"annotation-type\":\n newtype = AnnotationType\n elif systype == \"relation-type\":\n newtype = RelationType\n else:\n newtype = Tag\n if self.__class__ is not newtype:\n self.__class__ = newtype", "def _update_output_type(self):\n pass", "def type(self, type):\n\n self.container['type'] = type", "def type(self, type):\n\n self.container['type'] = type", "def reset_format(self):\n ## Formatters\n self._format_setters(*self.format_set_info)\n self._format_getters(*self.format_get_info)\n self._format_joining_functions()", "def type(self, value):\n if value is None:\n ida_bytes.del_items(self.ea)\n return\n if isinstance(value, BipType):\n value.set_at(self.ea)\n elif isinstance(value, (str, unicode)):\n value = BipType.from_c(value)\n value.set_at(self.ea)\n else:\n raise TypeError(\"Unhandle type for BipData.type setter\")", "def retype(self, dictionary):\r\n\r\n for name, retype in dictionary.items():\r\n field = self._field_dict[name]\r\n for key, value in retype.items():\r\n if key in _valid_retype_attributes:\r\n field.__setattr__(key, value)\r\n else:\r\n raise Exception(\"Should not use retype to change field attribute '%s'\", key)", "def _field_sanity(self, field):\r\n if isinstance(field, models.BooleanField) and field.has_default():\r\n field.default = int(field.to_python(field.get_default()))\r\n return field", "def convert_types(cls, value):\n if type(value) in (datetime, date):\n return time.mktime(value.timetuple())\n elif isinstance(value, Decimal):\n return float(value)\n else:\n return value", "def set_input_type(self, input_type):\n if input_type is not None: self._input_type.value = input_type\n return self", "def data_types(self):", "def coerce_type(cls, obj, typedef=None, **kwargs):\n if trimesh and isinstance(obj, trimesh.base.Trimesh):\n obj = ObjDict.from_trimesh(obj)\n if isinstance(obj, dict) and ('material' in obj):\n obj['material'] = tools.bytes2str(obj['material'])\n return super(ObjMetaschemaType, cls).coerce_type(\n obj, typedef=typedef, **kwargs)", "def type(self, type):\n self._type = type", "def type(self, type):\n self._type = type", "def parse_annotated(self) -> UiField[T]:\n if not self.is_annotated_type:\n return self\n\n kwargs = _uikwargs_from_annotated_type(self.type)\n\n if (\n self.default is not Undefined\n and kwargs.get(\"default\", Undefined) is not Undefined\n ):\n warnings.warn(\n \"Cannot set default value in both type annotation and field. Overriding\"\n f\" default {kwargs['default']} with {self.default} in field \"\n f\"{self.name!r}\",\n stacklevel=2,\n )\n kwargs.pop(\"default\", None)\n if self.name is not None and kwargs.get(\"name\") is not None:\n warnings.warn(\n \"Cannot set name in both type annotation and field. 
Overriding\"\n f\" name {kwargs['name']!r} with {self.name!r} in field {self.name!r}\",\n stacklevel=2,\n )\n kwargs.pop(\"name\", None)\n return dc.replace(self, **kwargs)", "def register_field(self, field, *args):\n self.ma_plugin.map_to_openapi_type(*args)(field)", "def _process_type(self):\n _type = self._transform_value(self.transform.type)\n if _type is not None:\n self.transformed_item['type'] = _type\n else:\n self.log.error(\n 'feature=ti-transform, action=process-type, error=invalid=type, '\n f'path={self.transform.type.path}, value={_type}'\n )\n raise RuntimeError('Invalid type')", "def _type_translate(p_type, default_v=None):\n translate_dict = {'float': 'float',\n 'double': 'real',\n 'int': 'int',\n 'gr_complex': 'complex',\n 'char': 'byte',\n 'unsigned char': 'byte',\n 'std::string': 'string',\n 'std::vector<int>': 'int_vector',\n 'std::vector<float>': 'real_vector',\n 'std::vector<gr_complex>': 'complex_vector',\n }\n if p_type in ('int',) and default_v[:2].lower() == '0x':\n return 'hex'\n try:\n return translate_dict[p_type]\n except KeyError:\n return 'raw'", "def to_field(self):\n K = self.domain.get_field()\n return self.convert_to(K)", "def _coerce_value(self, new_value):\n return new_value", "def reset_to_pype_default(self):\n raise NotImplementedError(\n \"{} Method `reset_to_pype_default` not implemented!\".format(\n repr(self)\n )\n )", "def _set_data_types(self):\n temp_df = self.raw_data\n cols = temp_df.drop('room_location', axis=1).columns\n temp_df[cols] = temp_df[cols].apply(pd.to_numeric)\n temp_df['room_location'] = temp_df['room_location'].astype(str)\n self.raw_data = temp_df", "def as_type(self, type_):\n\n try:\n return type_(self.value)\n except (ValueError, TypeError):\n\n message = \"Attribute <{}> = '{}' could not be converted to {}\".format(\n self.namespace, self.value, type_\n )\n raise ConfigTypeError(message)", "def undo_format_field_name(field_name):\n if json_api_settings.FORMAT_FIELD_NAMES:\n return format_value(field_name, \"underscore\")\n\n return field_name", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def customConvert(self):\r\n try:\r\n conversionMap = self._conversionMaps[str(self.deviceId)]\r\n\r\n convertIdx = 0\r\n for idx in range(len(self.fields)):\r\n offset = conversionMap[convertIdx]\r\n scale = conversionMap[convertIdx + 1]\r\n\r\n if(offset!=None):\r\n if(str(offset) == self.TC16):\r\n val = self.fields[idx]\r\n val = self.convertSigned16(val) * scale\r\n elif(str(offset) == self.LONG_SERIAL):\r\n #merge with the existing serial number\r\n val = self.serialNo << 32\r\n val |= self.fields[idx]\r\n self.serialNo = val\r\n else:\r\n val = (self.fields[idx] * scale) + offset\r\n\r\n self.fields[idx] = val\r\n\r\n convertIdx += 2\r\n\r\n except KeyError:\r\n #no conversion for this type\r\n None\r\n\r\n return" ]
[ "0.7694419", "0.6731671", "0.6345279", "0.6247612", "0.6140391", "0.6116279", "0.60566497", "0.60437393", "0.6029452", "0.6012749", "0.5938881", "0.589731", "0.58825034", "0.58494186", "0.582059", "0.58018064", "0.57945174", "0.57587177", "0.57267576", "0.5711055", "0.57083714", "0.5697486", "0.56484854", "0.56402653", "0.5629887", "0.5577035", "0.55711544", "0.55596805", "0.5544898", "0.55447626", "0.5510644", "0.5453508", "0.54418683", "0.5441792", "0.5440405", "0.54242367", "0.5408538", "0.53669125", "0.5329074", "0.530644", "0.53047854", "0.5297583", "0.5293159", "0.5284686", "0.5277674", "0.5276642", "0.5266316", "0.5261019", "0.52565885", "0.52540433", "0.5253249", "0.52254456", "0.5217484", "0.5211203", "0.5197005", "0.5195904", "0.5192349", "0.518292", "0.51782817", "0.5175813", "0.5175162", "0.51631606", "0.51582164", "0.5157642", "0.5156261", "0.5155003", "0.51426375", "0.5142007", "0.5135711", "0.5132519", "0.511288", "0.5110612", "0.51089174", "0.5104903", "0.510348", "0.50992936", "0.5088128", "0.5088128", "0.50859135", "0.5085313", "0.5079642", "0.5074506", "0.50721157", "0.506536", "0.5051094", "0.50450635", "0.50424993", "0.50424993", "0.5040273", "0.5019241", "0.501854", "0.5015417", "0.50090283", "0.50039357", "0.5002991", "0.5000311", "0.4999843", "0.49982017", "0.4992483", "0.49922612" ]
0.7249991
1
Returns English name if assigned, o/w name
def englishName(self):
    if self.enName:
        return self.enName
    return self.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def english_name(self) -> str | None:\n return self.get_display_name(Locale('en'))", "def get_localized_name(name):\n locale = \"{}_{}\".format(\n name[\"preferredLocale\"][\"language\"],\n name[\"preferredLocale\"][\"country\"]\n )\n return name['localized'].get(locale, '')", "def primary_name(names):\n\tlangs = names.keys()\n\tif 'en' in langs:\n\t\treturn names['en']\n\treturn names[langs[0]]", "def get_name() -> str:", "def get_eng_name(self):\n return self.eng_name", "def get_name():", "def get_fulll_name(self):\n return self.name", "def get_name(self) -> str:\n def _seg2():\n if self.name:\n return self.name\n else:\n try:\n return self.player.title\n except AttributeError:\n return 'No title specified'\n try:\n if self.player.title == 'translate_tts':\n return 'Speech'\n else:\n return _seg2()\n except AttributeError:\n return _seg2()", "def get_display_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n retval = locale.languages.get(self.language)\n if retval and (self.territory or self.script or self.variant):\n details = []\n if self.script:\n details.append(locale.scripts.get(self.script))\n if self.territory:\n details.append(locale.territories.get(self.territory))\n if self.variant:\n details.append(locale.variants.get(self.variant))\n if self.modifier:\n details.append(self.modifier)\n detail_string = ', '.join(atom for atom in details if atom)\n if detail_string:\n retval += f\" ({detail_string})\"\n return retval", "def get_name_translation(self):\n\t\treturn frappe.get_value(\n\t\t\t\"Translation\",\n\t\t\t{\"source_text\": self.doc_type, \"language\": frappe.local.lang or \"en\"},\n\t\t\t[\"name\", \"translated_text\"],\n\t\t\tas_dict=True,\n\t\t)", "def get_level_name(self, level_id):\n for (english_name, level_package) in self.levels[self.game]:\n if level_package.lower() == level_id.lower():\n return english_name\n return None", "def get_name() -> str:\n pass", "def get_name(self):\n return self.normalize_name(self.name)", "def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()", "def get_name(self):\n if self.name != None: return self.name\n else: return self.get_name_from_items(self.items.values())", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n #Mostrar_Grande = long_name.upper()\r\n #return long_name.upper()\r\n #return Mostrar_Grande #Funciona Com Return TAMBÉM, mas olhe na linha 39 como seria necessário usar.\r\n print(long_name.upper())", "def get_full_name_with_academic_title(self) -> str:\n base_name = super().get_full_name()\n return f'{self.title} {base_name}' if self.title else base_name", "def get_full_name(self):\n return self.name #self is base and it hits name filed", "def get_full_name(self):\n\n return self.name", "def get_language_name(self):\n return self.language_name", "def full_name(self) -> Optional[str]:\n return pulumi.get(self, \"full_name\")", "def get_name(self):\n return self.load_name(self.subject)", "def LegacyName(self, default=None):\n return self.data.get('legacy_name', default)", "def display_name(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"display_name\")", "def get_name():\n\n return character['Name']", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def 
get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_real_name(self):\n return self.get_display_name()", "def gen_name():\n return choice(globals()[choice(['oc_males', 'oc_females'])]) + ' ' + choice(na_surnames)", "def getname(self, full: bool = False) -> str:\n return self.name_full if full else self.name", "def get_name(self): \r\n return self.name", "def _get_name(self):\n return self.name", "def name(self):\n\n if not hasattr(self, \"_name\"):\n name = Doc.get_text(self.doc.find(\"PreferredName\", \"\"))\n self._name = name.strip()\n return self._name", "def get_name(self) -> str:\n pass", "def get_short_name(self):\n return self.last_name", "def full_name(self):\n return u\"{} {}\".format(self.pref_first_name(), self.last_name)", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_short_name(self):\r\n return self.first_name", "def label(self) -> Optional[str]:\n return self._itempage.labels.get(\"en\", None)", "def get_name(self):\r\n return self.name", "def get_name():\n return \"Boss\"", "def get_full_name(self):\r\n full_name = '%s' % (self.name)\r\n return full_name.strip()", "def get_displayname(self):\n return self.full_name or self.user.username", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return user_name_for( self.name )", "def short_name(self):\n return {\n 0: \"null\",\n 1: \"eng-us\",\n 2: \"eng-gb\",\n 3: \"chi\",\n 4: \"fre\",\n 5: \"ger\",\n 6: \"rus\",\n 7: \"spa\"\n }[self.value]", "def name_lookup(first_name):\n if first_name == \"Joe\": \n last_name = \"Warren\"\n elif first_name == \"Scott\": \n last_name = \"Rixner\"\n elif first_name == \"John\": \n last_name = \"Greiner\"\n elif first_name == \"Stephen\":\n last_name = \"Wong\"\n else: \n last_name = \"Error: Not an instructor\"\n return last_name", "def get_name(self) -> str:\r\n return self.name", "def scope_name(self):\n if self._course_id:\n try: \n return utils.lookup_course_slug_by_id(self._course_id)\n except:\n print(\"couldn't create human readable course name, using alphanumeric characters of course_id\")\n chars = re.escape(string.punctuation)\n return re.sub(r'['+chars+']', '', self._course_id)\n elif self._partner_id:\n try:\n return utils.lookup_partner_short_name_by_id(self._partner_id)\n except:\n print(\"couldn't create human readable partner name, using course_id\")\n return self._partner_id\n elif self._group_id:\n return self._group_id\n else:\n return 'UNKNOWN'", "def get_name(self):\n pass", "def get_name(self):\n pass", "def get_name(self) :\n\n return self.factory.to_user_name(self.name)", "def local_name(self) -> Optional[str]:\n return None if self.name is None else local_name(self.name)", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def display_name(self) -> Optional[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> Optional[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> Optional[str]:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return 
pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def display_name(self) -> str:\n return pulumi.get(self, \"display_name\")", "def friendly_name(self) -> Optional[str]:\n return pulumi.get(self, \"friendly_name\")", "def sepName(self, englishOnly=False):\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % (name)", "def get_name(self):\n user = self.user\n name = \"%s %s\" % (user.first_name, user.last_name)\n name = name.strip()\n\n return self.display_name or name or user.email or user.username", "def displayName(self):\n return self.tr(self.name())", "def displayName(self):\n return self.tr(self.name())", "def displayName(self):\n return self.tr(self.name())", "def get_name(self):\n return self.name", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def get_full_name(self):\n\n return self.name", "def get_full_name(self):\n\n return self.name", "def get_name(self):\n return self._label", "def FullName(self, default=None):\n return self.data.get('full_name', default)", "def FullName(self, default=None):\n return self.data.get('full_name', default)", "def get_short_name(self):\r\n return self.name", "def get_name(self):\n\n return self.name", "def get_display_name(self):\n return self.display_name", "def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()" ]
[ "0.7934382", "0.73227644", "0.7047755", "0.6930511", "0.68412167", "0.6786411", "0.67855275", "0.6760979", "0.6730141", "0.6662109", "0.6648933", "0.66485107", "0.66408026", "0.6632018", "0.65623486", "0.655018", "0.65476584", "0.6501709", "0.64950544", "0.64857614", "0.647543", "0.6458725", "0.64511305", "0.6439916", "0.64379627", "0.64333516", "0.64333516", "0.64333516", "0.64333516", "0.64333516", "0.6429192", "0.64097625", "0.640501", "0.6389406", "0.6382944", "0.63767886", "0.63502306", "0.6342656", "0.63346267", "0.6329116", "0.6329116", "0.6329116", "0.63276416", "0.6326192", "0.63235915", "0.6321881", "0.6317096", "0.6316917", "0.63093394", "0.630808", "0.6307537", "0.63074094", "0.6303709", "0.6303558", "0.6303558", "0.62939465", "0.6283061", "0.6282206", "0.6282206", "0.6282206", "0.6282206", "0.6282206", "0.6279456", "0.62744117", "0.62744117", "0.62744117", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.6268668", "0.62603784", "0.6257528", "0.6255833", "0.62448484", "0.62448484", "0.62448484", "0.62421465", "0.62406117", "0.62406117", "0.62406117", "0.62406117", "0.62406117", "0.62406117", "0.62386984", "0.62386984", "0.6224392", "0.62150884", "0.62150884", "0.6211343", "0.62078494", "0.62048525", "0.62045044" ]
0.80885744
0
Return name enclosed with {* *} separators
def sepName(self, englishOnly=False):
    name = englishOnly and self.enName or self.name
    if not self.useFileInfo:
        return u'{*%s*}' % name
    return u'{*!%s*}' % name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_name():\n def _extract_name(quoted_name):\n return e.String(quoted_name.subexpression.name)\n yield (\"(λ &[name] . str)\", _extract_name)", "def _extract_name(line: str) -> str:\n tokens = line[19:-2].split(\" {\")\n name = tokens[0]\n return name", "def format_name_string(x: str) -> str:\n # get rid of [#] when present\n if \"{\" in x:\n x = x[:x.find(\"{\")-1]\n if \"var.\" in x.lower():\n p = x.lower().find(\"var.\")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+4] + \" <em class=\\\"species\\\">\" + x[p+4:] + \"</em>\"\n elif \" var \" in x.lower(): # need the spaces around var, because some names have the letters var in them\n p = x.lower().find(\" var \")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+4] + \" <em class=\\\"species\\\">\" + x[p+4:] + \"</em>\"\n elif \"subsp.\" in x.lower():\n p = x.lower().find(\"subsp.\")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+6] + \" <em class=\\\"species\\\">\" + x[p+6:] + \"</em>\"\n elif \" forme \" in x.lower():\n p = x.lower().find(\" forme \")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+6] + \" <em class=\\\"species\\\">\" + x[p+6:] + \"</em>\"\n elif \" f. \" in x.lower():\n p = x.lower().find(\" f. \")\n return \"<em class=\\\"species\\\">\" + x[:p] + \"</em> \" + x[p:p+3] + \" <em class=\\\"species\\\">\" + x[p+3:] + \"</em>\"\n else:\n return \"<em class=\\\"species\\\">\" + x + \"</em>\"", "def format_name(self) -> str:\n decl = self.declaration\n name, _, _ = decl.partition(\"(\")\n return name", "def format_name(self) -> str:\n decl = self.declaration\n name, _ = decl.split(\"(\", 1)\n return name", "def split_name(fullname):", "def format_name(field_name):\r\n if field_name == \"celebration_tier\":\r\n return \"{wLargesse{n\"\r\n return \"{w%s{n\" % field_name.capitalize()", "def get_name() -> str:", "def clean_name(x: str) -> str:\n x = x.replace(\", var.\", \" var.\")\n if \"{\" in x:\n x = x[:x.find(\"{\")-1]\n return x", "def get_name():", "def format_name(self) -> str:\n return self.name", "def get_name():\n\n return character['Name']", "def _process_name(name):\n\n # Unescape HTML entities\n name = unescape(name)\n\n # Remove bracketed stuff on the end\n name = NG_RE.sub('', name).strip() # Nomenclature groups\n name = END_RE.sub('', name).strip(', ') # Words\n name = RATIO_RE.sub('', name).strip(', ') # Ratios\n\n # Remove stuff off start\n name = START_RE.sub('', name).strip()\n\n # Remove balanced start and end brackets if none in between\n name = BRACKET_RE.sub('\\g<1>', name)\n\n # Un-invert CAS style names\n comps = name.split(', ')\n if len(comps) == 2:\n if comps[1].endswith('-'):\n name = comps[0]\n name = '%s%s' % (comps[1], name)\n elif len(comps) > 2:\n name = comps[0]\n for i in range(1, len(comps)):\n if comps[i].endswith('-'):\n name = '%s%s' % (comps[i], name)\n else:\n name = '%s %s' % (name, comps[i])\n return name", "def get_name(header, splitchar=\"_\", items=2):\n if splitchar:\n return \"_\".join(header.split(splitchar)[:items]).lstrip(\">\")\n else:\n return header.lstrip(\">\")", "def name_parser(string):\n return string.replace('\\n', ' ')", "def full_name(self) -> str:\n # return self.separator.join(map(lambda x: x.name, self.path()))\n return self.separator.join(map(lambda x: x.tagged_name, self.path()))", "def named_back_reference(name:str) -> str:\n # TODO error handling \n return f\"\\\\k<{name}>\"", "def printname(bruce):", "def CleanName(self,name):\n name2 = \"\"\n for c in name:\n 
if c == \"(\":\n break\n else: name2+=c\n\n return name2.strip(\"\\n\")", "def fmt(competitor_name: str) -> str:\n name = competitor_name.replace(\"_a\", r\" $\\alpha$ \")\n name = name.replace(\"_b\", r\" $\\beta$ \")\n return name", "def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def name(self):\n\n return self._name.replace(\"[]\", \"\")", "def _var_name_sub(self, sprintf, quote=False):\n q = ''\n if quote:\n q = \"'\"\n name_list = map(lambda x: q + self.cdict[x][0] + q, sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(name_list)", "def get_qname(uri, name):\n if not uri or not name or name[0] in ('{', '.', '/', '['):\n return name\n else:\n return '{%s}%s' % (uri, name)", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last", "def get_name() -> str:\n pass", "def fullname(self, name):\n f, l = name.split(' ')\n self.first = f\n self.last = l", "def print_name(name):\r\n\r\n\r\n return name + \"-apple\"", "def fullNameFor( self, name ):\n if name in self.named: return name\n if name[-3:] == '...':\n best= [ n for n in self.named.keys()\n if n.startswith( name[:-3] ) ]\n if len(best) > 1:\n raise Error(\"Ambiguous abbreviation {!r}, matches {!r}\".format( name, list(sorted(best)) ) )\n elif len(best) == 1: \n return best[0]\n return name", "def visit_name(self, node, children):\n name = ''.join(children)\n return name", "def _get_name(name):\n if \"::\" in name:\n return name.split(\"::\")[1]\n return name", "def _get_name(name):\n if \"::\" in name:\n return name.split(\"::\")[1]\n return name", "def get_name(self):\n return '.'.join(self.name)", "def _username_from_name(self, name):\r\n return name.replace(' ', '_')", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def regular_edge_name(name: str) -> str:\n regular = \"\"\n for char in name:\n if char.isalpha() or char.isdigit():\n regular = f\"{regular}{char}\"\n else:\n regular = f\"{regular}_\"\n if not regular[0].isalpha():\n regular = f\"auto_legalized__{regular}\"\n return regular", "def nice_name():\n\n pass", "def name(self):\n if self.current != b'/':\n self.on_parser_error(\"Name token expected\")\n token = b''\n self.next()\n while self.is_regular:\n if self.current == b'#':\n self.next()\n code = b''\n for i in range(2):\n if not self.is_hex_digit:\n break\n code += self.next()\n if len(code) == 2:\n # must be exactly 2 characters\n token += chr(int(code.decode(DEFAULT_ENCODING), 16)).encode(DEFAULT_ENCODING)\n else:\n # leave as is\n token += b'#' + code\n else:\n token += self.next()\n if not self.empty_names_allowed and not token:\n self.on_parser_error(\"Empty /Name found\")\n\n return Name(token.decode(DEFAULT_ENCODING))", "def get_name(internal: str):\n if '__' in internal:\n return ': '.join(get_name(s) for s in internal.split('__'))\n *path, name = internal.split('::')\n current = config.utils.names\n look_in = [current]\n try:\n for k in path:\n current = current[k]\n look_in.append(current)\n except KeyError:\n # noinspection PyUnboundLocalVariable\n logging.warning('invalid namespace {!r} of {!r}'.format(k, internal))\n look_in.reverse()\n for ns in look_in:\n try:\n val = ns[name]\n if isinstance(val, str):\n return 
val\n elif isinstance(val, dict):\n return val['*this*']\n else:\n raise TypeError('{!r} is neither dict nor str'.format(val))\n except KeyError:\n pass\n logging.warning('Name \"{}\" was not found in the namefile'.format('::'.join(path+[name])))\n return '::'.join(path+[name])", "def print_name(nome, sobrenome):\r\n return nome + \" \" + sobrenome", "def variable_string(self, name):\n return \"$(\" + name + \")\"", "def asName(self, name):\r\n\t\tnewName = \"\"\r\n\t\ttoHigher = False\r\n\t\tfor char in name:\r\n\t\t\tif char in \"_-\":\r\n\t\t\t\ttoHigher = True\r\n\t\t\telse:\r\n\t\t\t\tif toHigher:\r\n\t\t\t\t\tnewName = newName + char.upper()\r\n\t\t\t\telse:\r\n\t\t\t\t\tnewName = newName + char\r\n\t\t\t\ttoHigher = False\r\n\t\treturn newName", "def name() -> str:\n pass", "def name(self):\n return \"%s %s\" % (self.first_name, self.last_name)", "def name(self):\n return f\"{self._name.replace('_', ' ')}\".title()", "def get_name(self):\r\n return ('%s %s' % ( self.first_name, self.last_name ))", "def _name(self):\n return self.arguments[0].split('(')[0]", "def name(self) -> str: # pragma: no cover", "def fullname(self, name):\n\n first, last = name.split(' ')\n self.first = first\n self.last = last", "def format_name(self) -> str:\n return self.declaration", "def _make_name(self, name=None):\n\n if name:\n new_name = name.split(\"/\")[-1].split(\".png\")[0]\n if new_name.startswith((\"AWS-\", \"Amazon-\")):\n new_name = new_name.split(\"-\", 1)[1]\n # Replace non-alphanumeric with underscores (1:1 mapping)\n new_name = re.sub(r'\\W+', '_', new_name)\n return new_name", "def _name(self):\n return self._arguments[0].split('(')[0]", "def name(self):\n\t\tname = self.__class__.__name__.replace('Block', '')\n\t\tname = INITIAL_CAPS.sub(r'\\1 \\2', name)\n\t\treturn CAMEL_CASE.sub(r'\\1 \\2', name)", "def TransformNames(self) -> _n_2_t_0[str]:", "def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)", "def get_python_name(cls, name):\n first_cap_re = re.compile(\"(.)([A-Z](?!s([A-Z])*)[a-z]+)\")\n all_cap_re = re.compile(\"([a-z0-9])([A-Z])\")\n\n s1 = first_cap_re.sub(r\"\\1_\\2\", Utils._clean_name(name))\n return all_cap_re.sub(r\"\\1_\\2\", s1).lower()", "def get_singlet_name(orig_name):\n return \"singlet_{}\".format(orig_name)", "def initialled_name(obj):\n initials = ''.join([name[0] for name in obj.first_names.split(' ')])\n return \"{}, {}\".format(obj.last_names, initials)", "def name_formatting(name: str) -> str:\n \n first_name = name.rstrip()[name.find(',') + 2 : ]\n last_name = name[ : name.find(',')]\n return first_name + ' ' + last_name", "def Escape(name):\n return re.sub(r'[^\\w#-]', '_', name)", "def char_name(character_object, verbose_where=False, watch_list=None):\n watch_list = watch_list or []\n cname = character_object.name\n if character_object in watch_list:\n cname += \"{c*{n\"\n if character_object.player_ob and character_object.player_ob.db.lookingforrp:\n cname += \"|R+|n\"\n if not verbose_where:\n return cname\n if character_object.db.room_title:\n cname += \"{w(%s){n\" % character_object.db.room_title\n return cname", "def standard_name_remapper(orig_name):\n # Remove any trailing parentheses.\n # TODO(tjann): to check if this is safe.\n paren_start = orig_name.find(\"(\")\n if paren_start != -1:\n orig_name = orig_name[:paren_start]\n\n # Removes separating words.\n orig_name = orig_name.replace(\",\", \" \")\n orig_name = orig_name.replace(\"-\", \" \")\n orig_name = orig_name.replace(\"and \", \"\")\n return 
\"\".join([word.capitalize() for word in orig_name.split()])", "def test_names(self):\n obj = dotnet.DotNetNamespace({\"id\": \"Foo.Bar\"}, jinja_env=None, app=None)\n self.assertEqual(obj.name, \"Foo.Bar\")\n self.assertEqual(obj.short_name, \"Bar\")\n\n obj = dotnet.DotNetNamespace(\n {\"id\": \"Foo.Bar.Something`1\"}, jinja_env=None, app=None\n )\n self.assertEqual(obj.name, \"Foo.Bar.Something`1\")\n self.assertEqual(obj.short_name, \"Something`1\")", "def safe_name(self, name):\n\n output = \"\"\n for char in name:\n if char not in '\\\\/<>:\"|?*':\n output += char\n\n return output", "def get_name(self):\n return \"%s %s\" % (\n self.first_name,\n self.last_name\n )", "def name_format(name):\n lst_name, fst_name = name.split(',')\n return ' '.join((fst_name, lst_name))", "def safe_formal_name(name):\n return re.sub(r\"\\s+\", \" \", re.sub(r'[!/\\\\:<>\"\\?\\*\\|]', \"\", name)).strip()", "def getName(self):\n return \"\"", "def get_name_from_item(item, separators=None):\n separators = separators or [',', ';', ':']\n indexes = []\n for sep in separators:\n index = item.find(sep)\n indexes.append(index if index != -1 else len(item))\n \n name = item[:min(indexes)]\n return name", "def name(self):\n return self.raw.get(\"name\")", "def name(self):\n # easy enough\n return \"{0.first} {0.last}\".format(self)", "def name(self) -> str:\n\t\treturn self._raw_result['name']", "def success_new_brass(name):\n return 'Nom de brasserie %s' % name + ' a bien ete ajoute'", "def get_ig_name ( base_name ) :\n return base_name + '-GW'", "def get_name(self):\n if self.name != None: return self.name\n else: return self.get_name_from_items(self.items.values())", "def get_name(self) -> str:\n pass", "def render_name(name_att_list):\n res = ['']\n for k, v in name_att_list:\n v = dn_escape(v)\n res.append(\"%s=%s\" % (k, v))\n res.append('')\n return '/'.join(res)", "def ending_cutter(name: str):\n if name.endswith('ID') and re.match(r'^(?=\\w+[A-Z])(?=\\w+[a-z])\\w+$', name):\n return name[:-2]\n return name", "def _expanded_id(name: str, sep: str = '_') -> str:\n return sep.join([el.lower()\n for el in re.split(r'([A-Z]+[^A-Z]*)', name)\n if el])", "def get_name(self):\n name = ''\n # name contains all adjacent alphanumeric symbol\n while self.current_character.isalnum():\n name += self.current_character\n self.current_character = self.file.read(1)\n self.file.seek(self.file.tell() - 1, 0)\n character = self.file.read(1)\n if character.isalnum():\n pass\n else:\n self.file.seek(self.file.tell() - 1, 0)\n return name", "def latex_name(name):\r\n name = name + '_' #protects against .split('_') failing\r\n if name.startswith('['): #format leading [] as concentration\r\n head, tail = name[1:].rsplit(']', 1)\r\n head = r'[\\ce{%s}]' % head\r\n else:\r\n if '[' in name: # turn internal [] into marked-up subscripts\r\n before, inside, after = re.match(r'([^[]+)\\[(.*)\\]([^]]*)', name).groups() # separates bracketed material\r\n name = r'%s_\\ce{%s}_%s' % (before, inside, after)\r\n head, tail = name.split('_', 1)\r\n if len(head) > 1: # special cases like v12 (=> v_12) and roman multiple letter symbol\r\n if re.match(r'^.[0-9]+$', head): # single character following by integer, i.e. 
v0\r\n head, tail = name[0], name[1:]\r\n else:\r\n head = r'\\mathrm{%s}' % head\r\n subscripts = re.findall(r'(\\\\ce{.*}|[^_]+)_', tail) # tail.split('_') but ignoring underscore within chem mark-up\r\n if subscripts:\r\n return head + r'_{\\mathrm{' + ','.join(subscripts) + '}}'\r\n return head", "def sepName(self, englishOnly=False):\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % (name)", "def _get_pretty_name(name):\n pretty = ''\n if name.countryName:\n pretty += '/C=' + name.countryName\n if name.stateOrProvinceName:\n pretty += '/ST=' + name.stateOrProvinceName\n if name.localityName:\n pretty += '/L=' + name.localityName\n if name.organizationName:\n pretty += '/O=' + name.organizationName\n if name.organizationalUnitName:\n pretty += '/OU=' + name.organizationalUnitName\n if name.commonName:\n pretty += '/CN=' + name.commonName\n if name.emailAddress:\n pretty += '/email=' + name.emailAddress\n return pretty", "def _to_jsonc_name(member_name):\n\n characters = []\n uppercase_next = False\n for character in member_name:\n if character == '_':\n uppercase_next = True\n elif uppercase_next:\n characters.append(character.upper())\n uppercase_next = False\n else:\n characters.append(character)\n return ''.join(characters)", "def get_full_name(self):\n full_name = '{0} {1} {2}'.format(self.last_name, self.first_name, self.patronymic)\n return full_name.strip()", "def make_python_name(self, name):\n # FIXME see cindex.SpellingCache\n for k, v in [('<', '_'), ('>', '_'), ('::', '__'), (',', ''), (' ', ''),\n (\"$\", \"DOLLAR\"), (\".\", \"DOT\"), (\"@\", \"_\"), (\":\", \"_\"),\n ('-', '_')]:\n if k in name: # template\n name = name.replace(k, v)\n # FIXME: test case ? I want this func to be neutral on C valid\n # names.\n if name.startswith(\"__\"):\n return \"_X\" + name\n if len(name) == 0:\n pass\n elif name[0] in \"01234567879\":\n return \"_\" + name\n return name", "def full_name(name: str, *, prefix: str = DEFAULT_METRIC_NAME_PREFIX) -> str:\n return f\"{prefix}{DEFAULT_METRIC_NAME_DELIMITER}{name}\"" ]
[ "0.70919734", "0.69829744", "0.68450254", "0.67303866", "0.6666624", "0.66044873", "0.65953267", "0.65891093", "0.65593725", "0.623893", "0.6218805", "0.61895317", "0.61869186", "0.6163495", "0.6100971", "0.6099947", "0.6092239", "0.607046", "0.60489154", "0.6042493", "0.60297483", "0.60196704", "0.60196704", "0.60196704", "0.60196704", "0.60196704", "0.59820706", "0.59820706", "0.59820706", "0.59820706", "0.59723836", "0.59584624", "0.5956047", "0.59497714", "0.59392816", "0.5907609", "0.5902328", "0.59006584", "0.58909464", "0.58905774", "0.58905774", "0.5884643", "0.58812624", "0.5867738", "0.5867738", "0.5867738", "0.5867738", "0.5867738", "0.5867738", "0.5864715", "0.58419174", "0.5839502", "0.58261853", "0.58222836", "0.58205885", "0.5819315", "0.58178324", "0.5814598", "0.5814492", "0.5814424", "0.5808165", "0.58064514", "0.57872814", "0.5781584", "0.57787645", "0.5773482", "0.5773379", "0.5740995", "0.5739245", "0.5738703", "0.572095", "0.57185394", "0.57167095", "0.57137495", "0.57006043", "0.56991506", "0.5696045", "0.5695825", "0.56944555", "0.5692191", "0.5691018", "0.568776", "0.5687329", "0.56866896", "0.5682151", "0.5680882", "0.56742495", "0.5673338", "0.5663452", "0.5658749", "0.5653632", "0.5653553", "0.5652143", "0.56519014", "0.5640885", "0.56357235", "0.5633493", "0.56246877", "0.56161076", "0.5609878", "0.5606493" ]
0.0
-1
Return name used for labels - add * for required fields
def labelName(self): if self.isRequired: return '%s*' % self.name return self.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label_name\")", "def build_label_text(field_name: str, field: dict):\n\n label = \"\"\n if \"required\" in field:\n label = \" * \" if field.get(\"required\") else \"\"\n\n # If we don't have a label defined, used the field name\n if \"label\" not in field:\n field.update({\"label\": field_name.upper()})\n\n label += field[\"label\"]\n\n return label", "def label_name(self) -> str:\n return pulumi.get(self, \"label_name\")", "def name_field(self):\r\n return 'name'", "def label(self):\r\n return self._name", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def Label(self) -> str:", "def label(self):\n return ''", "def get_name(self):\n return self._label", "def get_labelname(self):\n return self.options['labelname']", "def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name", "def create_label(self, org, name):\n pass", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name", "def get_label():\n inp = option_text('Input label name (leave blank for no label):')\n add_to_collected('label', inp)\n OPTIONS['label'] = inp\n return", "def label(self) -> str:\n return self[\"label\"]", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def label(self) -> str:\r\n\r\n return self.__label", "def getName(self):\n return \"\"", "def name(self):\n\t\t# This is necessary for ColumnLists that are used\n\t\t# for CondDescs as well. 
Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])", "def field_names(self):\n\n entry_time_name = forms_builder.forms.models.FormEntry._meta.get_field('entry_time').verbose_name.title()\n document_title_name = Document._meta.get_field('name').verbose_name.title()\n document_url_name = Document._meta.get_field('url').verbose_name.title()\n\n form = self.form.all()[0]\n return ['user'] \\\n + [document_title_name, document_url_name] \\\n + [f.label\n for f in form.fields.all()] \\\n + [entry_time_name]", "def label(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"label\"))\r\n return self._name", "def get_name(self):", "def get_name(self):", "def get_name():", "def field_label(field_name, bushfire=None):\r\n field_name = FIELD_MAPPING.get(field_name) or field_name\r\n if bushfire:\r\n try:\r\n return bushfire._meta.get_field(field_name).verbose_name\r\n except:\r\n return field_name\r\n else:\r\n return field_name", "def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))", "def get_name(self):\n pass", "def get_name(self):\n pass", "def getName(self):", "def name(self):\r\n pass", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self):\r\n raise NotImplementedError", "def get_name(self):\n return", "def get_label(cls):\r\n return cls._type_name(cls.label)", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self) -> str:", "def name(self):\n pass", "def get_field_name(self):\n if self.language is None:\n lang = \"i18n\"\n else:\n lang = self.get_language()\n\n return build_localized_fieldname(self.original_name, lang)", "def get_name(self):\r\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def get_name(self):\n raise NotImplementedError", "def __str__(self):\n return self.label", "def __str__(self):\n return self.label", "def get_label(cls) -> str:\n return cls._meta.label_lower.split('.')[-1]", "def name(self):\n raise NotImplementedError # pragma: no cover", "def name(self):", "def name(self):", "def name(self):", "def name(self):", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def name(self):\n raise NotImplementedError", "def label(self) -> str: # Enforcing every node defines a label\n pass", "def get_label(cls):\n return cls._type_name(cls.label)", "def get_name(self):\n return None", "def label(self) -> str:\n return self.__parameters.label", "def get_name_type_label(self):\n id, name_type = self.NAME_TYPE_CHOICES[self.name_type]\n return name_type", "def name(self):\n ...", 
"def display_name(self):", "def __str__ ( self ):\n return self.get_label()", "def _generateLabelAndName(self, obj, **args):\n result = []\n label = self._generateLabel(obj, **args)\n name = self._generateName(obj, **args)\n result.extend(label)\n if not len(label):\n result.extend(name)\n elif len(name) and name[0].strip() != label[0].strip():\n result.extend(name)\n return result", "def label(self):\n return self.__label", "def label(self):\n return self.__label", "def label_from_instance(obj):\n if len(obj.first_name) > 0 and len(obj.last_name) > 0:\n return \"{} {}\".format(obj.first_name, obj.last_name)\n else:\n return \"<{}>\".format(obj.username)", "def name(self) -> str:\n ...", "def name(self) -> str:\n ...", "def get_name(self) -> str:\n pass", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def Name(self) -> str:", "def __str__(self):\n return str(self.label)", "def name(self) -> str: # pragma: no cover", "def get_name(self) -> str:\n raise NotImplementedError", "def name(self):\n raise NotImplementedError()", "def name(self):\n raise NotImplementedError()", "def getName(self):\n raise NotImplementedError", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ...", "def getName(self) -> unicode:\n ..." ]
[ "0.7235186", "0.7140648", "0.7047591", "0.7011272", "0.69102323", "0.678387", "0.678387", "0.678387", "0.678387", "0.678387", "0.678387", "0.67811215", "0.67751276", "0.67363954", "0.67283016", "0.6711237", "0.6679455", "0.6657541", "0.66369164", "0.65833545", "0.65801924", "0.65796936", "0.6577343", "0.65438753", "0.6540091", "0.65350425", "0.65324503", "0.65324503", "0.6524761", "0.65048337", "0.6477937", "0.6471583", "0.6471583", "0.6455086", "0.6448511", "0.6438812", "0.6438812", "0.6438812", "0.6438812", "0.64326996", "0.64228714", "0.64156306", "0.6412687", "0.6412687", "0.6412687", "0.6412687", "0.6412687", "0.64075226", "0.6404683", "0.6399427", "0.6397469", "0.6397469", "0.6397469", "0.6397469", "0.63948935", "0.63948935", "0.63864636", "0.6375625", "0.6358173", "0.6358173", "0.6358173", "0.6358173", "0.63553894", "0.63553894", "0.63553894", "0.63553894", "0.63553894", "0.63553894", "0.63553894", "0.63553894", "0.63553894", "0.63553894", "0.6346281", "0.63459575", "0.63446504", "0.6332708", "0.63103247", "0.63058245", "0.62783873", "0.62780243", "0.6278004", "0.62766355", "0.62766355", "0.62732124", "0.6273128", "0.6273128", "0.6269649", "0.62643945", "0.62643945", "0.62643945", "0.62643945", "0.6258846", "0.6254322", "0.62407774", "0.6234041", "0.6234041", "0.6227456", "0.62145275", "0.62145275", "0.62145275" ]
0.82659084
0
Return text for xml attributes
def writeXml(self): text = u' type="%s"' % self.typeName if self.format: text += u' format="%s"' % escape(self.format, treedoc.escDict) if self.prefix: text += u' prefix="%s"' % escape(self.prefix, treedoc.escDict) if self.suffix: text += u' suffix="%s"' % escape(self.suffix, treedoc.escDict) if self.html: text += u' html="y"' if self.isRequired: text += u' required="y"' if self.hidden: text += u' hidden="y"' if self.numLines > 1: text += u' lines="%d"' % self.numLines if self.initDefault: text += u' init="%s"' % escape(self.initDefault, treedoc.escDict) if self.linkAltField: text += u' linkalt="%s"' % escape(self.linkAltField, treedoc.escDict) return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def attr(node: md.Document, name: str) -> str:\n return node.getAttribute(name)", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def domAttributesToString( node ):\n strOut = \"node has %d attribute(s):\\n\" % node.attributes.length;\n for i in range(node.attributes.length):\n attr = node.attributes.item(i);\n strOut += \"- %s:'%s'\\n\" % (attr.name, attr.value );\n return strOut;", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def getCustomAttributeValue(self):\n\t\tpayload = ''\n\t\tif len(self.Attribute) > 0:\n\t\t\tfor x in range(0,len(self.Attribute)):\n\t\t\t\tpayload += \"%s : %s\" % (self.Attribute[x], self.AttributeValue[x])\n\t\t\treturn payload\n\t\telse:\n\t\t\treturn payload", "def _get_attribute(self):\n return self.split_text[1] if len(self.split_text) > 1 else \"\"", "def get_attribute(tdesc, attr_name, required=True):\n # Fields stored in hex format by default.\n default_hex = ('cipher_text', 'iv', 'key')\n\n data = tdesc.find(attr_name)\n if data is None:\n if required:\n raise subcmd.TpmTestError('node \"%s\" does not have attribute \"%s\"' %\n (tdesc.get('name'), attr_name))\n return ''\n\n # Attribute is present, does it have to be decoded from hex?\n cell_format = data.get('format')\n if not cell_format:\n if attr_name in default_hex:\n cell_format = 'hex'\n else:\n cell_format = 'ascii'\n elif cell_format not in ('hex', 'ascii'):\n raise subcmd.TpmTestError('%s:%s, unrecognizable format \"%s\"' %\n (tdesc.get('name'), attr_name, cell_format))\n\n text = ' '.join(x.strip() for x in data.text.splitlines() if x)\n if cell_format == 'ascii':\n return text\n\n # Drop spaces from hex representation.\n text = text.replace(' ', '')\n if len(text) & 3:\n raise subcmd.TpmTestError('%s:%s %swrong hex number size' %\n (tdesc.get('name'), attr_name, utils.hex_dump(text)))\n # Convert text to binary\n value = ''\n for x in range(len(text)/8):\n try:\n value += struct.pack('<I', int('0x%s' % text[8*x:8*(x+1)], 16))\n except ValueError:\n raise subcmd.TpmTestError('%s:%s %swrong hex value' %\n (tdesc.get('name'), attr_name, utils.hex_dump(text)))\n return value", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def __str__(self) -> str:\n result = \"\"\n for attr in self.ATTRS:\n if len(result) != 0:\n result += \"\\n\"\n result += f\"{attr}: {getattr(self, attr)}\"\n\n return result", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def print_attribute(attributes):\n for attribute in attributes:\n print ' ',\n change_color_by_tag(attribute)\n if attribute['ExtAttributes']:\n 
print_extattributes_of_member(attribute['ExtAttributes'])\n print attribute['Type'],\n print attribute['Name']", "def text(self, value, match_option=None):\n return self.attributes(\"text\", value, match_option)", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def get_xml_attrib(el, key):\n for k in el.attrib.keys():\n if key == re.sub(\"^{.*}\", \"\", k):\n return el.attrib[k]", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def attribute_value(self) -> str:\n return pulumi.get(self, \"attribute_value\")", "def getName(self, *args):\n return _libsbml.XMLAttributes_getName(self, *args)", "def attval(self, text,\n whitespace=re.compile('[\\n\\r\\t\\v\\f]')):\n return self.encode(whitespace.sub(' ', text))", "def attval(self, text,\n whitespace=re.compile('[\\n\\r\\t\\v\\f]')):\n return self.encode(whitespace.sub(' ', text))", "def text(self):\n for attr in ['label', 'text']:\n val = self.attribute_value(attr)\n if val:\n return val\n\n return super(Option, self).text", "def attrib(self) -> Any:\n return self.attributes", "def print_attr(self):\n return \"name : {0}\\nprice : {1}\\ndescription : {2}\".format(\n self.name, self.price, self.description\n )", "def getAttrName(self, *args):\n return _libsbml.XMLToken_getAttrName(self, *args)", "def __str__(self):\n return 'Target with value ' + str(self.attributes[AT.VALUE])", "def getCustomAttribute(self):\n\t\treturn self.Attribute", "def read_attribs(self):\n\n attribs = {}\n while self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def printer_attributes_tag(self) :\n self._curattributes = self._printer_attributes\n return self.parseTag()", "def get_attributes(doc):\n\treturn doc.keys()", "def getAttributes(self):\n return _libsbml.XMLToken_getAttributes(self)", "def _get_attribs_string_from_schema(header_attributes):\n attrib_values = [f\"{attr}=\\\"{value}\\\"\" for attr, value in header_attributes.items()]\n final_attrib_string = \" \".join(attrib_values)\n return final_attrib_string", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def attr(elem, attr):\n try:\n return elem[attr]\n except:\n return \"\"", "def get_text_attr():\n csbi = CONSOLE_SCREEN_BUFFER_INFO()\n GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))\n return csbi.wAttributes", "def get_attributes(self) -> Dict[str, str]:\n pass", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def get_attributes(cls):\r\n return [\r\n Attribute('size', '20'),\r\n Attribute('inline', False),\r\n Attribute('label', ''),\r\n ]", "def userattr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"userattr\")", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc 
= '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def get_attribute(self, attribute: str) -> str:\n pass", "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def _get_attrib(self, attrib_path: str, binfo: dict) -> str:\n apath = attrib_path.split('.')\n return self._get_attrib_by_path(apath, binfo)", "def attributes(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"attributes\")", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def __str__(self) -> str:\n command = ['get_attr', AttributeProxy._compose_path(self._path)]\n return self._herbstluftwm.call(command).stdout", "def attributes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"attributes\")", "def getAttrValue(self, *args):\n return _libsbml.XMLToken_getAttrValue(self, *args)", "def getAttrName(self, context):\r\n return self.attr if self.attr is not None else context.attr", "def summary(self):\n return \"%s: %s\" % (self.attribute.name, self.value_as_text)", "def attributes(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")", "def getAttributes(self):\n pass", "def create_descr(self, attr_name):", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def description(self):\n desc = self.title\n ops = []\n for attribute in self.attributes.all():\n value = attribute.value\n if isinstance(value, list):\n ops.append(\n \"%s = '%s'\" % (attribute.type, (\", \".join([str(v) for v in value])))\n )\n else:\n ops.append(\"%s = '%s'\" % (attribute.type, value))\n if ops:\n desc = \"%s (%s)\" % (desc, \", \".join(ops))\n return desc", "def GetAttributes(self):\r\n\r\n return self._attr", "def get_attribute(self, lexeme: str) -> typing.Union[Attributes.Attributes]:\n return self._parent_node.get_attribute(lexeme)", "def get_attribute(self, name):\n\n pass", "def attributes(self):\n return [self._ELE_ATTR]", "def get_attributes(cls):\r\n return [Attribute('size', '20'),\r\n Attribute('label', ''), ]", "def text(self):\n\n txt = self.web_element.get_attribute('value')\n return txt", "def attributes(doc, header, renderer=Attribute, item_class=DefinitionItem):\n items = doc.extract_items(item_class)\n lines = []\n renderer = renderer()\n for item in items:\n renderer.item = item\n lines += renderer.to_rst()\n lines.append('')\n return lines", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def str_attr(attr):\n return str(attr)", "def attribute_summary(self):\n attributes = self.get_attribute_values()\n pairs = [attribute.summary() for attribute in attributes]\n return \", \".join(pairs)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def GetAttribute(self, name):\n ret = libxml2mod.xmlTextReaderGetAttribute(self._o, 
name)\n return ret", "def extensible_attributes():\n return 'extensibleattributedef?'", "def print_extattribute(extattributes):\n for extattribute in extattributes:\n print ' ',\n change_color_by_tag(extattribute)\n print '{NAME}'.format(NAME=extattribute['Name'])", "def getValue(self, *args):\n return _libsbml.XMLAttributes_getValue(self, *args)", "def string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def __str__(self):\n # newline-delimited values of all the attributes\n return \">%s\\n%s\" % (self.Label, self.Sequence)", "def GetAttributeValuesString(self):\n attributes = []\n for attribute_name, attribute_value in sorted(self.__dict__.items()):\n # Not using startswith to improve performance.\n if attribute_name[0] == '_' or attribute_value is None:\n continue\n\n if isinstance(attribute_value, bytes):\n raise TypeError(\n 'Attribute: {0:s} value of type bytes not supported.'.format(\n attribute_name))\n\n if isinstance(attribute_value, dict):\n raise TypeError(\n 'Attribute: {0:s} value of type dict not supported.'.format(\n attribute_name))\n\n attribute_string = '{0:s}: {1!s}'.format(attribute_name, attribute_value)\n attributes.append(attribute_string)\n\n return ', '.join(attributes)", "def toXMLElement(self):\n attribute_element = xml.etree.ElementTree.Element('attribute')\n attribute_element.set('concept', self.concept_ref)\n\n if self.value:\n value_element = xml.etree.ElementTree.Element('value')\n value_element.text = self.value\n\n attribute_element.append(value_element)\n\n return attribute_element", "def get_propattr(self, naam, attr):\r\n h = self._root.find(naam)\r\n if h is None:\r\n h = \"\"\r\n else:\r\n hh = h.get(attr)\r\n if hh is None:\r\n h = \"\"\r\n else:\r\n h = hh\r\n return h", "def attribute_value(self) -> Optional[str]:\n return pulumi.get(self, \"attribute_value\")", "def __getitem__(self, key):\n return self.attrib[key]", "def __getitem__(self, key):\n return self.attrib[key]", "def getAttrURI(self, *args):\n return _libsbml.XMLToken_getAttrURI(self, *args)", "def getAttr(node, name):\n path = \"./attributelist/attribute[@name='%s']/@value\" % name\n n = node.xpathEval2(path)\n if len(n):\n return n[0].content\n else:\n return None", "def groupattr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"groupattr\")", "def value_as_text(self):\n property_name = \"_%s_as_text\" % self.attribute.type\n return getattr(self, property_name, self.value)", "def attributeInfo(*args, allAttributes: bool=True, bool: bool=True, enumerated: bool=True,\n hidden: bool=True, inherited: bool=True, internal: bool=True, leaf: bool=True,\n logicalAnd: bool=True, multi: bool=True, short: bool=True, userInterface:\n bool=True, writable: bool=True, type: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def openTagAtt ( x, attName, attVal ):\n assert str(type(x)) == \"<type 'str'>\"\n assert str(type(attName)) == \"<type 'str'>\"\n assert str(type(attVal)) == \"<type 'str'>\"\n tag = \"<\" + str ( x ) + \" \" + str ( attName ) + \"='\" + str ( attVal ) +\"'>\"\n assert str ( type ( tag ) ) == \"<type 'str'>\"\n return tag", "def __str__(self):\n return \"Attribute(name={},type={},is_required={})\".format(\n self.name, self.type, self.is_required\n )", "def attribute(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"attribute\")", "def attribute(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"attribute\")", "def 
getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def name(self):\n return (\n self._raw_data.get(ATTR_NAME_UNICODE)\n or self._raw_data.get(ATTR_NAME)\n or \"\"\n )", "def test_get_attrib(self):\n self.assertEqual(\"true\", get_attrib(self.xml, \"exists\"))\n self.assertEqual(0, get_attrib(self.xml, \"default\", default=0))\n self.assertEqual(23, get_attrib(self.xml, \"integer\", cast=int))\n self.assertEqual(1.354, get_attrib(self.xml, \"float\", cast=float))\n self.assertRaises(ValueError, get_attrib, *(self.xml, \"noexist\", \"unittest\"))", "def get_attributes(cls):\r\n return [\r\n Attribute('rows', '30'),\r\n Attribute('cols', '80'),\r\n Attribute('hidden', ''),\r\n\r\n # For CodeMirror\r\n Attribute('mode', 'python'),\r\n Attribute('linenumbers', 'true'),\r\n # Template expects tabsize to be an int it can do math with\r\n Attribute('tabsize', 4, transform=int),\r\n ]", "def get_attribute(self, name):\n return self.element.get_attribute(name)", "def attributeType(self) -> unicode:\n ...", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_" ]
[ "0.7418472", "0.7217796", "0.7217796", "0.6662724", "0.6609409", "0.65585464", "0.65371746", "0.6463625", "0.6277733", "0.6217797", "0.6175163", "0.6160595", "0.61249447", "0.61248875", "0.6103389", "0.60987955", "0.6094574", "0.60763466", "0.6030213", "0.60041815", "0.5995793", "0.5958707", "0.59583235", "0.59540135", "0.59540135", "0.5948224", "0.59456223", "0.5925614", "0.5902977", "0.5880386", "0.5874951", "0.58725876", "0.5854682", "0.58154833", "0.5804823", "0.57935244", "0.57858676", "0.57858676", "0.57792574", "0.5767154", "0.57608646", "0.57608646", "0.575214", "0.5751485", "0.5744082", "0.57332784", "0.5730346", "0.57293206", "0.57125354", "0.5698359", "0.56982625", "0.569457", "0.56905186", "0.5690361", "0.56834143", "0.56694335", "0.566221", "0.56606036", "0.5644342", "0.56343144", "0.56211984", "0.5618949", "0.5613722", "0.56084836", "0.55965286", "0.55916154", "0.5581218", "0.55760056", "0.55611753", "0.5559518", "0.555818", "0.5556996", "0.55545545", "0.55410624", "0.5537328", "0.5535048", "0.5528933", "0.5525052", "0.5510849", "0.5508183", "0.54999727", "0.54794127", "0.54741895", "0.54741895", "0.54701024", "0.54647595", "0.5442442", "0.5434145", "0.5432016", "0.54289144", "0.5427994", "0.54256374", "0.54122573", "0.54122573", "0.5409915", "0.5386005", "0.5385292", "0.5384034", "0.537457", "0.5370638", "0.5369938" ]
0.0
-1
Return formatted text for this field
def outputText(self, item, titleMode, internal=False): if self.useFileInfo: item = globalref.docRef.fileInfoItem storedText = item.data.get(self.name, '') if storedText: return self.formatOutput(storedText, titleMode, internal) return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def formatted(self) -> str:\r\n ...", "def format(self) -> str:", "def text(self) -> str:", "def get_as_text(self):\n d = {\n 'user': self.user or self.name,\n 'date': self.submit_date,\n 'text': self.text,\n 'domain': self.site.domain,\n 'url': self.get_absolute_url()\n }\n return _('Posted by %(user)s at %(date)s\\n\\n%(review)s\\n\\nhttp://%(domain)s%(url)s') % d", "def get_text(self):\n inp = \" \"\n if self.link_id:\n inp += \"LINK \" + self.link_id\n inp += self.status + ' '\n if self.node_id:\n inp += \"NODE \" + self.node_id + ' '\n if self.value:\n inp += self.control_type.name + ' ' + str(self.value) + ' '\n if self.time:\n inp += self.time + ' '\n if self.clock_time:\n inp += self.clock_time + ' '\n # TODO: research correct formatting of time, clock_time options\n return inp", "def format(self) -> str:\n return self._format", "def format(self) -> str:\n return self._format", "def getText(self):\r\n return \"\"", "def value_as_text(self):\n property_name = \"_%s_as_text\" % self.attribute.type\n return getattr(self, property_name, self.value)", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def getFormattedText(self):\r\n h = \"00\"\r\n m = \"00\"\r\n s = \"00\"\r\n if(self.seconds < 10):\r\n s = \"0\" + str(self.seconds)\r\n else:\r\n s = str(self.seconds)\r\n\r\n if(self.minutes < 10):\r\n m = \"0\" + str(self.minutes)\r\n else:\r\n m = str(self.minutes)\r\n\r\n if(self.hours < 10):\r\n h = \"0\" + str(self.hours)\r\n else:\r\n h = str(self.hours)\r\n\r\n return h + \":\" + m + \":\" + s", "def get_text(self):\n return self.rule_id + '\\t' + self.rule_text", "def get_text(self):\n return self.get_property('text')", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def _get_FIELD_display(self, field):\n value = getattr(self, field.attname)\n if value is None:\n return\n template = ''\n template += '{:d}' if field.decimals == 0 else '{:.%sf}' % field.decimals\n template += ' ' if field.spaced_display else ''\n template += '{!s:s}'\n return template.format(value, field.unit)", "def TEXT(number, format_type):\n raise NotImplementedError()", "def format(self) -> str:\n return pulumi.get(self, \"format\")", "def text(self) -> str:\n return self.__text", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if self.font else self.text", "def formatName(self):\r\n return self.title.getVal() + \" \" + self.first.getVal() + \" \" + self.last.getVal()", "def getText(self):\n return(' '.join(map(lambda x:x.text,self.getNested())))", "def get_text(self) -> str:\n return self.text", "def text(self):\n return self.__text", "def getText(self):", "def get_text(self):\n return self.text", "def text(self):\n return ''", "def get_text(self) -> str:\n return self._text", "def get_formatted_text(self, n_cols):", "def text(self) -> str:\n return self._impl.get_text()", "def format_item_display(self, obj):\n return u\"%s - %s\" % (escape(obj.nombre),obj.rfc)", "def text(self) -> LocalizedString:\n return self._text", "def as_text(self) -> str:\n txt = ''\n with self._th_lock:\n # purge expired value (reach ttl_s) from values dict\n purge_l = []\n for key, (_value, _timestamp_ms, expire_at) in self._values_d.items():\n if expire_at and time.monotonic() > expire_at:\n purge_l.append(key)\n for 
rm_key in purge_l:\n self._values_d.pop(rm_key)\n # if any value exists, format an exposition message\n if self._values_d:\n # add a comment line if defined\n if self.comment:\n # apply escapes to comment\n esc_comment = str(self.comment)\n for rep_args in [('\\\\', '\\\\\\\\'), ('\\n', '\\\\n')]:\n esc_comment = esc_comment.replace(*rep_args)\n txt += f'# HELP {self.name} {esc_comment}\\n'\n # add a type line if defined\n if self.type is not MetricType.UNTYPED:\n txt += f'# TYPE {self.name} {self.type.value}\\n'\n # add every \"name{labels} value [timestamp]\" for the metric\n for lbl_id_str, (value, ts, _expire_at) in self._values_d.items():\n if self._type is MetricType.HISTOGRAM:\n txt += self._data2txt_histogram(lbl_id_str, value)\n elif self._type is MetricType.SUMMARY:\n txt += self._data2txt_summary(lbl_id_str, value)\n else:\n txt += self._data2txt_default(lbl_id_str, value, ts)\n return txt", "def __str__(self):\n txt = \"%s:\\n\" % self.name\n txt += \" Charge: %.4f\\n\" % self.charge\n txt += \" Radius: %.4f\" % self.radius\n return txt", "def text(self):\n if self.is_root:\n return ''\n elif self.is_comment:\n return self.__value\n elif self.is_flag:\n if self.__value:\n return '[{0}]'.format(self.name)\n else:\n return '!{0}!'.format(self.name)\n else:\n return '[{0}:{1}]'.format(self.name, self.value)", "def __repr__(self) -> str:\n return f\"{self.text}\"", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def text(self) -> str:\n return self._text", "def format(self):\n return self._format", "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "def text(self):\n\n if self.status.full_text:\n return self.status.full_text\n elif self.status.text:\n return self.text\n else:\n return MISSING", "def to_str(self):\n return self.template.format(\n text_color=self.color.to_str(),\n font_type=self.font_type.to_str(),\n font_weight=self.font_weight.to_str(),\n font_size=self.font_size.to_str()\n )", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def text(self):\n return self.name, self.fontfile, self.fontsize", "def plain_text(self) -> str:\n return pulumi.get(self, \"plain_text\")", "def _get_FIELD_humanized_display(self, field):\n value = getattr(self, field.attname)\n if value is None:\n return\n power = max([i for i in utils.POWERS if value // i > 0 and i > 1])\n value /= power\n template = ''\n template += '{:.%sf}' % field.humanized_decimals\n template += ' ' if field.spaced_display else ''\n 
template += utils.POWERS[power]\n template += '{!s:s}'\n return template.format(value, field.unit)", "def format( self ) :\n\n return( self.__format )", "def __repr__(self) -> str:\r\n\r\n saida = \"Format: \"\r\n x = self.getformat()\r\n for _ in range(len(x)):\r\n saida = f\"{saida}{x[_]}\"\r\n if _ < len(x)-1:\r\n saida += \", \"\r\n saida += \"\\n\"\r\n return saida", "def text(self):\n return self.label.text()", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def format_value(text):\n return text.encode('utf8').replace('\\n', ' ').replace('\\r', ' ')", "def get_text(self):\n logging.getLogger(__name__).info(\"Element text: {}\\nby = {}\\nvalue = {}\".format(\n self.driver.find_element(self.by, self.value).text, self.by, self.value))\n return self.driver.find_element(self.by, self.value).text", "def get_instance_text(instance):\n values = []\n for fieldname in registry[instance.__class__]:\n values.extend(resolve_field_value(\n [instance], fieldname.split('__')))\n return u' '.join(values)", "def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% in \" + self.name\n return text", "def text(self, v=''):\n return str(v)", "def _to_text(self, value):\n raise NotImplementedError", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)", "def field_display(obj, field):\n return get_field_value(obj, field)", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n return self._text", "def text(self):\n if hasattr(self,'label'):\n return str(self.label.text())\n else:\n return self.key", "def __str__(self):\n return self.format()", "def text(self):\n parts = [(\"%s\" if isinstance(p, Insert) else p) for p in self.parts]\n parts = [(\"%%\" if p == \"%\" else p) for p in parts] # escape percent\n return \"\".join(parts)", "def Text(self):\n return self._text", "def __str__(self):\n struct_repr = \", \".join([\n \"type: \" + str(self.type),\n \"text: \" + str(self.text)\n ])\n\n return f\"StatusText: [{struct_repr}]\"", "def GetText(self):\r\n \r\n return self._text", "def getText(self):\n return self.text", "def getText(self):\n return self.text", "def to_text(self, floatformat: str = \".6f\") -> str:\n return self.val.to_text(floatformat)", "def __str__(self):\n #Format data from default fields\n template = \"{number:4}|{rep:4}|{time:5}{priority:+2}|\" \\\n \"{record_type:8}|{name:17}\"\n default_fields = template.format(**self)\n \n #Format data from custom fields\n custom_field_list = []\n for label in self.custom_labels:\n 
custom_field_list.append(\"|{:17}:{!s:<5}\".format(label,\n self[label]))\n custom_fields = \"\".join(custom_field_list)\n \n return default_fields + custom_fields", "def strftime(self, format):\n return \"\"", "def strftime(self, format):\n return \"\"", "def strftime(self, format):\n return \"\"", "def __str__(self):\n\t\treturn self.text", "def outputText(self, item, titleMode, internal=False):\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def text(self):\n # type: () -> str\n return self._text", "def get_text(self):", "def text(self):\n return self.full_text", "def __str__(self):\n return \"%s\\n\" % self.text + \" \" * self.col + \"^\"", "def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% on line \" + self.line\n return text", "def _entity_as_text(self):\n return str(self.value)", "def format(self, record):\n message = record.getMessage()\n asctime = self.formatTime(record, self.datefmt)\n name = yellow(record.name)\n\n s = \"%(timestamp)s %(levelname)s %(name)s \" % {\n \"timestamp\": green(\"%s,%03d\" % (asctime, record.msecs), bold=True),\n \"levelname\": self.LEVELS[record.levelname],\n \"name\": name,\n }\n\n if \"\\n\" in message:\n indent_length = len(re_color_codes.sub(\"\", s))\n message = message.replace(\"\\n\", \"\\n\" + \" \" * indent_length)\n\n s += message\n return s", "def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% in \" + self.name\n text += \" [\" + self.file_name + \"]\"\n return text", "def text(self):\n\t\treturn ' '.join([self.write_components[x] for x in self.write_components])" ]
[ "0.75734353", "0.7379207", "0.7205578", "0.68481845", "0.67844886", "0.67808527", "0.67703915", "0.67703915", "0.67655444", "0.6745665", "0.6687435", "0.66469747", "0.6644202", "0.66413474", "0.65842545", "0.65842545", "0.65842545", "0.65842545", "0.65842545", "0.65741307", "0.6567402", "0.6558896", "0.6555393", "0.655068", "0.6547239", "0.6544499", "0.6535529", "0.6517592", "0.6480058", "0.6460372", "0.6459885", "0.64553267", "0.6446324", "0.6445358", "0.64076585", "0.6392125", "0.6380471", "0.6367286", "0.6361198", "0.6353135", "0.6348966", "0.63427186", "0.6320017", "0.6318294", "0.63155836", "0.6308729", "0.6299974", "0.6299974", "0.6299974", "0.6299974", "0.6299974", "0.62890583", "0.62872475", "0.6284665", "0.6283615", "0.62564605", "0.62539566", "0.6250457", "0.6246106", "0.6245501", "0.6238378", "0.62316114", "0.62306285", "0.6227439", "0.6216431", "0.6214085", "0.6207841", "0.6207841", "0.62025124", "0.618629", "0.6185114", "0.6185114", "0.6185114", "0.6185114", "0.6185114", "0.6185114", "0.6184535", "0.6183745", "0.6183568", "0.61735356", "0.61702335", "0.61699766", "0.6169714", "0.6169714", "0.6168279", "0.616314", "0.6158217", "0.6158217", "0.6158217", "0.6156037", "0.613794", "0.6137155", "0.61364275", "0.6114419", "0.61141664", "0.6103432", "0.61010504", "0.6098953", "0.609143", "0.60909635", "0.60876095" ]
0.0
-1
Remove HTML Markup and unescape entities
def removeMarkup(self, text): text = TextFormat.stripTagRe.sub('', text) return unescape(text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unescape_html_entities(self, text):\n text = html.unescape(text)\n return text", "def unescape_html(text):\n\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unicode_char(int(text[3:-1], 16))\n else:\n return unicode_char(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = unicode_char(htmlentities.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n\n return re.sub(r\"&#?\\w+;\", fixup, text)", "def unhtmlify(html):\n return unescape(re.sub(r'<.*?>', '', html))", "def cleaningHTML(text):\n # HTML-Entities decodieren\n h = html.parser.HTMLParser(convert_charrefs=True)\n text = h.unescape(text)\n \n # Geschützte Leerzeichen löschen\n text = re.sub('\\u00A0', \" \", text)\n text = re.sub(r'&', r'&amp;', text)\n text = re.sub(r'<a .*?>', r'', text)\n text = re.sub(r'</a>', r'', text)\n return text", "def unescape(s):\n\n\tif s is None:\n\t\treturn \"\"\n\n\t# html entities\n\ts = s.replace(\"&#13;\", \"\\r\")\n\n\t# standard html\n\ts = s.replace(\"&lt;\", \"<\")\n\ts = s.replace(\"&gt;\", \">\")\n\ts = s.replace(\"&amp;\", \"&\") # this has to be last\n\n\treturn s", "def escape_html_entity(text):\n parser = HTMLParser.HTMLParser()\n return parser.unescape(text)", "def HtmlUnescape(text):\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)", "def filter_html(self, text):\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n print \"Value Error\"\n pass\n else:\n # named entity\n try:\n if text[1:-1] in (\"amp\",\"gt\",\"lt\"):\n return text\n else:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n print \"keyerror\"\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)", "def _decode_html_entities(text: str) -> str:\n return html.unescape(text)", "def html_unescape(text):\n return html.unescape(text)", "def convertHTML(self, text):\n return text.replace('&#39;', \"'\")", "def unescape(s):\r\n s = s.replace(\"&amp;\", \"&\")\r\n s = s.replace(\"&lt;\", \"<\")\r\n s = s.replace(\"&gt;\", \">\")\r\n s = s.replace(\"&#34;\", '\"')\r\n s = s.replace(\"&#39;\", \"'\")\r\n return s", "def safeHTML(s):\n parser = StrippingParser()\n parser.feed(s)\n parser.close()\n parser.cleanup()\n return parser.result", "def html_unescape(text):\n\n def fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character reference\n try:\n if text[:3] == \"&#x\":\n return chr(int(text[3:-1], 16))\n else:\n return chr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n # named entity\n try:\n text = chr(html.entities.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text # leave as is\n return re.sub(\"&#?\\w+;\", fixup, text)", "def unescape(t):\r\n return (t\r\n .replace(\"&amp;\", \"&\").replace(\"&lt;\", \"<\").replace(\"&gt;\", \">\")\r\n .replace(\"&#39;\", \"´\").replace(\"&quot;\", '\"').replace('&apos;',\"'\")\r\n )", "def unescape(text):\n import re, htmlentitydefs\n def 
fixup(m):\n text = m.group(0)\n if text[:2] == \"&#\":\n # character ref\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1],1))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n else:\n #named entity\n try:\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])\n except KeyError:\n pass\n return text\n return re.sub(\"&#?\\w+;\", fixup, text)", "def stripHtml(html):\n\t# kinda works\n\tres = html.replace(\"&lt;\", \"<\")\n\tres = res.replace(\"&gt;\", \">\")\n\tres = re.sub(r'<[^>]+>', '', res)\n\treturn res", "def unescape(text):\r\n\r\n def fixup(m):\r\n text = m.group(0)\r\n if text[:2] == '&#':\r\n try:\r\n if text[:3] == '&#x':\r\n return unichr(int(text[3:-1], 16)).encode('utf-8')\r\n return unichr(int(text[2:-1])).encode('utf-8')\r\n except ValueError:\r\n logger.info('error de valor')\r\n\r\n else:\r\n try:\r\n import htmlentitydefs\r\n text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode('utf-8')\r\n except KeyError:\r\n logger.info('keyerror')\r\n except:\r\n pass\r\n\r\n return text\r\n\r\n return re.sub('&#?\\\\w+;', fixup, text)", "def xhtml_unescape(value):\r\n return re.sub(r\"&(#?)(\\w+?);\", _convert_entity, _unicode(value))", "def striphtml(content):\n\tif not isinstance(content, basestring):\n\t\treturn u''\n\tcontent = re_script.sub(u'',content)\n\tdoc = html.fragment_fromstring(content, create_parent=True)\n\tclean.clean_html(doc)\n\treturn unicode(re_nl.sub(u'', doc.text_content()))", "def htmldecode(s):\n\ts = s.replace(\"&lt;\", \"<\")\n\ts = s.replace(\"&gt;\", \">\")\n\ts = s.replace(\"&quot;\", \"\\\"\")\n\ts = s.replace(\"&apos;\",\"'\")\n\ts = s.replace(\"&amp;\", \"&\")\n\treturn s", "def RemoveHTMLTags(self, data):\n return self.UnescapeHTMLEntities(lxml.html.fromstring(data).text_content())", "def htmlunescape(value):\n\n retVal = value\n if value and isinstance(value, str):\n codes = ((\"&lt;\", '<'), (\"&gt;\", '>'), (\"&quot;\", '\"'),\n (\"&nbsp;\", ' '), (\"&amp;\", '&'), (\"&apos;\", \"'\"))\n retVal = reduce(lambda x, y: x.replace(y[0], y[1]), codes, retVal)\n try:\n retVal = re.sub(\n r\"&#x([^ ;]+);\", lambda match: chr(int(match.group(1), 16)), retVal)\n except ValueError:\n pass\n return retVal", "def __html_unescape(self, text):\n\n return re.sub(\"&(%s);\" % \"|\".join(name2codepoint),\n lambda m: unichr(name2codepoint[m.group(1)]),\n text)", "def remove_html_tags_fun(self):\n cleaner = re.compile('<.*?>')\n cleaned_text = re.sub(cleaner, '', self.doc)\n cleaned_text = re.sub('[\\n\\t]', '', cleaned_text)\n self.doc = cleaned_text", "def decode_html_entities(html):\n def decode(m):\n html = m.group(0)\n if html[:2] == \"&#\":\n try:\n if html[:3] == \"&#x\":\n return unichr(int(html[3:-1], 16))\n else:\n return unichr(int(html[2:-1]))\n except ValueError:\n pass\n else:\n try:\n html = unichr(name2codepoint[html[1:-1]])\n except KeyError:\n pass\n return html\n return re.sub(\"&#?\\w+;\", decode, html.replace(\"&amp;\", \"&\"))", "def unescape(text):\n if isinstance(text, list):\n for i, t in enumerate(text):\n t = t.replace(r'&amp;', r'\\&')\n t = t.replace(r'&lt;', r'<')\n t = t.replace(r'&gt;', r'>')\n text[i] = t\n else:\n text = text.replace(r'&amp;', r'\\&')\n text = text.replace(r'&lt;', r'<')\n text = text.replace(r'&gt;', r'>')\n return text", "def remove_html(text):\n return re.sub(r'<.*?>', r'', text)", "def escape_html(s):\n\treturn s. \\\n\t\treplace(\"<\", \"&lt;\"). \\\n\t\treplace(\">\", \"&gt;\"). \\\n\t\treplace(\"&\", \"&amp;\"). \\\n\t\treplace(\" \", \"&nbsp;\"). 
\\\n\t\treplace(\"\\t\", \"&nbsp;&nbsp;&nbsp;&nbsp;\")", "def unescape(string, using_unicode=False):\n\t\t\n\tif using_unicode:\n\t\tsub_function = sub_from_html\n\telse:\n\t\tsub_function = lambda m: sub_from_html(m).encode('ascii', 'replace')\n\t\n\treturn re.sub(r\"&#?\\w+;\", sub_function, string)", "def unescape(s):\n return (\n s.replace(\"&amp;\", \"&\")\n .replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&#039;\", \"'\")\n )", "def remove_html(txt):\r\n TAG_RE = re.compile(r'<[^>]+>')\r\n return TAG_RE.sub(\"\", txt).strip()", "def clean_html(text):\n cleanr = re.compile(\"<.*?>\")\n clean_text = re.sub(cleanr, \"\", text)\n return clean_text", "def UnescapeHTMLEntities(self, data):\n if '#39' not in htmlentitydefs.name2codepoint:\n htmlentitydefs.name2codepoint['#39'] = 39\n return re.sub('&(%s);' % '|'.join(htmlentitydefs.name2codepoint),\n lambda m: unichr(htmlentitydefs.name2codepoint[m.group(1)]),\n data)", "def strip_html(text: str, **serializer_kwargs: bool):\n cleaner = get_cleaner(**serializer_kwargs)\n text = cleaner.clean(text)\n return text", "def strip_html(inputString):\r\n return BeautifulSoup(inputString, \"html.parser\").text", "def remove_html_tags(text):\n print('VOU REMOVER AS TAGS DA STRING')\n clean = re.compile('<.*?>')\n print('',re.sub(clean, '', text))\n return re.sub(clean, '', text)", "def remove_html_tags(self,text):\n #https://medium.com/@jorlugaqui/how-to-strip-html-tags-from-a-string-in-python-7cb81a2bbf44\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def xss_strip_all_tags(s):\n return s\n def fixup(m):\n text = m.group(0)\n if text[:1] == \"<\":\n return \"\" # ignore tags\n if text[:2] == \"&#\":\n try:\n if text[:3] == \"&#x\":\n return unichr(int(text[3:-1], 16))\n else:\n return unichr(int(text[2:-1]))\n except ValueError:\n pass\n elif text[:1] == \"&\":\n import htmlentitydefs\n entity = htmlentitydefs.entitydefs.get(text[1:-1])\n if entity:\n if entity[:2] == \"&#\":\n try:\n return unichr(int(entity[2:-1]))\n except ValueError:\n pass\n else:\n return unicode(entity, \"iso-8859-1\")\n return text # leave as is\n \n return re.sub(\"(?s)<[^>]*>|&#?\\w+;\", fixup, s)", "def unescape_tweet(tweet):\r\n return html.unescape(tweet)", "def remove_html( html):\n return html2txt(html)", "def html2unicode(text):\n html_entity_digit_re = re.compile(r\"&#\\d+;\")\n html_entity_alpha_re = re.compile(r\"&\\w+;\")\n amp = \"&amp;\"\n\n # digit\n ents = set(html_entity_digit_re.findall(text))\n if len(ents) > 0:\n for ent in ents:\n entnum = ent[2:-1]\n entnum = int(entnum)\n text = text.replace(ent, chr(entnum))\n\n # alpha\n ents = set(html_entity_alpha_re.findall(text))\n ents = filter((lambda x: x != amp), ents)\n for ent in ents:\n entname = ent[1:-1]\n text = text.replace(ent, chr(html.entities.name2codepoint[entname]))\n\n text = text.replace(amp, \" and \")\n\n return text", "def remove_html_tags(text):\n clean = re.compile('<.*?>|&ndash; ')\n return re.sub(clean, '', text)", "def remove_html_tags(text):\n clean = re.compile('<.*?>|&ndash; ')\n return re.sub(clean, '', text)", "def fix_entities(html_string):\n html_string = html_string.replace('&nbsp;', u'\\u00a0')\n return html_string", "def remove_html(x: str) -> str:\n regex = r\"<.+?>\"\n return re.sub(regex, \"\", x)", "def remove_html_tags(text: str) -> str:\n return re.sub('<.*?>', '', text).strip()", "def remove_tags(raw):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, ' ', raw)\n return cleantext", "def 
clean_tag(data):\n # TODO: make this a method of Tag?\n return escape_html(data).replace('\"', '&quot;').replace(\"'\", '&#39')", "def promed_html_to_formatted_text(html):\n # This is to fix some cases in malformed html where <s aren't esacaped.\n # >s can be parsed without escaping.\n normed_html = html.\\\n replace(\"<<\", \"&lt;<\").\\\n replace(\"<http\", \"&lt;http\").\\\n replace(\"< \", \"&lt; \")\n return dom_tree_to_formatted_text(BeautifulSoup(normed_html))", "def strip_markup(text):\n html_tag_regex = re.compile(\n r'<'\n r'[(--)\\?\\!\\%\\/]?'\n r'[a-zA-Z0-9#\\\"\\=\\s\\.\\;\\:\\%\\&?!,\\+\\*\\-_\\/]+'\n r'\\/?>',\n re.MULTILINE | re.UNICODE\n )\n if text:\n text = re.sub(html_tag_regex, ' ', text)\n return text", "def strip_html(unclean):\n # We make this noop for non-string, non-collection inputs so this function can be used with higher-order\n # functions, such as rapply (recursively applies a function to collections)\n if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:\n return unclean\n return bleach.clean(unclean, strip=True, tags=[], attributes=[], styles=[])", "def remove_html_tags(text: str) -> str:\n clean = re.compile('<.*?>')\n return re.sub(clean, '', str(text))", "def _escape(html):\n return encoding.force_unicode(html).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def remove_html_tags(text):\r\n clean = re.compile('<.*?>')\r\n return re.sub(clean, '', text)", "def htmlescape(s):\n if isinstance(s, htmltext):\n return s\n else:\n s = stringify(s)\n # inline _escape_string for speed\n s = s.replace(\"&\", \"&amp;\") # must be done first\n s = s.replace(\"<\", \"&lt;\")\n s = s.replace(\">\", \"&gt;\")\n s = s.replace('\"', \"&quot;\")\n return htmltext(s)", "def _strip_excerpt(self, raw_html):\n clean_regex = re.compile(\"<.*?>\")\n clean_text = re.sub(clean_regex, \"\", raw_html)\n return html.unescape(clean_text).replace(\"\\n\", \"\")", "def unescape(self, string):\r\n def convert(matches):\r\n text = matches.group(0)\r\n # Character reference\r\n if text[:2] == \"&#\":\r\n try:\r\n if text[:3] == \"&#x\":\r\n return H.unicode_chr(int(text[3:-1], 16))\r\n else:\r\n return H.unicode_chr(int(text[2:-1]))\r\n except ValueError:\r\n pass\r\n # Named entity\r\n else:\r\n try:\r\n # Following are not needed to be converted for XML\r\n if text[1:-1] == \"amp\" or text[1:-1] == \"gt\" or text[1:-1] == \"lt\":\r\n pass\r\n else:\r\n text = H.unicode_chr(name2codepoint[text[1:-1]])\r\n except KeyError:\r\n pass\r\n return text\r\n return re.sub(\"&#?\\w+;\", convert, string)", "def cleanXMLfromSpecialChars(self,line):\n return str(line).replace(\"&\", \"&amp;\").replace(\"\\\"\",\"&quot;\").replace(\"<\",\"&lt;\").replace(\">\",\"&gt;\").replace(\"'\",\"&apos;\")", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>|\\\\n')\n return re.sub(clean, '', text)", "def clean_html(soup):\n html = str(soup.findAll('p', text=True)).strip()\n tags = re.compile('<.*?>')\n clean_2 = re.sub(tags, '', html)\n line_removed = clean_2.replace('\\n', ' ').replace('\\r', '').replace('’', ' ')\n return re.sub(r\"[-()\\\"#”/@“—;:<>{}'`+=~|!?,]\", \"\", line_removed).strip()", "def extract_html(tag):\n s = unescape(remove_all_empty_tags(tag).renderContents())\n if type(s) != unicode:\n return s.decode('utf-8')\n return s", "def _remove_html_tags(self, text: str) -> str:\n pattern = r\"\"\"\n (?x) # Turn on free-spacing\n <[^>]+> # Remove <html> tags\n | 
&([a-z0-9]+|\\#[0-9]{1,6}|\\#x[0-9a-f]{1,6}); # Remove &nbsp;\n \"\"\"\n return re.sub(pattern, \" \", str(text))", "def clean(string):\r\n if string is None or not string: return ''\r\n string = html.unescape(string)\r\n string = unicodedata.normalize('NFC', string)\r\n string = unescape(string)\r\n string = html.escape(string)\r\n string = unicodedata.normalize('NFC', string)\r\n return string", "def strip_html(text):\n soup = BeautifulSoup(text, \"html.parser\")\n return soup.get_text()", "def unescape(self, text):\r\n try:\r\n stash = self.markdown.treeprocessors['inline'].stashed_nodes\r\n except KeyError:\r\n return text\r\n def get_stash(m):\r\n id = m.group(1)\r\n value = stash.get(id)\r\n if value is not None:\r\n try:\r\n return self.markdown.serializer(value)\r\n except:\r\n return '\\%s' % value\r\n \r\n return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)", "def strip_html_tags(text):\r\n soup = BeautifulSoup(text, 'lxml')\r\n stripped_text = soup.get_text(separator=\" \")\r\n return stripped_text", "def norm_html_from_html(html):\n if not isinstance(html, unicode):\n html = html.decode('utf-8')\n html = _markdown_email_link_re.sub(\n _markdown_email_link_sub, html)\n if sys.platform == \"win32\":\n html = html.replace('\\r\\n', '\\n')\n return html", "def unescape(input):\n output=atpic.cleaner_escape.unescape(input)\n return output", "def escape(cls, html):\n return (\"%s\" % (html)).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def strip_html(unclean, tags=[]):\n # We make this noop for non-string, non-collection inputs so this function can be used with higher-order\n # functions, such as rapply (recursively applies a function to collections)\n if not isinstance(unclean, basestring) and not is_iterable(unclean) and unclean is not None:\n return unclean\n return bleach.clean(unclean, strip=True, tags=tags, attributes=[], styles=[])", "def remove_html_tags(text):\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def remove_html_tags(data):\n p = re.compile(r'<.*?>')\n return p.sub('', data)", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text)", "def make_html_safe(s):\n return s.replace(\"<\", \"&lt;\").replace(\">\", \"&gt;\")", "def clean_up_text(text):\n text = html.unescape(text)\n return remove_emoji(text)", "def scrubHTML( html ):\n parser = StrippingParser()\n parser.feed( html )\n parser.close()\n return parser.result", "def htmlquote(text):\r\n text = text.replace(\"&\", \"&amp;\") # Must be done first!\r\n text = text.replace(\"<\", \"&lt;\")\r\n text = text.replace(\">\", \"&gt;\")\r\n text = text.replace(\"'\", \"&#39;\")\r\n text = text.replace('\"', \"&quot;\")\r\n return text", "def escape(html):\n if not isinstance(html, unicode):\n if not isinstance(html, str):\n html = unicode(html)\n else:\n html = unicode(html, 'utf-8')\n return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')", "def RemoveHTMLTags(data):\n\n p = re.compile(r'<[^<]*?>')\n return p.sub('', data)", "def clean_html(input):\n p = HTMLParser(tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))", "def 
remove_html_tags(text):\n if type(text) is pd.core.series.Series or type(text) is str:\n text = text.replace(\"'\", \" \").replace('\"', \" \")\n clean = re.compile('<.*?>')\n return re.sub(clean, ' ', text)\n return text", "def convert_html():\n return", "def strip_html_tags(text):\n soup = BeautifulSoup(text, \"html.parser\")\n stripped_text = soup.get_text(separator=\" \")\n return stripped_text", "def strip_html_tags(text):\n soup = BeautifulSoup(text, \"html.parser\")\n stripped_text = soup.get_text(separator=\" \")\n return stripped_text", "def remove_html_tags(text):\n import re\n clean = re.compile('<.*?>')\n return re.sub(clean, '', text).rstrip('...')", "def textilize(s):\n s = s.replace(\"<p>\", \" \").replace('&nbsp;', ' ')\n return _re_html.sub(\"\", s)", "def remove_html_tags(text):\n tag_pattern = re.compile(r'<[^>]+>')\n return tag_pattern.sub('', text)", "def unhtml(cls, text):\n parser = cls()\n parser.feed(text)\n return parser", "def escape_html(self, text):\n return cgi.escape(text, quote=True). \\\n replace(u'\\n', u'<br />'). \\\n replace(u'\\t', u'&emsp;'). \\\n replace(u' ', u' &nbsp;')", "def _strip_tags(value):\r\n return re.sub(r'<[^>]*?>', ' ', force_unicode(value))", "def sanitize_html(input):\n p = HTMLParser(tokenizer=HTMLSanitizer, tree=treebuilders.getTreeBuilder(\"dom\"))\n dom_tree = p.parseFragment(input)\n walker = treewalkers.getTreeWalker(\"dom\")\n stream = walker(dom_tree)\n\n s = HTMLSerializer(omit_optional_tags=False)\n return \"\".join(s.serialize(stream))", "def remove_html_tags(html_text: str) -> str:\n document = fromstring(html_text)\n text = document.text_content()\n return text.strip()", "def strip_html_tags(text):\n if text is np.nan:\n return text\n regex = re.compile(r\"<.*?>\")\n return re.sub(regex, \"\", text)", "def strip_html(html_str):\n return bleach.clean(html_str, tags=[], attributes={},\n styles=[], strip=True)", "def escape(self, text):\n\t\tif not self.escape_html or text is None:\n\t\t\treturn text\n\n\t\treturn (\n\t\t\ttext.replace('&', '&amp;').replace('<', '&lt;')\n\t\t\t.replace('>', '&gt;').replace('\"', '&quot;').replace(\"'\", '&#39;')\n\t\t)", "def htmlstr(self, unsafe) :\n\t\tunsafe = string.replace(unsafe, '&', '&amp;')\n\t\tunsafe = string.replace(unsafe, '<', '&lt;')\n\t\treturn string.replace(unsafe, '>', '&gt;')", "def RemoveHTMLTags(text, \\\n separator=''):\n clean = re.compile('<.*?>')\n return re.sub(clean, separator, text)", "def clean_html(html):\n html = re.sub(r\"(?s)<!--(.*?)-->[\\n]?\", \"\\\\1\", html)\n html = re.sub(r\"<!--\", \"\", html)\n if html == '':\n return ''\n s = MLStripper()\n s.feed(html)\n return s.get_data().strip()" ]
[ "0.8015007", "0.77196825", "0.7664597", "0.75644994", "0.75601804", "0.74361134", "0.74127716", "0.7398852", "0.7363433", "0.72552484", "0.72305125", "0.7151913", "0.71092284", "0.7063701", "0.70483255", "0.7035279", "0.7028564", "0.702725", "0.7001328", "0.6975736", "0.6972302", "0.69380116", "0.68265235", "0.6825061", "0.68250316", "0.6797911", "0.67438066", "0.67286986", "0.6723868", "0.67214006", "0.67152077", "0.66977024", "0.66877013", "0.6686008", "0.6671127", "0.666334", "0.66524285", "0.66241527", "0.6604034", "0.65967214", "0.65905666", "0.6588564", "0.65640366", "0.65640366", "0.6539842", "0.65310395", "0.65248245", "0.6524489", "0.6516609", "0.6515363", "0.6511896", "0.6494842", "0.6489486", "0.64682895", "0.6463524", "0.64627576", "0.64626783", "0.64481854", "0.6438346", "0.64367366", "0.6435447", "0.64351916", "0.64301646", "0.6428267", "0.64270455", "0.64267063", "0.6422663", "0.6404483", "0.63845545", "0.6382652", "0.63753736", "0.6372316", "0.63713527", "0.6359051", "0.6359051", "0.6351921", "0.63506097", "0.6341211", "0.63369143", "0.6312164", "0.6299874", "0.62995934", "0.62790936", "0.6276029", "0.6266556", "0.6266556", "0.6240021", "0.6236506", "0.621265", "0.6205024", "0.6203045", "0.6199363", "0.61829174", "0.61807406", "0.61755896", "0.6155677", "0.61533123", "0.61249655", "0.6120712", "0.6092374" ]
0.68237066
25
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False):
    prefix = self.prefix
    suffix = self.suffix
    if titleMode:
        if self.html:
            storedText = self.removeMarkup(storedText)
        if globalref.docRef.formHtml:
            prefix = self.removeMarkup(prefix)
            suffix = self.removeMarkup(suffix)
    else:
        if not self.html:
            storedText = escape(storedText).replace('\n', '<br />')
        if not globalref.docRef.formHtml:
            prefix = escape(prefix)
            suffix = escape(suffix)
    return u'%s%s%s' % (prefix, storedText, suffix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def format_title(self, data):\n return data", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_text(downgrade_titles=False):", "def PROPER(text):\n return text.title()", "def title(self, string):\n return self.bold(string)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def print_with_title(title, content, before='', after='', hl='='):\n cont_maxlen = max(len(s) for s in content.split('\\n'))\n hl_len = max(cont_maxlen, len(title))\n print('{}{}\\n{}\\n{}{}'.format(before, title, hl * hl_len, content, after))", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def helptext(self):\n return \"\"", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def get_title():", "def __str__(self):\n date_str = self.date.strftime(self.journal.config['timeformat'])\n title = 
date_str + \" \" + self.title\n body = self.body.strip()\n\n return \"{title}{sep}{body}\\n\".format(\n title=title,\n sep=\"\\n\" if self.body else \"\",\n body=body\n )", "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def output_sep_title(title):\n print(f\"{sep_mark}\\t{title}{sep_mark}\")", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def title(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"=\")))", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', '&quot;')\n return title\n else:\n return u''", "def escape_if_needed(text, options):\n if hasattr(text, '__html__'):\n # Text has escape itself:\n return to_string(text.__html__())\n if need_to_escape(options):\n return escape(to_string(text))\n return to_string(text)", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_rst_title_char(level):\n chars = (u'=', u'-', u'`', u\"'\", u'.', u'~', u'*', u'+', u'^')\n if level < len(chars):\n return chars[level]\n return chars[-1]", "def 
process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def escape_single_quotes(custom_data):\n # https://stackoverflow.com/questions/10569438/how-to-print-unicode-character-in-python\n # https://regex101.com/r/nM4bXf/1\n if re.search(\"(?<!u)'(?!:|}|,)\", custom_data.get('title_name', '')):\n z = re.sub(r\"(?<!u)'(?!:|}|,)\", '\\\\\\'', custom_data.get('title_name', None))\n\n custom_data['title_name'] = z\n return custom_data\n return custom_data", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def format_title(self, ticket_id, subject):\n # TODO: strip block tags?\n title = \"#%i %s\" % (ticket_id, subject)\n return title.strip()", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def title(self, value):\n if len(value):\n self._title = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._title.append('')", "def emphasize(text: str, tablefmt: str | TableFormat, strong: bool = False) -> str:\n # formats a title for a table produced using tabulate,\n # in the formats tabulate understands\n if tablefmt in [\"html\", \"unsafehtml\", html_with_borders_tablefmt]: # type: ignore\n if strong:\n emph_text = f\"<strong>{text}</strong>\"\n else:\n emph_text = f\"<em>{text}</em>\"\n elif tablefmt in [\"latex\", \"latex_raw\", \"latex_booktabs\", \"latex_longtable\"]:\n if strong:\n emph_text = r\"\\textbf{\" + text + r\"}\"\n else:\n emph_text = r\"\\emph{\" + text + r\"}\"\n else: # use the emphasis for tablefmt == \"pipe\" (Markdown)\n star = \"**\" if strong else \"*\"\n emph_text = f\"{star}{text}{star}\"\n return emph_text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n 
if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def textual(title, ordering_field=None):\n def decorator(func):\n def wraps(self, obj):\n result = func(self, obj)\n return result if result else u'---'\n\n wraps.short_description = title\n wraps.allow_tags = True\n\n if ordering_field:\n wraps.admin_order_field = ordering_field\n\n return wraps\n return decorator", "def outputText(self, item, titleMode, internal=False):\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def group_title(self, group):\n group_title = group.getProperty('title')\n if self.short:\n splitted = group_title.split('(')\n if len(splitted) > 1:\n group_title = group_title.split('(')[-1][:-1]\n return html.escape(group_title)", "def outputText(self, item, titleMode, internal=False):\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)", "def format_heading(self, level, text):\n underlining = ['=', '-', '~', ][level-1] * len(text)\n return '%s\\n%s\\n\\n' % (text, underlining)", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def formatted(self) -> str:\r\n ...", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def gen_title_rst(txt):\n # Just add a few useful directives\n txt = \".. 
highlight:: cmake\\n\\n\" + txt\n return txt", "def _prettyfilename(self):\n return self.title", "def wrap_title(title, mpl_layout):\n fig = mpl_layout.canvas.figure\n ax = fig.axes[0]\n ext_pixels = ax.get_window_extent()\n ext_inches = ext_pixels.transformed(fig.dpi_scale_trans.inverted())\n magic_number = 10\n letters_per_line = int(ext_inches.width * magic_number)\n title_wrapped = '\\n'.join(textwrap.wrap(title, letters_per_line))\n return title_wrapped", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''", "def transform(text: str) -> str:\n return text.title()", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)", "def complete_alt_title(self, obj):\n return str(obj)", "def clean_title(\r\n title: str,\r\n mode: Literal[\"soft\", \"hard\", \"safe\"],\r\n allow_dot: bool = False,\r\n n: Optional[int] = None,\r\n) -> str:\r\n ...", "def text(self) -> str:", "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and 
':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "def print_title(title):\n\n print(\"\\n\" + title)\n print(\"=\" * len(title))", "def format_rich_quote(rich_text_quote):\n rich_text = format_rich_text(rich_text_quote)\n return \"> \" + \"\\n> \".join(rich_text.split(\"\\n\")) + \"\\n\"", "def SearchableText(self):\n ctool = getToolByName(self, 'portal_cpscalendar')\n if getattr(ctool, 'event_fulltext_index', False):\n return '%s %s' % (self.title, self.description)\n return ''", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def visit_title_reference(self, node):\n self.body.append('\\\\emph{\\\\textbf{')", "def render(resolve_unicode,\n title_force_uppercase,\n msdos_eol_style,\n output_encoding,\n omit_fields=[]):", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def format_screen(self,str):\n # Paragraph continue\n par_re = re.compile(r'\\\\$',re.MULTILINE)\n str = par_re.sub('',str)\n return str", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def title_content(label=\"A title\"):\n return {'label':label}", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title_p(self):\n self.run_command('title_p')", "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def dumps(self):\n\n if not self.numbering:\n num = '*'\n else:\n num = ''\n\n string = Command(self.latex_name + num, self.title).dumps()\n string += '%\\n' + self.dumps_content()\n\n return string", "def processTitle(title):\n\n cleaned = re.sub(r'[@#\\\"]+', '', title.lower().strip())\n cleaned = re.sub(r'\\(\\d{4}.*\\)', '', cleaned)\n cleaned = re.sub(r':.+', '', cleaned).strip()\n return cleaned", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if 
self.font else self.text" ]
[ "0.6623557", "0.64947814", "0.6347113", "0.6307539", "0.621596", "0.6210496", "0.60684896", "0.60674477", "0.60663515", "0.60421175", "0.6019259", "0.59935653", "0.59802073", "0.59790826", "0.595393", "0.5948588", "0.5939195", "0.590317", "0.5872387", "0.58521676", "0.5838757", "0.5835408", "0.5834278", "0.5832544", "0.58303535", "0.58232164", "0.58196765", "0.5818879", "0.581837", "0.58134586", "0.58123326", "0.57893336", "0.5777435", "0.5773666", "0.5759935", "0.57562524", "0.57514244", "0.5736761", "0.5721786", "0.57156", "0.5693657", "0.56579095", "0.56524575", "0.56516933", "0.56416726", "0.5639766", "0.5630319", "0.56235963", "0.5607828", "0.55989367", "0.5597865", "0.5593643", "0.55868447", "0.5576239", "0.55753696", "0.5570099", "0.556155", "0.55568874", "0.55474097", "0.5539662", "0.5532411", "0.5531814", "0.5512975", "0.5479672", "0.54774815", "0.54768354", "0.5473451", "0.54682344", "0.5464578", "0.54521894", "0.5445922", "0.5437787", "0.54369724", "0.5422958", "0.5415149", "0.5415149", "0.5399354", "0.539413", "0.53890395", "0.5382889", "0.5382856", "0.53564143", "0.535306", "0.53529805", "0.5352455", "0.5347083", "0.5333787", "0.5333257", "0.5332394", "0.5331696", "0.53306514", "0.53304696", "0.53293514", "0.5327383", "0.53269297", "0.53269297", "0.53238297", "0.53169096", "0.5314785", "0.5314103" ]
0.67517006
0
Return tuple of this field's text in edit format and bool validity, using edit format option
def editText(self, item):
    storedText = item.data.get(self.name, '')
    result = self.formatEditText(storedText)
    if self.isRequired and not result[0]:
        return (result[0], False)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def _get_field_edit_widget(self, row_index):\n field_row = self.field_rows[row_index]\n if not field_row.editable:\n raise TypeError(\"Cannot edit a boolean or dropdown field. 
(Internal error, tell the developer!)\")\n field_type = field_row.field_type\n field_value = self.get_field_dict(self.get_entry_id(self.active_row_index))[field_row.field_name]\n initial_text = repr(sorted(field_value)) if issubclass(field_type, list) else str(field_value)\n return self.Entry(\n field_row.value_box,\n initial_text=initial_text,\n integers_only=field_type == int,\n numbers_only=field_type == float,\n sticky=\"ew\",\n width=5,\n )", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def getinforow(docfield):\n\t\tif docfield.fieldtype == 'Select':\n\t\t\tif not docfield.options:\n\t\t\t\treturn ''\n\t\t\telif docfield.options.startswith('link:'):\n\t\t\t\treturn 'Valid %s' % docfield.options[5:]\n\t\t\telse:\n\t\t\t\treturn 'One of: %s' % ', '.join(filter(None, docfield.options.split('\\n')))\n\t\telif docfield.fieldtype == 'Link':\n\t\t\treturn 'Valid %s' % docfield.options\n\t\telif docfield.fieldtype in ('Int'):\n\t\t\treturn 'Integer'\n\t\telif docfield.fieldtype == \"Check\":\n\t\t\treturn \"0 or 1\"\n\t\telif docfield.info:\n\t\t\treturn docfield.info\n\t\telse:\n\t\t\treturn ''", "def getText(self):", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def text(value):\n return True", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = _(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for 
key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_text(self):\n inp = \" \"\n if self.link_id:\n inp += \"LINK \" + self.link_id\n inp += self.status + ' '\n if self.node_id:\n inp += \"NODE \" + self.node_id + ' '\n if self.value:\n inp += self.control_type.name + ' ' + str(self.value) + ' '\n if self.time:\n inp += self.time + ' '\n if self.clock_time:\n inp += self.clock_time + ' '\n # TODO: research correct formatting of time, clock_time options\n return inp", "def format_field_with_flag(self, data):\n return data.strip() == '*'", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def text(self):\n for attr in ['label', 'text']:\n val = self.attribute_value(attr)\n if val:\n return val\n\n return super(Option, self).text", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def TEXT(number, format_type):\n raise NotImplementedError()", "def edit_form_entry_help_text_extra(cls):\n return \"\"\"\n <ul class=\"{container_class}\">\n {edit_option_html}\n <li><a href=\"{delete_url}\">\n <span class=\"{delete_option_class}\"></span> {delete_text}</a>\n </li>\n </ul>\n <input type=\"hidden\" value=\"{form_element_position}\"\n name=\"form-{counter}-position\"\n id=\"id_form-{counter}-position\"\n class=\"form-element-position\">\n <input type=\"hidden\" value=\"{form_element_pk}\"\n name=\"form-{counter}-id\" id=\"id_form-{counter}-id\">\n \"\"\".format(\n container_class=cls.form_list_container_class,\n edit_option_html=\"{edit_option_html}\",\n delete_url=\"{delete_url}\",\n delete_option_class=cls.form_delete_form_entry_option_class,\n delete_text=\"{delete_text}\",\n form_element_position=\"{form_element_position}\",\n counter=\"{counter}\",\n form_element_pk=\"{form_element_pk}\",\n )", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI 
is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def formatted(self) -> str:\r\n ...", "def validate_format(self):\n raise NotImplementedError()", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def _generateReadOnly(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'readonly'\n if self._script.utilities.isReadOnlyTextArea(obj):\n result.append(self._script.formatting.getString(**args))\n return result", "def __str__(self):\n struct_repr = \", \".join([\n \"type: \" + str(self.type),\n \"text: \" + str(self.text)\n ])\n\n return f\"StatusText: [{struct_repr}]\"", "def _hidden_in_unicode(self, txt):", "def format(self) -> str:", "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def post_formatter(self, value):\n if isinstance(value, bool):\n return value and 'true' or None\n return value", "def get_data_type_error_text(field_name, field_value, type_name):\n\n\tmessage = ''\n\n\ttry:\n\t\tmessage = (\"Value '{0}' entered for '{1}' could not be parsed as a valid {2}\"\n\t\t\t\t .format(str(field_value),field_name,type_name))\n\texcept TypeError:\n\t\tmessage = (\"A value entered for '{0}' could not be read\".format(field_name))\n\n\treturn message", "def edit_form_entry_edit_option_html(cls):\n return \"\"\"\n <li><a href=\"{edit_url}\">\n <span class=\"{edit_option_class}\"></span> {edit_text}</a>\n </li>\n \"\"\".format(\n edit_url=\"{edit_url}\",\n edit_option_class=cls.form_edit_form_entry_option_class,\n edit_text=\"{edit_text}\",\n )", "def summary(self):\n if self.intact and self.valid:\n return 'INTACT:' + ','.join(self.summary_fields())\n else:\n return 'INVALID'", "def syntax_text():", "def get_help_text(self):\n requirements = \"Your password must contain at least: {min_length} \" \\\n \"character(s), {min_uppercase} uppercase letter(s), \" \\\n \"{min_lowercase} lowercase letter(s) and \" \\\n \"{min_digits} digit(s). 
\".format(\n min_length=self.min_length,\n min_uppercase=self.min_uppercase,\n min_lowercase=self.min_lowercase,\n min_digits=self.min_digits)\n return requirements", "def get_field_format(field):\n if field and '#{' in field and '}' in field:\n i = field.index('#{')\n j = field.index('}', i)\n cond = field[i + 2:j]\n try:\n if len(cond) > 0:\n return (field.replace('#{%s}' % cond, ''), cond)\n except Exception:\n return (field, False)\n return (field, False)", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def show_fields(*fields):\n\n fields = filter( lambda x: x, fields )\n target_len = max( len(name) for name, value in fields ) + 2\n for name, value in fields:\n line = name + ':' + \" \" * (target_len - len(name))\n if type(value) == bool:\n line += color_text(\"Yes\", 'green') if value else color_text(\"No\", 'red')\n else:\n line += str(value)\n print line", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def renderEdit(self, style):\n html = u\"<div>\\n\"\n html += self.contentElement.renderEdit()\n emphasisValues = [(_(u\"No emphasis\"), Idevice.NoEmphasis),\n (_(u\"Some emphasis\"), Idevice.SomeEmphasis)]\n html += common.formField('select', _('Emphasis'),\n 'emphasis', self.id, \n '', # TODO: Instructions\n emphasisValues,\n self.idevice.emphasis)\n html += self.renderEditButtons()\n html += u\"</div>\\n\"\n return html", "def renderEdit(self):\n if self.discussion.isNone:\n return \"\"\n html = common.textInput(\"topic\" + self.id, self.discussion.topic)\n html += common.elementInstruc(self.discussion.instruc)\n html += u\"<br/>\\n\"\n html += common.richTextArea(\"dIntro\" + self.id, self.discussion.intro)\n return html", "def on_edit(self, event, text):\n return None", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = 
_errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_description(self):\n if self.desc_format == MARKDOWN_FORMAT:\n return markdown.markdown(self.desc, safe_mode='escape') \n elif self.desc_format == TEXT_FORMAT:\n return html.escape(self.desc)", "def get_description(self):\n if self.desc_format == MARKDOWN_FORMAT:\n return markdown.markdown(self.desc, safe_mode='escape') \n elif self.desc_format == TEXT_FORMAT:\n return html.escape(self.desc)", "def _format_field_val(\n self,\n field: str,\n field_type: str,\n value: Any,\n ) -> str | int | bool | list | None:\n\n # If the field is empty, no need to format.\n if value is None:\n return None\n\n # TODO(DanielRyanSmith): Write checks to ensure enum values are valid.\n if field_type == 'emails' or field_type == 'split_str':\n list_val = self._split_list_input(field, field_type, value, ',')\n if field == 'blink_components' and len(value) == 0:\n return [settings.DEFAULT_COMPONENT]\n return list_val\n elif field_type == 'link':\n return self._extract_link(value)\n elif field_type == 'links':\n list_val = self._split_list_input(field, field_type, value)\n # Filter out any URLs that do not conform to the proper pattern.\n return [self._extract_link(link)\n for link in list_val if link]\n elif field_type == 'int':\n # Int fields can be unset by giving null or nothing in the input field.\n if value == '' or value is None:\n return None\n try:\n return int(value)\n except ValueError:\n self._abort_invalid_data_type(field, field_type, value)\n elif field_type == 'bool':\n return bool(value)\n return str(value)", "def getText(self):\r\n return \"\"", "def is_text( self ):\n return self.get_main_type() == 'text'", "def text(self):\n\n if self.status.full_text:\n return self.status.full_text\n elif self.status.text:\n return self.text\n else:\n return MISSING", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def is_text(self):\n return self.value_type in (str, unicode)", "def text(self, value, match_option=None):\n return self.attributes(\"text\", value, match_option)", "def text(self) -> str:", "def test_help_text_group(self): \n field_help_text = {\n \"title\": \"Дайте назание группе\",\n \"slug\": ('Укажите адрес для группы. 
Используйте '\n 'только латиницу, цифры, дефисы и знаки '\n 'подчёркивания'),\n } \n for value, expected in field_help_text.items():\n with self.subTest(value=value):\n self.assertEqual(self.group._meta.get_field(value).help_text, expected)", "def getValue(self):\n return self.field.currentText()", "def format_insert_value(self, value, datatype):\n if datatype == \"bool\":\n try:\n if int(value) == 1:\n return \"TRUE\"\n elif int(value) == 0:\n return \"FALSE\"\n except:\n pass\n return Engine.format_insert_value(self, value, datatype)", "def FormatYesNo(value):\n if value:\n return u'Yes'\n else:\n return u'No'", "def text(self):\n return str(self.input.text())", "def _editorText(self):\n if self.__lineEditKind:\n return self._editor.text()\n else:\n return self._editor.currentText()", "def get_PoemText(self):\n return self.text if self.text else \"No Text Yet\\n\"", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n self.assertEqual(\n self.form.fields[\"company_url\"].help_text,\n \"Please ensure this is a valid web address.\",\n )", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def reformat(ctx):\n pass", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def cmd(self):\n return self.view.command_input.edit_text", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def get_data_from_nonformat_text():\n pass", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise PlansError(\"Couldn't get edit text, are we logged in?\")\n else:\n plan = u'' + plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = 
convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def createEditor(self, parent, options, midx):\n ledit = qt.QLineEdit(parent)\n rx = qtc.QRegExp()\n rx.setPattern(\"\\\\S{0,8}\");\n validator = qt.QRegExpValidator(rx, ledit)\n ledit.setValidator(validator)\n return ledit" ]
[ "0.7739331", "0.76100457", "0.7482231", "0.7482231", "0.7214738", "0.7091386", "0.702513", "0.7018469", "0.6923507", "0.67845714", "0.67359626", "0.6618689", "0.6553471", "0.6410668", "0.63894486", "0.6138071", "0.56394523", "0.5639128", "0.5639128", "0.5639128", "0.5639128", "0.5639128", "0.5638955", "0.5513468", "0.55053264", "0.53779685", "0.5359518", "0.53337234", "0.5307074", "0.5301762", "0.5241272", "0.52383614", "0.5226998", "0.5211725", "0.520141", "0.5192378", "0.51895934", "0.51856655", "0.5173577", "0.51607776", "0.5142735", "0.5135674", "0.51244736", "0.5120664", "0.5118944", "0.51149535", "0.50991255", "0.5074703", "0.50708526", "0.50635666", "0.50552285", "0.5042696", "0.50382054", "0.50350344", "0.5032984", "0.5016341", "0.5006048", "0.5004984", "0.50031674", "0.50015765", "0.5000649", "0.49909857", "0.4987798", "0.49851483", "0.4983423", "0.49768013", "0.49763212", "0.49456343", "0.4943617", "0.4942017", "0.4942017", "0.49341482", "0.493275", "0.4930266", "0.49289", "0.49278158", "0.49278158", "0.49171287", "0.49130768", "0.49126655", "0.4904769", "0.48918605", "0.48839173", "0.48800564", "0.4875121", "0.48722965", "0.4866846", "0.48654693", "0.4865155", "0.48596352", "0.48541993", "0.4854045", "0.48522964", "0.48491585", "0.48441505", "0.4843921", "0.48430306", "0.48427606", "0.48414764", "0.48411053" ]
0.76801556
1
Return tuple of text in edit format and bool validity, using edit format option
def formatEditText(self, storedText):
    return (storedText, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def reformat(ctx):\n pass", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n 
return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def test_format_permissions_and_docstring(self):\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\"],\n {\"some\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"permission formatted string\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\"\n ),\n )\n\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\", \"another permission\"],\n {\"some\": \"docstring\", \"another\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"- permission formatted string\\n\"\n \"- another permission\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\\n\"\n \"- **another** : docstring\"\n ),\n )", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def text(value):\n return True", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and 
not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def _check_style(file_path, clang_format_bin):\n with open(file_path, 'r') as f:\n is_valid_header = f.read().startswith(CppFormatter.standard_header)\n\n cmd = [\n clang_format_bin,\n \"-style=file\",\n \"-output-replacements-xml\",\n file_path,\n ]\n result = subprocess.check_output(cmd).decode(\"utf-8\")\n if \"<replacement \" in result:\n is_valid_style = False\n else:\n is_valid_style = True\n return (is_valid_style, is_valid_header)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = 
_(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. 
Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def hints(s):\n if s == 'hello':\n # string, color, bold\n return (' World', 35, False)\n return None", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_data_from_nonformat_text():\n pass", "def FormatYesNo(value):\n if value:\n return u'Yes'\n else:\n return u'No'", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def rich(text):\n return full(text, False)", "def edit(self, text):\n return self._edit_engine(text, 
break_on_success=False)", "def change_prompt_format(self, arg, **_):\n if not arg:\n message = 'Missing required argument, format.'\n return [(None, None, None, message)]\n\n self.prompt_format = self.get_prompt(arg)\n return [(None, None, None, \"Changed prompt format to %s\" % arg)]", "def test_command_edit_info_boolean_flags():\n def f(inputfile):\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(inputfile, tmp.name)\n\n for flag in (\"write_protected\", \"synchronized\", \"cleaned\"):\n for true_value, false_value in ((\"1\", \"0\"),\n (\"yes\", \"no\"),\n (\"YES\", \"No\"),\n (\"true\", \"false\"),\n (\"tRuE\", \"FaLsE\")):\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, true_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == True\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, false_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == False\n f(kValid1)\n f(kValid2)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def get_help_text(self):\n requirements = \"Your password must contain at least: {min_length} \" \\\n \"character(s), {min_uppercase} uppercase letter(s), \" \\\n \"{min_lowercase} lowercase letter(s) and \" \\\n \"{min_digits} digit(s). \".format(\n min_length=self.min_length,\n min_uppercase=self.min_uppercase,\n min_lowercase=self.min_lowercase,\n min_digits=self.min_digits)\n return requirements", "def help_for(self, flag: str) -> Tuple[str, str]:\n # Obtain arg obj\n if flag not in self.flags:\n err = \"{!r} is not a valid flag for this context! Valid flags are: {!r}\" # noqa\n raise ValueError(err.format(flag, self.flags.keys()))\n arg = self.flags[flag]\n # Determine expected value type, if any\n value = {str: \"STRING\", int: \"INT\"}.get(arg.kind)\n # Format & go\n full_names = []\n for name in self.names_for(flag):\n if value:\n # Short flags are -f VAL, long are --foo=VAL\n # When optional, also, -f [VAL] and --foo[=VAL]\n if len(name.strip(\"-\")) == 1:\n value_ = (\"[{}]\".format(value)) if arg.optional else value\n valuestr = \" {}\".format(value_)\n else:\n valuestr = \"={}\".format(value)\n if arg.optional:\n valuestr = \"[{}]\".format(valuestr)\n else:\n # no value => boolean\n # check for inverse\n if name in self.inverse_flags.values():\n name = \"--[no-]{}\".format(name[2:])\n\n valuestr = \"\"\n # Tack together\n full_names.append(name + valuestr)\n namestr = \", \".join(sorted(full_names, key=len))\n helpstr = arg.help or \"\"\n return namestr, helpstr", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def __check_format(node, lint_ctx, profile: str, allow_ext=False):\n if \"format_source\" in node.attrib and (\"ext\" in node.attrib or \"format\" in node.attrib):\n lint_ctx.warn(\n f\"Tool {node.tag} output '{node.attrib.get('name', 'with missing name')}' should use either format_source or format/ext\",\n node=node,\n )\n if \"format_source\" in node.attrib:\n return True\n if node.find(\".//action[@type='format']\") is not None:\n return True\n # if allowed (e.g. 
for discover_datasets), ext takes precedence over format\n fmt = None\n if allow_ext:\n fmt = node.attrib.get(\"ext\")\n if fmt is None:\n fmt = node.attrib.get(\"format\")\n if fmt == \"input\":\n message = f\"Using format='input' on {node.tag} is deprecated. Use the format_source attribute.\"\n if Version(str(profile)) <= Version(\"16.01\"):\n lint_ctx.warn(message, node=node)\n else:\n lint_ctx.error(message, node=node)\n\n return fmt is not None", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def verify_diagnostics_and_usage_text():\r\n msg, status = \"\", True\r\n try:\r\n sleep(10)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n 'verify end user license agreement label'\r\n flag2,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n \r\n flag = False if not (flag1 and flag2) else True\r\n else:\r\n\r\n \r\n 'Verify diagnostics usage text'\r\n flag1 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_lbl'))\r\n 'Verify diagnostics usage text'\r\n flag2 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_txt'))\r\n \r\n sleep(4) \r\n \r\n flag = False if not (flag1 and flag2) else True\r\n if not flag:\r\n return False, msg\r\n else:\r\n print \"License agreement screen name is displayed properly\"\r\n \r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def _format_action(self, action):\n parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)\n if action.nargs == argparse.PARSER:\n parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n return parts", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def _engine_option_string_and_comment(option: engine.Option, value: engine.ConfigValue) -> Tuple[str, str]:\n if value is None:\n value = ''\n name_equals_val = f'{option.name}={value}'\n if option.type == 'check' or option.type == 'string' or option.type == 'button':\n return (name_equals_val, f'type={option.type}')\n if option.type == 'spin':\n return (name_equals_val, f'type=spin, min={option.min}, max={option.max}')\n if option.type == 'combo':\n return (name_equals_val, f'type=combo, var={option.var}')\n return (name_equals_val, 'type=unknown')", "def TEXT(number, format_type):\n raise NotImplementedError()", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n 
self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def __verify_plot_options(self, options_str):\n default_line = '-'\n default_marker = ''\n default_colour = 'k'\n\n # Split str into chars list\n options_split = list(options_str)\n\n # If 0, set defaults and return early\n if len(options_split) == 0:\n return [default_line, default_marker, default_colour]\n\n # If line_style given, join the first two options if applicable\n # (some types have 2 characters)\n for char in range(0, len(options_split) - 1):\n # If char is '-' (only leading character in double length option)\n if options_split[char] == '-' and len(options_split) > 1:\n # If one of the leading characters is valid\n if options_split[char + 1] == '-' or \\\n options_split[char + 1] == '.':\n # Join the two into the first\n options_split[char] = options_split[char] \\\n + options_split[char + 1]\n # Shuffle down the rest\n for idx in range(char + 2, len(options_split)):\n options_split[idx - 1] = options_split[idx]\n # Remove duplicate extra\n options_split.pop()\n\n # If any unknown, throw error\n for option in options_split:\n if option not in self.__line_styles and \\\n option not in self.__marker_styles and \\\n option not in self.__colour_styles:\n error_string = \"Unknown character entered: '{0}'\"\n raise ValueError(error_string.format(option))\n\n ##############################\n # Verify Line Style\n ##############################\n line_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n line_style_index = 0\n for option in options_split:\n if option in self.__line_styles:\n line_style_count = line_style_count + 1\n line_style_index = self.__line_styles.index(option)\n\n # If more than one, throw error\n if line_style_count > 1:\n raise ValueError(\n \"Too many line style arguments given. Only one allowed\")\n # If none, set as solid\n elif line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = default_line\n # If one, set as given\n else:\n output_line = self.__line_styles[line_style_index]\n ##############################\n\n ##############################\n # Verify Marker Style\n ##############################\n marker_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n marker_style_index = 0\n for option in options_split:\n if option in self.__marker_styles:\n marker_style_count = marker_style_count + 1\n marker_style_index = self.__marker_styles.index(option)\n\n # If more than one, throw error\n if marker_style_count > 1:\n raise ValueError(\n \"Too many marker style arguments given. 
Only one allowed\")\n # If none, set as no-marker\n elif marker_style_count == 0 or not any(\n item in options_split for item in self.__marker_styles):\n output_marker = default_marker\n # If one, set as given\n else:\n output_marker = self.__marker_styles[marker_style_index]\n # If marker set and no line given, turn line to no-line\n if line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = ''\n ##############################\n\n ##############################\n # Verify Colour Style\n ##############################\n colour_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n colour_style_index = 0\n for option in options_split:\n if option in self.__colour_styles:\n colour_style_count = colour_style_count + 1\n colour_style_index = self.__colour_styles.index(option)\n\n # If more than one, throw error\n if colour_style_count > 1:\n raise ValueError(\n \"Too many colour style arguments given. Only one allowed\")\n # If none, set as black\n elif colour_style_count == 0 or not any(\n item in options_split for item in self.__colour_styles):\n output_colour = default_colour\n # If one, set as given\n else:\n output_colour = self.__colour_styles[colour_style_index]\n ##############################\n\n return [output_line, output_marker, output_colour]", "def reformat():\n toolkit.reformat()", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def validate_format(self):\n raise NotImplementedError()", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def _validate_performatives(performative: str) -> Tuple[bool, str]:\n # check performative is not a reserved name\n if _is_reserved_name(performative):\n return (\n False,\n \"Invalid name for performative '{}'. This name is reserved.\".format(\n performative,\n ),\n )\n\n # check performative's format\n if not _is_valid_regex(PERFORMATIVE_REGEX_PATTERN, performative):\n return (\n False,\n \"Invalid name for performative '{}'. 
Performative names must match the following regular expression: {} \".format(\n performative, PERFORMATIVE_REGEX_PATTERN\n ),\n )\n\n return True, \"Performative '{}' is valid.\".format(performative)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def edit_form_entry_help_text_extra(cls):\n return \"\"\"\n <ul class=\"{container_class}\">\n {edit_option_html}\n <li><a href=\"{delete_url}\">\n <span class=\"{delete_option_class}\"></span> {delete_text}</a>\n </li>\n </ul>\n <input type=\"hidden\" value=\"{form_element_position}\"\n name=\"form-{counter}-position\"\n id=\"id_form-{counter}-position\"\n class=\"form-element-position\">\n <input type=\"hidden\" value=\"{form_element_pk}\"\n name=\"form-{counter}-id\" id=\"id_form-{counter}-id\">\n \"\"\".format(\n container_class=cls.form_list_container_class,\n edit_option_html=\"{edit_option_html}\",\n delete_url=\"{delete_url}\",\n delete_option_class=cls.form_delete_form_entry_option_class,\n delete_text=\"{delete_text}\",\n form_element_position=\"{form_element_position}\",\n counter=\"{counter}\",\n form_element_pk=\"{form_element_pk}\",\n )", "def extension (formatStr):\n assert False, \"TODO:\"", "def _generateReadOnly(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'readonly'\n if self._script.utilities.isReadOnlyTextArea(obj):\n result.append(self._script.formatting.getString(**args))\n return result", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def _format_answer(self, text):\n text = str(text).replace('\\n', ' ')\n answer_width = 70\n pretty_text = '\\n\\t'.join(textwrap.wrap(text, answer_width))\n\n return pretty_text", "def is_text_editable(path):\n return False", "def flags(self, f):\n if f.is_inlined:\n return \" (inlined)\"\n return \"\"", "def text_to_display(level):\n if level == \"html\":\n return html_answers, html_text\n elif level == \"css\":\n return css_answers, css_text\n elif level == \"python\":\n return python_answers, python_text", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def editorForTyp(typ):\n\n if typ == \"quint32\":\n return (\"QSpinBox\", \"setValue\", \"value\")\n elif typ == \"QString\":\n return (\"QLineEdit\", \"setText\", \"text\")\n elif typ == \"bool\":\n return (\"QCheckBox\", \"setChecked\", \"isChecked\")\n return (None, None, None)", "def show_fields(*fields):\n\n fields = filter( lambda x: x, fields )\n target_len = max( len(name) for name, value in fields ) + 2\n for name, value in fields:\n line = name + ':' + \" \" * (target_len - len(name))\n if type(value) == bool:\n line += color_text(\"Yes\", 'green') if value else color_text(\"No\", 'red')\n else:\n line += str(value)\n print line", "def testCheckRequiredFormat(self):\n plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()\n\n file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()\n file_system_builder.AddFile('/file.txt', (\n b'2018-01-24 18:25:08,454 -0800 INFO pid=2376 7780:MainThread '\n b'logging_config.py:295 OS: Windows/6.1-SP1\\n'))\n\n file_entry = file_system_builder.file_system.GetFileEntryByPath('/file.txt')\n\n parser_mediator = self._CreateParserMediator(None, file_entry=file_entry)\n\n file_object = file_entry.GetFileObject()\n text_reader = 
text_parser.EncodedTextReader(file_object)\n text_reader.ReadLines()\n\n result = plugin.CheckRequiredFormat(parser_mediator, text_reader)\n self.assertTrue(result)", "def formatted(self) -> str:\r\n ...", "def text_editor():\n return True", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def validate(self, txt, pos):\n state, rpos = qt.QDoubleValidator.validate(self, txt, pos)\n if txt.length() == 0:\n state = qt.QValidator.Acceptable\n return state, rpos" ]
[ "0.74409115", "0.7369682", "0.71582556", "0.7035388", "0.7028556", "0.6883685", "0.6550002", "0.6452589", "0.6391841", "0.6309491", "0.6299821", "0.61874855", "0.60250795", "0.583455", "0.5770457", "0.573678", "0.5614053", "0.5609872", "0.56041586", "0.55477756", "0.53100324", "0.5299071", "0.5209894", "0.5205097", "0.5205097", "0.5205097", "0.5205097", "0.5205097", "0.5200622", "0.5172154", "0.51652354", "0.5159618", "0.51195395", "0.5115454", "0.5094424", "0.50928664", "0.50774634", "0.5076837", "0.506709", "0.50559866", "0.5048865", "0.50116587", "0.50006783", "0.4988621", "0.49727595", "0.49726328", "0.4945682", "0.49346167", "0.49305907", "0.49302247", "0.49205834", "0.4899774", "0.48946813", "0.4889593", "0.48848787", "0.48710102", "0.48603994", "0.485448", "0.4853418", "0.48492938", "0.4849078", "0.48415923", "0.48410097", "0.48397195", "0.48287314", "0.48252425", "0.4824147", "0.48218486", "0.48204252", "0.4820401", "0.48190412", "0.48128283", "0.48080114", "0.48075286", "0.4801114", "0.48006943", "0.4796574", "0.47940776", "0.47910094", "0.47794297", "0.4775982", "0.47566742", "0.47563574", "0.47514576", "0.47491467", "0.4748313", "0.47368434", "0.4736663", "0.47360152", "0.47345802", "0.4733563", "0.47324613", "0.47292554", "0.4723963", "0.47194177", "0.47147244", "0.47145414", "0.4712799", "0.47122368" ]
0.7101609
3
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText): return (editText, editText or not self.isRequired)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def get_data_from_nonformat_text():\n pass", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def on_edit(self, event, text):\n return None", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def reformat(ctx):\n pass", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if 
check_text:\n check(text)\n \n print(text)\n \n ### managing placeholders\n text = parsers['v'].main(text)\n \n ### saving names\n if index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if 
plan is None:\n raise PlansError(\"Couldn't get edit text, are we logged in?\")\n else:\n plan = u'' + plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_edits(text):\n edit_p = re.compile(\"(?P<open><edit.*?>)(?P<inner>.*?)(?P<close></edit>)\")\n corr_p = re.compile(\"<corrections>.*?</corrections>\")\n edits = []\n\n offset = 0\n\n for m in re.finditer(edit_p, text):\n # Make an edit object\n edit_text = \"\".join(m.groups())\n edit = ET.XML(m.group(0))\n\n # Set the bounds of the original text and adjust offset\n inner_string = m.group('inner') \n start = m.start() - offset\n corr_m = re.search(corr_p, inner_string)\n \n if corr_m: # Replacement/insertion have a correction\n offset += len(corr_m.group(0)) \n \n if not 
inner_string.startswith(\"<empty/>\"):\n end = start + corr_m.start()\n else:\n offset += len(\"<empty/>\") # It is \"\" in plain text\n end = start\n else:\n # Deletions may not have a correction\n if not inner_string.startswith(\"<empty/>\"):\n end = start + len(inner_string)\n else: # Unspecified error <empty/> is \"\" in plain text\n end = start\n offset += len(inner_string)\n\n\n edit.set(\"start\", \"%d\" % start) \n edit.set(\"end\", \"%d\" % end)\n\n offset += len(m.group('open')) + len(m.group('close'))\n \n\n # Make the original text a subelement of <edit>\n # Original text may be a string or <empty/> element.\n original = ET.SubElement(edit, \"original\")\n \n if edit.text:\n original.text = edit.text\n edit.text = \"\"\n else:\n empty = edit.find('empty')\n \n try:\n edit.remove(empty)\n original.append(empty)\n except Exception as e:\n pass\n \n edits.append(edit)\n\n return edits", "def refang(self, text: str):", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def get_mark(text, short):\n\n line = text.readline()\n\n # check that the line begins with a valid entry type\n if not short and not re.match(r'^\\s*(text|mark) = \"', line):\n raise ValueError('Bad entry: ' + line)\n\n # read until the number of double-quotes is even\n while line.count('\"') % 2:\n next_line = text.readline()\n\n if not next_line:\n raise EOFError('Bad entry: ' + line[:20] + '...')\n\n line += next_line\n if short:\n pattern = r'^\"(.*?)\"\\s*$'\n else:\n pattern = r'^\\s*(text|mark) = \"(.*?)\"\\s*$'\n entry = re.match(pattern, line, re.DOTALL)\n\n return entry.groups()[-1].replace('\"\"', '\"')", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def getText(self):", "def get_text(text_input):\r\n return text_input", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def rich(text):\n return full(text, False)", "def text(value):\n return True", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": 
\"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def edit():", "def get_text_editor_input(initial_msg):\n EDITOR = os.environ.get('EDITOR', 'vi')\n CROP_MARK = ('\\n\\nAnything above this line will be ignored:\\n' +\n ('-' * 34) + '>8' + ('-' * 34) + '\\n')\n\n wrapper = TextWrapper(replace_whitespace=False, drop_whitespace=False)\n initial_msg = '\\n'.join(wrapper.wrap(initial_msg))\n initial_msg += CROP_MARK\n\n with tempfile.NamedTemporaryFile(suffix='.md') as temp:\n temp.write(initial_msg.encode('utf-8'))\n temp.flush() # Write buffer to the file\n subprocess.call([EDITOR, temp.name])\n\n # The pointer was already after the initial message, but we return to\n # the beginning just in case the user added content before the mark\n temp.seek(0)\n return temp.read().decode('utf-8').split(CROP_MARK, 1)[1].strip()", "def text_example():\n \n text_store = \"01000001011000010010000001000010011000100000110100001010001100010011001000110011\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"You should be able to save this file and open it in a text editor like Notepad or Nano to read it. If you edit the values you may find it does not display properly as text. 
Unchanged, it should be interpreted by a text editor as:\\n\\nAa Bb\\n123\\n\\nAs the file was made on a Windows machines you may find other systems display the line breaks differently.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "def is_text_editable(path):\n return False", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def edit_once(self, text):\n return self._edit_engine(text, break_on_success=True)", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def process_text(self, text, language):", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. 
If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def get_text_from_editor():\n with tempfile.NamedTemporaryFile(suffix='.tmp', mode='w+t') as f:\n # Create a temporary file with instructions on describing bug\n f.write(message + '\\n\\n')\n f.flush()\n # Open the editor and allow the user to type\n editor = os.environ.get('EDITOR', 'vim')\n subprocess.call([editor, f.name])\n # Read and clean the file\n f.seek(0)\n text = ''.join([line.lstrip() for line in f.readlines()\n if line and not line.lstrip().startswith('#')])\n return '\\n'.join(textwrap.wrap(text, width=100))", "def storeTextEditValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.sender().toPlainText()\n\t\tself.storeValue(category, attr, value)", "def _editorText(self):\n if self.__lineEditKind:\n return self._editor.text()\n else:\n return self._editor.currentText()", "def _hidden_in_unicode(self, txt):", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def edit_type(self, candidate, word):\n edit = [False] * 4\n correct = \"\"\n error = \"\"\n replaced = ''\n replacer = ''\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]: # inconsistency in the first (i + 1) characters of the two strings\n if candidate[i:] == word[i - 1:]:\n edit[1] = True # deletion\n correct = candidate[i - 1] # candidate[i - 1] is deleted and we get word\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n edit[0] = True # insertion\n correct = ''\n error = word[i] # word[i] is redundant\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = 
word[i - 1] + error\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True # substitution\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True # transposition\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n # string inversion\n candidate = candidate[::-1]\n word = word[::-1]\n\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]:\n if candidate[i:] == word[i - 1:]:\n edit[1] = True\n correct = candidate[i - 1]\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n correct = ''\n error = word[i]\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = word[i - 1] + error\n edit[0] = True\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n if word == candidate:\n return \"None\", '', '', '', ''\n if edit[0]:\n return EDIT_TYPE_INSERTION, correct, error, replaced, replacer\n elif edit[1]:\n return EDIT_TYPE_DELETION, correct, error, replaced, replacer\n elif edit[2]:\n return EDIT_TYPE_SUBSTITUTION, correct, error, replaced, replacer\n elif edit[3]:\n return EDIT_TYPE_TRANSPOSITION, correct, error, replaced, replacer", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def text_editor():\n return True", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def stepText2Changed(build, step, text2):", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def on_idEdit_textChanged(self, txt):\n self.__generateDefaultCommitMessage()\n 
self.__updateOK()", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def element_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier))\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def reformat():\n toolkit.reformat()", "def process_raw_text(self, file_name, column_side):\n self.mvc_check()\n\n model_txt = None\n if column_side == LEFT_TEXT:\n model_txt = self.model.txt1\n elif column_side == RIGHT_TEXT:\n model_txt = self.model.txt2\n\n model_txt.open_raw(file_name)\n model_txt.process_raw()\n self.opened_txt[column_side] = True\n self.can_align = self.opened_txt[LEFT_TEXT] and self.opened_txt[RIGHT_TEXT]\n\n # Goldsmith\n model_txt.make_trie(column_side)\n model_txt.apply_goldsmith(1.1, 20, column_side)\n\n # Associate word for alignment if both text were opened\n if self.can_align:\n for view in self.views:\n view.end_task()\n view.change_task(\"Associating words\")\n self.model.associate_words(1.5)\n for view in self.views:\n view.end_task()\n\n # TODO : coherent saving to database using model.save_data\n\n return model_txt.str", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def read_plain_txt(input_fn: str) -> Tuple[List[str], List[str]]:\n\n with open(input_fn, 'r') as f:\n migrations = []\n queries = []\n mode = 'none'\n for line in f:\n stripped = line.strip()\n if len(stripped) == 0:\n continue\n if stripped.lower() == '== migrations':\n if mode != 'none':\n raise ValueError(f'Invalid {input_fn}: The migrations section 
should appear first.')\n mode = 'migrations'\n elif stripped.lower() == '== queries':\n if mode != 'migrations':\n raise ValueError(f'Invalid {input_fn}: The queries section should appear after the migrations section.')\n mode = 'queries'\n elif stripped[0] == '#':\n pass\n else:\n if mode == 'migrations':\n migrations.append(stripped)\n elif mode == 'queries':\n queries.append(stripped)\n else:\n pass\n return migrations, queries", "def on_lineEdit_textChanged(self, p0):\n # str_me = \"我爱我的祖国\"\n # self.lineEdit.setText(str_me) # 设置单行文本内容\n input_text = self.lineEdit.text()\n self.textEdit.setPlainText(input_text)\n # self.textEdit.setHtml(input_text) # 显示Html,如 <font color='red' size='20'>HELLO!</font>\n a = self.textEdit.toPlainText()\n print(a)", "def post_process_text(self, text):\n\t\treturn text", "def text(self) -> str:", "def editText(self, text, jumpIndex=None, highlight=None):\n try:\n import gui\n except ImportError, e:\n print 'Could not load GUI modules: %s' % e\n return text\n editor = gui.EditBoxWindow()\n return editor.edit(text, jumpIndex=jumpIndex, highlight=highlight)", "def alter_text_format(self):\n service = self.slides_service\n requests = [\n {\n 'updateParagraphStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'alignment': 'CENTER'\n },\n 'fields': 'alignment'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.TITLE_FONT_SIZE, # numbers slightly larger than lyrics\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.left_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.right_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n }\n ]\n body = {\n 'requests': requests\n }\n response = service.presentations().batchUpdate(presentationId=self.presentation_id, body=body).execute()\n print(f'Updated the text style for shape with ID: {self.left_box_id}')\n return response", "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def edit(self,edits):\n\t\tself.alphanumeric=edits['alphanumeric'] if 'alphanumeric' in edits else None\n\t\tself.alphanumeric_color = edits['alphanumeric_color'] if 'alphanumeric_color' in edits else None\n\t\tif self.alphanumeric_color ==\"grey\":\n\t\t\tself.alphanumeric_color = 
\"gray\"\n\t\tself.background_color = edits['background_color'] if 'background_color' in edits else None\n\t\tif self.background_color == \"grey\":\n\t\t\tself.background_color = \"gray\";\n\t\tshapeChoices = dict((x,y) for x,y in Target.SHAPE_CHOICES)\n\t\tself.shape = str(shapeChoices[edits['shape']]) if 'shape' in edits else None\n\t\tself.orientation = edits['orientation'] if 'orientation' in edits else None\n\t\tself.ptype = edits['ptype']\n\t\tself.description = edits['description'] if 'description' in edits else None\n\t\tself.save()", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def displayText():\n global entryWidget,entryWidget1,entryWidget2,entryWidget3,entryWidget4 ,entryWidget5,entryWidget6\n global thefilename,itrial,do_stim, delaylen,ntest_arms,stop_if_error,timeout_arm_sec\n thefilename=entryWidget.get().strip()\n itrial=entryWidget1.get().strip()\n do_stim=entryWidget2.get().strip()\n delaylen=entryWidget3.get().strip()\n ntest_arms=entryWidget4.get().strip()\n stop_if_error=int(entryWidget5.get().strip())==1 # convert to logical\n print 'stop_if_error is ', stop_if_error\n\n\n timeout_arm_sec=entryWidget6.get().strip()\n root.destroy()\n return thefilename,itrial,do_stim,delaylen,ntest_arms,stop_if_error,timeout_arm_sec" ]
[ "0.78716373", "0.76830506", "0.75691116", "0.75691116", "0.7379154", "0.73117137", "0.7183602", "0.7152062", "0.7089976", "0.6903923", "0.6863199", "0.68065554", "0.6748621", "0.6604557", "0.61224514", "0.6009547", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5534457", "0.5529326", "0.55119324", "0.54897064", "0.54593766", "0.53941077", "0.53884834", "0.53541094", "0.5348279", "0.5336523", "0.53298044", "0.53044033", "0.53017735", "0.5284678", "0.52548796", "0.5231703", "0.52075195", "0.51657903", "0.5139631", "0.51269805", "0.51183087", "0.50954133", "0.5086037", "0.50556576", "0.50475675", "0.50413114", "0.5033974", "0.50320536", "0.50238174", "0.50172436", "0.501209", "0.5011348", "0.50095177", "0.499828", "0.49958882", "0.49862808", "0.49802482", "0.49685866", "0.49656975", "0.49588487", "0.4951691", "0.49488887", "0.49448055", "0.49138415", "0.49082175", "0.48921612", "0.48836753", "0.48688877", "0.48642147", "0.48558703", "0.48427588", "0.48402458", "0.48379573", "0.48347312", "0.4829869", "0.48117617", "0.48040468", "0.48027003", "0.47989967", "0.47953638", "0.47919485", "0.47787616", "0.47736892", "0.47728088", "0.47708187", "0.4769437", "0.4768398", "0.47677627", "0.47633177", "0.47631097", "0.4755773", "0.47515184", "0.4750719", "0.47494507", "0.47457764", "0.47452554", "0.4735827", "0.47239852", "0.47187877" ]
0.62711895
14
Return initial stored value for new nodes
def getInitDefault(self):
    return self.initDefault
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_new_value(self):\r\n if self.initial_value is None:\r\n return None\r\n\r\n return deepcopy(self.initial_value)", "def initial_value(self):\n return self._initial_value", "def initial(self):\n return zero", "def initial_value(self) -> float:\n return self._initial_value", "def get(self, node):\n if node in self.val:\n return self.val[node]\n else:\n return self.initial", "def _node_defaults(self):\n parent = super(QTree, self)._node_defaults()\n parent[\"state\"] = np.zeros([self.size, self.size])\n parent[\"network\"] = self\n return parent", "def lazy_value(self):\n\n if self.state == Node.State.VALID:\n return self.value\n else:\n return None", "def _set_default_node(self, key):\n if key not in self._key_to_node_index:\n self._key_to_node_index[key] = self._graph.add_node(NodeData(key=key, equivs=[]))\n return self._key_to_node_index[key]", "def __init__(self):\n self.root = self.get_new_node();", "def generate_initial_state(self, x):\n\n if self.initial_state is None:\n x[:] = 0\n return x\n else:\n x[:] = self.initial_state(size=(self._num_neurons, 1))\n return x", "def getInitialValue(self):\n return _libsbml.Trigger_getInitialValue(self)", "def value(self):\n\n return deepcopy(self._node_id)", "def initial_nodes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"initial_nodes\")", "def refresh(self):\n self._pre_action_check('refresh')\n if hasattr(self, '_id'):\n node = self.inflate(self.cypher(\"START n=node({self}) RETURN n\")[0][0][0])\n for key, val in node.__properties__.items():\n setattr(self, key, val)\n else:\n raise ValueError(\"Can't refresh unsaved node\")", "def initial_state(self):\n return 0", "def get_initial(self):\n\t\treturn self.initial", "def calculate_gn_value(self, current_path_length) :\r\n\r\n self.gn_value = (current_path_length) #The g(n) value is the distance of the path if the node is traversed\r", "def getValue(self):\n return self.initValue", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self):\n\n\t\tself.root = None\n\t\tself.numNodes = 0", "def get_initial(self):\n return self.initial[:]", "def value(self):\n return self.node_value", "def __init__(self, data: str):\n self.root = Node(data)\n self.node_count = 1\n self.node_of_last_computed_hash = 0", "def _default_value(self):\n raise NotImplementedError", "def __init__(self, value, parent = None):\n # initialize new node\n self.value = value\n self.parent = parent\n self.left = None\n self.right = None\n self.height = 1", "def initial_state(self):\n return None", "def __init__(self):\n self.node = None\n self.data = None", "def init_id(root: TreeNode):\n current_id = [0]\n init_id_helper(root, current_id)\n return current_id[0]", "def default_value(self):\n if self.default:\n return copy.deepcopy(self.default)\n else:\n return None", "def initialize(self):\n self._value = self.initializer.evaluate(self)\n return self.value()", "def __init__(self, root_value):\n self.root = self.TreeNode(value=root_value)", "def value(self):\n self.refresh_default_value()\n return self.default_value", "def get_initial_state(self):\n return self.get_state(self.get_initial_observation())", "def __init__(self):\n self.start = 
Node('-1')", "def _default_value(self):\n return None", "def get_default_value(self):\n pass", "def get_initial_value(\n self, rel_name):\n return self._np_initval[rel_name].transpose()", "def initial(self) -> np.ndarray:\n return self._dist['initial']", "def __init__(self, val=None):\r\n self.root = {}", "def get_starting_node(self, graph):\n return random.choice(list(graph.nodes))", "def default(self):\r\n return self.default_value()", "def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0", "def __init__(self):\n self.end_of_ngram = False #Flag marking whether this node is the end of an n-gram.\n self.value = None #Provided that the node marks the end of an n-gram, this refers to the value mapped by this n-gram.\n self.children = dict() #A dictionary which maps the next elements in the current path of the prefix tree to the respective node of the tree.", "def initialize_node(db, c):\n\n # have we already called this function?\n if saq.SAQ_NODE_ID is not None:\n return\n\n saq.SAQ_NODE_ID = None\n\n # we always default to a local node so that it doesn't get used by remote nodes automatically\n c.execute(\"SELECT id FROM nodes WHERE name = %s\", (saq.SAQ_NODE,))\n row = c.fetchone()\n if row is not None:\n saq.SAQ_NODE_ID = row[0]\n logging.debug(\"got existing node id {} for {}\".format(saq.SAQ_NODE_ID, saq.SAQ_NODE))\n\n if saq.SAQ_NODE_ID is None:\n execute_with_retry(db, c, \"\"\"INSERT INTO nodes ( name, location, company_id, is_local, last_update ) \n VALUES ( %s, %s, %s, %s, NOW() )\"\"\", \n (saq.SAQ_NODE, saq.API_PREFIX, saq.COMPANY_ID, True),\n commit=True)\n\n c.execute(\"SELECT id FROM nodes WHERE name = %s\", (saq.SAQ_NODE,))\n row = c.fetchone()\n if row is None:\n logging.critical(\"unable to allocate a node_id from the database\")\n sys.exit(1)\n else:\n saq.SAQ_NODE_ID = row[0]\n logging.info(\"allocated node id {} for {}\".format(saq.SAQ_NODE_ID, saq.SAQ_NODE))", "def get_first(self) -> object:\n if self.root is None: # If tree is empty\n return None\n\n return self.root.value # Returning root value", "def __init__(self, initial_state):\n self.initial_state = initial_state\n self.final_state = [1, 2, 3, 8, 0, 4, 7, 6, 5]\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)\n self.results = []", "def compute_default(self):\n if self.default is None and callable(self.compute_default_fn):\n self.default=self.compute_default_fn() \n if self.default not in self.objects:\n self.objects.append(self.default)", "def __init__(self):\n self._idx = Node.index\n Node.index += 1", "def initial_state(self, parameters = None):\n if parameters is None:\n parameters = self._get_static_parameters_or_die()\n return Value(\n state=ed.Categorical(logits=parameters.get('initial_dist_logits')))", "def initialstate(self):\n return self.problem.initialstate", "def zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0", "def __init__(self):\n self.root = None\n self.k = None", "def __init__(self):\n self.root = None\n self.k = None", "def _fill_root(self):\n if self.parent in filled_variables:\n return f\"{self.name} {st_persistent_perc}P {st_k}k\"\n return self.name", "def get_initial(self):\n return self.initial", "def _new_node(self):\n self._size += 1\n return self._node_factory()", "def initial_state(self):\n # Network details elided.\n initial_state = None\n\n return initial_state", "def __init__(self, value, prev=None, next=None):\n\n self.prev = prev # the node before this one — 
defaults to None\n self.value = value # the value to store\n self.next = next # the node after this one — defaults to None", "def initial(self):\n\n self.var.Kfrost = loadmap('Kfrost')\n self.var.Afrost = loadmap('Afrost')\n self.var.FrostIndexThreshold = loadmap('FrostIndexThreshold')\n self.var.SnowWaterEquivalent = loadmap('SnowWaterEquivalent')\n\n # FrostIndexInit=ifthen(defined(self.var.MaskMap),scalar(loadmap('FrostIndexInitValue')))\n # self.var.FrostIndex=FrostIndexInit\n self.var.FrostIndex = loadmap('FrostIndexInitValue')\n # self.var.AfrostIndex=-(1-self.var.Afrost)*self.var.FrostIndex\n # initial Frost Index value", "def getRandom(self) -> int:\n\n return random.choice(self.nodes).val", "def __init__(self):\n self.left = None\n self.right = None\n self.depth = 0\n self.val = None\n self.id = None", "def DefaultValue(self):\n return tf.zeros(self.shape, dtype=self.dtype)", "def DefaultValue(self):\n return tf.zeros(self.shape, dtype=self.dtype)", "def __init__(self, initial_node):\n self.__nodes = MinPriorityQueue({initial_node : initial_node.estimate})", "def __init__(self):\n self.sum_of_node_inputs = 0\n self.output = 0\n self.delta = 0\n self.dp = 0\n self.onehot_label = 0", "def _getDefaultValue(self):\n value = self._getDefaultValue()\n return value.getData() if value else None", "def test_find_highest_value_node_first(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [1.0, 1.0]\n nn.layers[3].nodes[1].weights = [0.0, 0.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '10')", "def get_value(self) -> T:\n return clone(self.default_value)", "def get_value(self):\n if not self.visited:\n # first visit at node\n self.visited = True\n\n # value calculation\n for node, weight in self.predecessors:\n self.value += (node.get_value() * weight)\n\n # applying activation function\n if self.activation is not None:\n self.activation()\n\n self.calculated = True\n\n return self.value\n else:\n # visited node\n if self.calculated:\n # calculated in this computation\n return self.value\n else:\n # recurrent connection\n return self.past_value", "def first_value(self):\n return 0", "def _assign_init(self, first_item):\r\n if hasattr(self.scalar_op, 'identity'):\r\n return str(self.scalar_op.identity)\r\n else:\r\n assert isinstance(self.scalar_op, (scal.Maximum,\r\n scal.Minimum))\r\n return first_item", "def _assign_init(self, first_item):\r\n if hasattr(self.scalar_op, 'identity'):\r\n return str(self.scalar_op.identity)\r\n else:\r\n assert isinstance(self.scalar_op, (scal.Maximum,\r\n scal.Minimum))\r\n return first_item", "def __call__(self):\n value = self._value\n if value is None:\n value = self._init()\n self._value = value\n return value", "def state_initial(self):\n return self.states_initial()[0]", "def __init__(self):\n self.idx = None\n self.val = None\n self.left = None\n self.right = None", "def get_value(self):\r\n return 0", "def mutate(self, node, _):\n new_node = ast.Num(n=node.n + 1)\n return new_node", "def root_value(self):\n return self.__root.get_value()", "def initial(self):\n return self.args[3]", "def __init__(self):\n self.number = None\n self.nodes = []\n self.type = None\n self.group = None\n self.material = None\n self.key = -1", "def __init__(self):\n\n self.nodes = {}", "def identity(self):\r\n self.piDD = {\"[1]\": None}\r\n 
self.top_node = \"[1]\"\r\n self.dim = 0", "def get_first(self) -> object:\n #binary search tree == empty\n if self.root is None:\n return None\n\n # return\n return self.root.value", "def starting_point(self, random=False):\n sqrt_C = sqrtm(self.covariance)\n sqrt_L = np.sqrt(self.mean_intensity)\n if random:\n random_matrix = np.random.rand(self.n_nodes, self.n_nodes)\n M, _ = qr(random_matrix)\n else:\n M = np.eye(self.n_nodes)\n initial = np.dot(np.dot(sqrt_C, M), np.diag(1. / sqrt_L))\n return initial", "def __init__(self):\n self.root = Node(None)", "def setInitialValue(self, *args):\n return _libsbml.Trigger_setInitialValue(self, *args)", "def __init__(self, abstract_value=None, representation=None, index=None):\n\n if len(list(filter(None, [abstract_value, representation, index]))) != 1:\n raise ValueError('Expected exactly one initial value')\n\n if index is not None:\n self.index = index\n self._abstract_value = None\n else:\n self.index = batch.add_rows('vals_'+node_type.id, tf.zeros([1, node_type.value_type.representation_shape]))[0]\n node_type.value_type.__init__(abstract_value=abstract_value, representation=representation)\n del self._representation", "def __init__(self):\n self.val = None", "def __init__(self):\n self.root = Node('')", "def rec_default(self):\n pass", "def __init__(self):\n super().__init__()\n self._value = 0", "def default_value(self) -> float:\n return pulumi.get(self, \"default_value\")", "def getDefault():", "def prepare_node(self, node):\n # Every change at the position of node will be recognized\n aexpr(lambda: node.position, globals(), locals())\\\n .on_change(lambda obs, oldv, newv: self.set_node_position(node, *newv))", "def value(self):\n\n if self.state == Node.State.VALID:\n return self._value\n else:\n with _NodeStackFrame(self):\n self.state = Node.State.PENDING\n self.value = self.compute_value(*self.args, **self.kwargs)\n return self._value", "def fillNode(node, grounding, db):\n gn = copy.deepcopy(node)\n gn.val = query(gn, grounding, db)\n return gn", "def _init_node_attributes(self):\n assert False", "def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None", "def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None", "def _update_min(self):\n tmp = self\n while tmp.left is not None:\n tmp = tmp.left\n return tmp.parent.key" ]
[ "0.6917573", "0.6599723", "0.64693916", "0.62434894", "0.6088791", "0.605029", "0.6040265", "0.5898664", "0.58754563", "0.5820543", "0.5805469", "0.5794411", "0.57890224", "0.57560384", "0.5747938", "0.57314616", "0.5730434", "0.5725593", "0.57140493", "0.57140493", "0.56994367", "0.56713814", "0.5666693", "0.56612456", "0.5652481", "0.5645944", "0.56458586", "0.5624305", "0.56189084", "0.5617808", "0.56137997", "0.5607398", "0.5596775", "0.55964184", "0.5596218", "0.55950594", "0.558086", "0.55720526", "0.5569308", "0.55512476", "0.55413604", "0.5540463", "0.5531693", "0.5495316", "0.5485417", "0.5478051", "0.54755235", "0.5470715", "0.5461766", "0.546034", "0.5433562", "0.54317236", "0.54287916", "0.54287916", "0.54279566", "0.54207116", "0.54133886", "0.5410908", "0.5410448", "0.54101855", "0.540541", "0.5404037", "0.5400453", "0.5400453", "0.5394933", "0.5392656", "0.53907937", "0.538711", "0.5385489", "0.5383486", "0.53627026", "0.5347435", "0.5347435", "0.5345404", "0.5340262", "0.532986", "0.5327101", "0.53193027", "0.5317672", "0.5315903", "0.5313174", "0.5310825", "0.53099066", "0.5309453", "0.53000796", "0.5295013", "0.52859294", "0.5282753", "0.52809393", "0.5268559", "0.5235831", "0.52314967", "0.5229555", "0.5224865", "0.52206385", "0.5219064", "0.521509", "0.5213956", "0.52090657", "0.52090657", "0.5209046" ]
0.0
-1
Set initial value from editor version using edit format option
def setInitDefault(self, editText):
    self.initDefault = self.storedText(editText)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def getEditInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def getEditInitDefault(self):\n return self.formatEditText(self.initDefault)[0]", "def getEditInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def on_editor_save(self):\n self.text = self.textWidget.get(\"1.0\", tk.END)", "def setModelData(self, editor, model, index):\n try:\n date = datetime.strptime(str(editor.text()), self.format)\n model.setData(index, date, Qt.EditRole)\n except:\n pass # If the text does not conform to the date format, do nothing.", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def initFormat(self):\n pass", "def update_editor ( self ):\n font = self.factory.to_wx_font( self )\n try:\n self._facename.SetStringSelection( font.GetFaceName() )\n except:\n self._facename.SetSelection( 0 )\n try:\n self._point_size.SetStringSelection( str( font.GetPointSize() ) )\n except:\n self._point_size.SetSelection( 0 )\n font.SetPointSize( min( 10, font.GetPointSize() ) )\n self._font.SetValue( self.str_value )\n self._font.SetFont( font )", "def set_format(cls,format):\n import __main__\n IP = __main__.__dict__['__IP']\n prompt = getattr(IP.outputcache,cls._prompt)\n prompt.p_template = format\n prompt.set_p_str()\n cls._format = format", "def _set_settings_version(c, settings_path, version_line):\n version_const = \"VERSION\"\n\n print(f\"Adjusting {version_const} in {settings_path} to {version_line}...\")\n c.run(f'sed -i .orig \\'s/^{version_const} =.*$/{version_const} = \"{version_line}\"/\\' \"{settings_path}\"')", "def readVersion(self):\n ds = self.root.findall(\"[@format]\")[0]\n raw_format = ds.attrib['format']\n try:\n self.documentFormatVersion = int(raw_format)\n except ValueError:\n # as of fontTools >= 3.27 'format' is formatted as a float \"4.0\"\n self.documentFormatVersion = float(raw_format)", "def setValue(self,val):\n if self._plain:\n self.input.setPlainText(str(val))\n else:\n updateText(self.input,str(val))", "def defaultLoad (self):\n self.srcEditor.setText( \"\" )\n self.srcEditor.setFocus()\n self.setReadOnly( readOnly=False )", "def createEditor(self, parent, option, index):\n editor = QLineEdit(parent)\n date = index.model().data(index, Qt.DisplayRole)\n editor.setText(date.strftime(self.format))\n return editor", "def testSetEditorValue(self):\r\n \r\n lineEdit = QtGui.QLineEdit()\r\n self._editorFactory.setEditorValue(lineEdit, u\"Test\")\r\n self.assertTrue(lineEdit.text() == u\"Test\" )\r\n \r\n spinBox = QtGui.QDoubleSpinBox()\r\n self._editorFactory.setEditorValue(spinBox, 2.05)\r\n self.assertTrue(spinBox.value() == 2.05)\r\n \r\n checkBox = QtGui.QCheckBox()\r\n self._editorFactory.setEditorValue(checkBox, True)\r\n self.assertTrue(checkBox.isChecked() == True)", "def edit():", "def setValue(self,val):\n val = str(val)\n if self._plain:\n self.input.setText(val)\n else:\n updateText(self.input,val)", "def set_initial(self, value):\n # TODO: Make an 
Initial Stock Adjust here\n pass", "def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMIEditForm, self).setContentData(content)", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def setValue(self,val):\n val = int(val)\n self.input.setText(str(val))", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def setEditorData(self, ledit, midx):\n cond = self._sel.give_cond(midx.row())\n val = cond[midx.column()]\n txt = \"\"\n if val is not None:\n txt = str(val)\n ledit.setText(txt)", "def setEditorData(self, ledit, midx):\n cond = self._sel.give_cond(midx.row())\n val = cond[midx.column()]\n txt = \"\"\n if val is not None:\n txt = str(val)\n ledit.setText(txt)", "def edition(self, key, value):\n return clean_val(\"a\", value, str).replace(\"ed.\", \"\")", "def on_widget_edited(self, value): # this is a slot\n # note this is exactly the same as @value.setter...\n self.value = value", "def update_format_string(self):\n if self._show_units:\n units = \" {}\".format(self._unit)\n else:\n units = \"\"\n\n if self._show_step_exponent:\n self.setSuffix(\"{0} Step: 1E{1}\".format(units, self.step_exponent))\n self.lineEdit().setToolTip(\"\")\n else:\n self.setSuffix(units)\n self.lineEdit().setToolTip('Step: 1E{0:+d}'.format(self.step_exponent))", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def __set__(self, instance, value):\n # make sure value follows \"major,minor,build\" convention\n if not is_version_valid(value):\n raise InvalidVersionFormat(\"Version: {0} is invalid\".format(value))\n\n super().__set__(instance, value)", "def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMISubEditForm, self).setContentData(content)", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def opt_format(self, fmt):\n key = get_enum_key(fmt, FORMATTERS)\n if key is not None:\n self.conf[\"format\"] = key\n print(\"Set format %r\" % key)\n else:\n print(\"Unknown format %r\" % fmt)", "def setValue(self,val):\n self.input.setText(str(val))", "def special_case(self):\n Input.clear_display(self, self.entries[4])\n self.entries[4].insert(INSERT, '1712/02/30 was a real date in Sweden')\n self.entries[4].configure(state='readonly')", "def _set_real_format(self, fmt):\n # try to use the _nomax variant if available\n if not self._max and fmt + '_nomax' in self.formats:\n self._format = self.formats[fmt + '_nomax']\n elif fmt in self.formats:\n self._format = self.formats[fmt]\n else:\n self._format = fmt\n\n self._format_line_count = self._format.count('\\n')", "def updateeng(self):\n self.enstr = self.enEdit.text()", "def format_cell_updated(self, cell, value=None):\n self.is_not_used()\n if value is not None:\n cell.value = value\n\n cell.fill = PatternFill(start_color='7fffd4', end_color='7fffd4', fill_type='solid')\n cell.font = Font(name='Ubuntu', size=11, color='555555', 
bold=False, italic=False)", "def convert_format(self, new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format", "def dummy():\n\t\t\tself.edit = True", "def setValue(self,val):\n val = float(val)\n self.input.setText(str(val))", "def entry_a_modified(self, content):\n if content.isdigit():\n self.model.number_a = int(content)\n self.show_calculations()", "def _init_edit(self):\n def edit(core, args):\n month = ' '.join(getattr(args, 'month', []))\n core.edit(month)\n\n usage = 'stl edit [month]'\n desc = (\n 'lets you vim the right file'\n )\n\n subp = self.subparsers.add_parser(\n 'edit', usage=usage, description=desc, help=desc)\n\n subp.add_argument(\n 'month', nargs=argparse.REMAINDER,\n help='the month you want to edit, e.g. oct 2016')\n\n subp.set_defaults(func=edit)", "def set_version(self, bundle, ctx, filename, version):", "def __init__(self, value: str):\n self.options = [\n \"v1.0\"\n ]", "def change_exteditor(self):\r\n path, valid = QInputDialog.getText(self, self.tr('External editor'),\r\n self.tr('External editor executable path:'),\r\n QLineEdit.Normal,\r\n CONF.get(self.ID, 'external_editor/path'))\r\n if valid:\r\n CONF.set(self.ID, 'external_editor/path', unicode(path))", "def set_version(v):\n old = get_version()\n sys.stderr.write('%s -> %s\\n' % (old, v))\n with open(INIT, 'r+') as f:\n text = f.read()\n text = pattern.sub(\"__version__ = %r\" % v, text)\n f.seek(0)\n f.truncate()\n f.write(text)", "def _init_obo_version(self, line):\n if line[0:14] == \"format-version\":\n self.format_version = line[16:-1]\n if line[0:12] == \"data-version\":\n self.data_version = line[14:-1]", "def reformat():\n toolkit.reformat()", "def _on_changed(self, entry, index):\r\n\r\n from re import sub\r\n from decimal import Decimal\r\n\r\n if index == 5:\r\n _text = entry.get_text()\r\n _text = Decimal(sub(r'[^\\d.]', '', _text))\r\n elif index in [16, 17]:\r\n _text = int(entry.get_text())\r\n else:\r\n _text = float(entry.get_text())\r\n\r\n self._modulebook.update(index, _text)\r\n\r\n return False", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def format(self):\n ...", "def _update_default(self, default_value):\n if self.type == \"uri_folder\" or self.type == \"uri_file\":\n self.default = default_value\n return\n else:\n if isinstance(default_value, float) and not math.isfinite(default_value):\n # Since nan/inf cannot be stored in the backend, just ignore them.\n # logger.warning(\"Float default value %r is not allowed, ignored.\" % default_value)\n return\n \"\"\"Update provided default values.\n Here we need to make sure the type of default value is allowed or it could be parsed..\n \"\"\"\n if default_value is not None and not isinstance(default_value, self._allowed_types):\n try:\n default_value = self._parse(default_value)\n except Exception as e:\n if self.name is None:\n msg = \"Default value of %s Input cannot be parsed, got '%s', type = %s.\" % (\n self.type,\n default_value,\n type(default_value),\n )\n else:\n msg = \"Default value of %s Input '%s' cannot be parsed, got '%s', type = %s.\" % (\n self.type,\n self.name,\n default_value,\n type(default_value),\n )\n raise MldesignerComponentDefiningError(cause=msg) from e\n self.default = default_value", "def 
edit_date(entry):\n entry.date = get_date()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def setDefaultValue(self, value: int, extend_range: bool=False):\n self.setPlaceholderText(str(self.__validate(value, extend_range)))\n if self.getCurrentValue() is None:\n self.__commitValue()", "def _update_editor(self):\n root = self.model.data_list\n root.append(RowModel(name='', value=''))\n del root[-1]", "def set_field_value(index, value):\r\n elem = world.css_find('.metadata_edit div.wrapper-comp-setting input.setting-input')[index]\r\n elem.value = value\r\n elem.type(Keys.TAB)", "def edit(self, new_content: str) -> None:\n\n # YOUR CODE HERE\n self.content = new_content", "def setField(self, data):\n\t\tview = self.view\n\t\tview.sbAbstraccion.setValue(data['sbAbstraccion'])", "def update_column_format(self):\n pass", "def rec_default(self):\n self.phase_triggers.setText('(0,1,320)')\n self.phase_min.setText('-1.57')\n self.phase_max.setText('1.57')", "def set_statement_default_value(self, value):\n self.set_value_into_input_field(self.statement_default_value_textbox_locator, value)\n self.click_element(self.statement_fields_bulk_edit_popup_title_locator)", "def createEditor(self, parent, options, midx):\n ledit = qt.QLineEdit(parent)\n vmin, vmax = self._vrange\n dnb = self._decimals_nb\n ledit.setValidator(ValueValidator(vmin, vmax, dnb, ledit))\n return ledit", "def __init__(self, value: str):\n self.options = [\n \"m\",\n ]", "def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def with_default_format(self, other):\n return evolve(\n self,\n set_format=self.set_format or other,\n default_format=other\n )", "def _setEditorText(self, text):\n if self.__lineEditKind:\n self._editor.setText(text)\n else:\n self._editor.setEditText(text)\n if text and self._editor.findText(text) == -1:\n self._editor.insertItem(0, text)", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. 
If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def set_default_mode(args):\n default_repr = parser_opts[args.inputFormat].default_representation\n if not args.representation:\n args.representation = default_repr\n if args.representation != default_repr:\n log.info(\"Will convert from %s -> %s representation\", default_repr, args.representation)\n else:\n log.info(\"Using default %s particle representation\", args.representation)", "def set_edits(self):\n self._window.input_line.setPlaceholderText('Input item to import')\n self._window.output_text.setPlaceholderText('Import Item')", "def reset(self):\n self.setPlainText(self.label)\n self.setEditable(False)\n if (len(str(self.label)) > 0):\n self.setTextWidth(-1)\n else:\n self.setTextWidth(CurrentTheme.VERSION_LABEL_MARGIN[0])\n \n if self.isTag:\n self.setFont(CurrentTheme.VERSION_FONT)\n else:\n self.setFont(CurrentTheme.VERSION_DESCRIPTION_FONT) \n self.updatePos()\n self.parentItem().updateWidthFromLabel()", "def initFormat(self):\n self.formatList = []", "def update(self, instance: Snippet, validated_data: dict) -> Snippet:\n instance.title = validated_data.get('title', default=instance.title)\n instance.code = validated_data.get('code', default=instance.code)\n instance.language = validated_data.get('language', default=instance.language)\n instance.style = validated_data.get('style', default=instance.style)\n instance.save()\n return instance", "def entry_b_modified(self, content):\n if content.isdigit():\n self.model.number_b = int(content)\n self.show_calculations()", "def setCurrentValue(self, value: int, extend_range: bool=False):\n self.setText(str(self.__validate(value, extend_range)))\n self.__commitValue()", "def asformat(self, format):", "def __init__(self, value: str):\n self.options = [\n \"mg.min.m-3\",\n \"kg.s.m-3\"\n ]", "def str_entered(self, tf, name):\n section, option = name\n text = tf.text\n _stash.config.set(section, option, text)\n self.save()", "def getInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)", "def __init__(self, value: str):\n self.options = [\n \"m3.s-1\",\n ]", "def __editorConfigChanged(self, editor):\n fn = editor.getFileName()\n line, pos = editor.getCursorPosition()\n enc = editor.getEncoding()\n lang = editor.getLanguage()\n eol = editor.getEolIndicator()\n zoom = editor.getZoom()\n self.__setSbFile(\n fn, line + 1, pos, encoding=enc, language=lang, eol=eol, zoom=zoom)\n self._checkActions(editor, False)", "def get_initial(self):\n\t\n\t#Getting the initial data and setting it\n initial = super(UpdateView, self).get_initial()\n\timage_ref = default_value.get_setting('compute', 'image_ref') \n flavor_ref = default_value.get_setting('compute', 'flavor_ref')\n initial.update({'test_id': self.kwargs['test_id'], 'image_ref': image_ref, 'flavor_ref': flavor_ref})\n return initial", "def set_modified(self, value):\n self.modified = value\n self.save_button.setEnabled(value)", "def set_modified(self, value):\n self.modified = value\n self.save_button.setEnabled(value)", "def 
DoEdit(self,event):\r\n raise UncodedError", "def rec_default(self):\n self.new_func_triggers.setText('(0,5)')\n self.new_param.setText('1')", "def setValue(self, value):\n self.setText(str(value))", "def __init__(self, value: str):\n self.options = [\n \"kg.m-3\"\n ]", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def edit(self):\n _, tmp = tempfile.mkstemp()\n with open(tmp, 'w') as f:\n f.write(\"\".join([x + self.newline for x in self.buffer]))\n cledit = os.getenv('EDITOR') or 'vi'\n p = subprocess.Popen([cledit, tmp])\n p.wait()\n buffer = editor.contents(tmp)\n if not buffer:\n return\n else:\n self.buffer = buffer", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def get_model_format_version(self):\n return None if self.model is None else self.model.get_format_version()", "def edit(self):\n\n pass", "def set_value_to_default(self):\n self.setValue(self.default_value)", "def run(self, edit, text):\n\n self.view.replace(edit, sublime.Region(0, self.view.size()), text)", "def testGetValueFromEditor(self):\r\n \r\n lineEdit = QtGui.QLineEdit()\r\n lineEdit.setText(QtCore.QString(u\"TestValue\"))\r\n self.assertEquals(self._editorFactory.getValueFromEditor(lineEdit), u\"TestValue\")\r\n \r\n lineEdit = QtGui.QLineEdit()\r\n lineEdit.setText(QtCore.QString(u\"\"))\r\n self.assertEquals(self._editorFactory.getValueFromEditor(lineEdit), None)\r\n \r\n spinBox = QtGui.QDoubleSpinBox()\r\n spinBox.setValue(23.04)\r\n self.assertEquals(self._editorFactory.getValueFromEditor(spinBox), 23.04)\r\n \r\n checkBox = QtGui.QCheckBox()\r\n checkBox.setChecked(True)\r\n self.assertTrue(self._editorFactory.getValueFromEditor(checkBox))\r\n \r\n comboBox = QtGui.QComboBox()\r\n comboBox.addItems([u\"test1\"])\r\n self.assertEquals(self._editorFactory.getValueFromEditor(comboBox), u\"test1\")\r\n \r\n listEditor = ListEditor(dict(), self._editorFactory, [\"test\"])\r\n self.assertEquals(self._editorFactory.getValueFromEditor(listEditor), [\"test\"])\r\n \r\n listEditor = ListEditor(dict(), self._editorFactory)\r\n self.assertEquals(self._editorFactory.getValueFromEditor(listEditor), list())", "def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )" ]
[ "0.62972623", "0.62713385", "0.62475604", "0.60933644", "0.6084278", "0.60778177", "0.6009081", "0.5782725", "0.561945", "0.5530775", "0.54974884", "0.549157", "0.54463965", "0.54128057", "0.5395345", "0.5382154", "0.53760034", "0.5361667", "0.53412473", "0.52998084", "0.5289389", "0.52869236", "0.52869236", "0.5259486", "0.5258819", "0.52222645", "0.52222645", "0.5215186", "0.5185209", "0.51613635", "0.5159629", "0.5156949", "0.51537293", "0.5149537", "0.51475304", "0.5142282", "0.51360196", "0.5129256", "0.51132214", "0.5093449", "0.50853586", "0.50847447", "0.50830996", "0.5079649", "0.5073335", "0.5070425", "0.5065359", "0.50636923", "0.505949", "0.5054124", "0.5048021", "0.50445235", "0.50234437", "0.5021516", "0.5009924", "0.50038314", "0.49990717", "0.49904346", "0.49880743", "0.49787438", "0.49762034", "0.49664184", "0.49618316", "0.49618265", "0.4934095", "0.492767", "0.49204952", "0.49121454", "0.49046645", "0.48975104", "0.48964268", "0.48951375", "0.48920292", "0.4882344", "0.48813882", "0.4873522", "0.48650837", "0.48650536", "0.48596686", "0.48518443", "0.48500192", "0.48462826", "0.48410955", "0.48317665", "0.48291975", "0.48284602", "0.48284602", "0.4828238", "0.48231953", "0.48153916", "0.48150164", "0.48150015", "0.4812036", "0.4806887", "0.4806189", "0.4804705", "0.47982624", "0.47940466", "0.47935316", "0.47929528" ]
0.6216652
3
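The documents of the two rows above form a matched setter/getter pair on the same field-format class: setInitDefault converts editor text into the stored representation, and getInitDefault hands that stored value to newly created nodes. A minimal sketch, assuming a stand-in TextFormat class and a trivial storedText() helper (the real helper validates the editor text against the field's format and returns a (storedValue, isValid) tuple, hence the [0] index), shows the round trip:

class TextFormat:
    def __init__(self):
        self.initDefault = ''      # stored-format default for new nodes
        self.isRequired = False

    def storedText(self, editText):
        # Assumed identity conversion; returns (storedValue, isValid).
        return (editText, bool(editText) or not self.isRequired)

    def getInitDefault(self):
        return self.initDefault

    def setInitDefault(self, editText):
        self.initDefault = self.storedText(editText)[0]


fmt = TextFormat()
fmt.setInitDefault('new node text')    # editor text -> stored default
assert fmt.getInitDefault() == 'new node text'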
Return initial value in edit format, found in edit format option
def getEditInitDefault(self):
    return self.formatEditText(self.initDefault)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def getEditInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def setInitDefault(self, editText):\n self.initDefault = self.storedText(editText)[0]", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def get_initial(self):\n\t\treturn self.initial", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_val_str(self):\n fmt_str = self.template.get_format_str()\n if self.val_obj is None:\n return \"\"\n elif fmt_str:\n return fmt_str % (self.val_obj.val)\n else:\n return str(self.val_obj.val)", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return 
(storedText, not storedText)", "def get_initial(self):\n return self.initial", "def getInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)", "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def format(self):\n return self.getparam(\"FORMAT\")", "def format(self):\n return self.getparam(\"FORMAT\")", "def _getAlterToFormat(cls, alter):\n if alter == '':\n alter = ['', '']\n if isinstance(alter, str): # nothing to do if it is dict\n alter = ['', alter]\n return alter", "def value(self):\n return str(self.input.currentText())", "def presentation(self, value):\r\n return value", "def default_formatter(self, data):\n return data", "def get_format(self):\n return self._format[0]", "def initial_value(self):\n return self._initial_value", "def get_value_display(self):\r\n if self.display_as == 'percentage':\r\n return '{0}%'.format(self.latest_value)\r\n if self.display_as == 'boolean':\r\n return bool(self.latest_value)\r\n if self.display_as == 'byte':\r\n return defaultfilters.filesizeformat(self.latest_value)\r\n if self.display_as == 'second':\r\n return time.strftime('%H:%M:%S', time.gmtime(self.latest_value))\r\n return self.latest_value", "def edition(self, key, value):\n return clean_val(\"a\", value, str).replace(\"ed.\", \"\")", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def get_format(self):\n pass", "def get_value ( self, object ):\n try:\n if self.format_func is not None:\n return self.format_func( self.get_raw_value( object ) )\n\n return self.format % ( self.get_raw_value( object ), )\n except:\n logger.exception( 'Error occurred trying to format a %s value' %\n self.__class__.__name__ )\n return 'Format!'", "def format(self) -> str:", "def int_format(self):\n ...", "def get_input_data(input_section: Dict) -> str:\n default_value = input_section.get(\"value\")\n if isinstance(default_value, str):\n return default_value\n\n if default_value:\n complex_field = default_value.get(\"complex\")\n if complex_field:\n if complex_field.get(\"accessor\"):\n return f\"{complex_field.get('root')}.{complex_field.get('accessor')}\"\n else:\n return f\"{complex_field.get('root')}\"\n return default_value.get(\"simple\")\n\n return \"\"", "def _get_field_edit_widget(self, row_index):\n field_row = self.field_rows[row_index]\n if not field_row.editable:\n raise TypeError(\"Cannot edit a boolean or dropdown field. 
(Internal error, tell the developer!)\")\n field_type = field_row.field_type\n field_value = self.get_field_dict(self.get_entry_id(self.active_row_index))[field_row.field_name]\n initial_text = repr(sorted(field_value)) if issubclass(field_type, list) else str(field_value)\n return self.Entry(\n field_row.value_box,\n initial_text=initial_text,\n integers_only=field_type == int,\n numbers_only=field_type == float,\n sticky=\"ew\",\n width=5,\n )", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def get_value ( self, object ):\n try:\n value = getattr( object, self.name )\n try:\n return self.format % ( value, )\n except:\n return 'Format!'\n except:\n return 'Undefined!'", "def value(self):\n self.refresh_default_value()\n return self.default_value", "def initial(self):\n return self.args[3]", "def asformat(self, format):", "def _format_default_value(self, default):\n return json.dumps(default)", "def getValue(self):\n return self.field.currentText()", "def format(self):\n return self._format", "def createEditor(self, parent, option, index):\n editor = QLineEdit(parent)\n date = index.model().data(index, Qt.DisplayRole)\n editor.setText(date.strftime(self.format))\n return editor", "def _getDefaultValue(self):\n value = self._getDefaultValue()\n return value.getData() if value else None", "def _getAlter(self):\n return self._getAlterToFormat(self.attr('alter'))", "def format( self ) :\n\n return( self.__format )", "def format(self):\n return self[\"format\"]", "def format(self):\n return self[\"format\"]", "def display_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"display_value\")", "def display_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"display_value\")", "def initFormat(self):\n pass", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def format(self) -> str:\n return pulumi.get(self, \"format\")", "def default_from(self):\n\n return \"\"", "def value_unformatted(self):\n return self._unformated_value", "def get_prep_value(self, value):\n\n try:\n return value.isoformat()\n except:\n pass\n\n # maybe value is a string containing a PartialDate?\n try:\n pd = string_to_partialdate(value)\n return pd.isoformat()\n except:\n return ''", "def default(self):\n\n return self._get_field(\"value\")", "def special_case(self):\n Input.clear_display(self, self.entries[4])\n self.entries[4].insert(INSERT, '1712/02/30 was a real date in Sweden')\n self.entries[4].configure(state='readonly')", "def format(self):\n ...", "def __str__(self):\n return '[{0}, {1}]'.format(self.timeValuePairs, self.defaultValue)", "def get_value( self, trans, grid, repository ):\n select_field = grids_util.build_changeset_revision_select_field( trans, repository, downloadable=False )\n if select_field.options:\n return select_field.options[ 0 ][ 0 ]\n return ''", "def format_field(model, name, value):\n if value is None: return value\n t = type( getattr(model,name) )\n if t == datetime:\n return value.replace('T',' ')\n return value", "def update_format_string(self):\n if self._show_units:\n units = \" {}\".format(self._unit)\n else:\n units = \"\"\n\n if self._show_step_exponent:\n self.setSuffix(\"{0} Step: 1E{1}\".format(units, self.step_exponent))\n self.lineEdit().setToolTip(\"\")\n else:\n self.setSuffix(units)\n self.lineEdit().setToolTip('Step: 
1E{0:+d}'.format(self.step_exponent))", "def _get_FIELD_display(self, field):\n value = getattr(self, field.attname)\n if value is None:\n return\n template = ''\n template += '{:d}' if field.decimals == 0 else '{:.%sf}' % field.decimals\n template += ' ' if field.spaced_display else ''\n template += '{!s:s}'\n return template.format(value, field.unit)", "def get_value(self, key, args, kwargs):\n if self.default is not None:\n try:\n return string.Formatter.get_value(self, key, args, kwargs)\n except KeyError:\n return self.default\n else:\n return string.Formatter.get_value(self, key, args, kwargs)", "def get_value( self, trans, grid, repository ):\n # A repository's metadata revisions may not all be installable, as some may contain only invalid tools.\n select_field = grids_util.build_changeset_revision_select_field( trans, repository, downloadable=False )\n if len( select_field.options ) > 1:\n return select_field.get_html()\n elif len( select_field.options ) == 1:\n option_items = select_field.options[ 0 ][ 0 ]\n rev_label, rev_date = option_items.split( ' ' )\n rev_date = '<i><font color=\"#666666\">%s</font></i>' % rev_date\n return '%s %s' % ( rev_label, rev_date )\n return select_field.options[ 0 ][ 0 ]\n return ''", "def edit():", "def get_value( self, trans, grid, repository ):\n select_field = grids_util.build_changeset_revision_select_field( trans, repository, downloadable=True )\n if len( select_field.options ) > 1:\n return select_field.get_html()\n elif len( select_field.options ) == 1:\n return select_field.options[ 0 ][ 0 ]\n return ''", "def get_new_value(self):\r\n if self.initial_value is None:\r\n return None\r\n\r\n return deepcopy(self.initial_value)", "def value(self):\n return str(self.input.text())", "def fmt_option_val(option):\n if option is None:\n return \"\"\n return str(option)", "def getInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)", "def get_default_value(self):\n pass", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def get_one(self, *args, **kw):\n #this would probably only be realized as a json stream\n tmpl_context.widget = self.edit_form\n pks = self.provider.get_primary_fields(self.model)\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n return dict(value=value,model=self.model.__name__)", "def getValue(self):\n return self.initValue", "def get_note_value(self):\n return f\"{self.first_name} {self.last_name}\"", "def _get_nullformat(self, newformat):\n if self._type == int:\n length = len(str(newformat % 1))\n return '%'+str(length)+'s'\n elif self._type == float:\n length = len(str(newformat % 1.0))\n return '%'+str(length)+'s'\n else:\n return newformat", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def set_format(cls,format):\n import __main__\n IP = __main__.__dict__['__IP']\n prompt = getattr(IP.outputcache,cls._prompt)\n prompt.p_template = format\n prompt.set_p_str()\n cls._format = format", "def value(self):\n s = str(self.input.toPlainText())\n if self._is_string_:\n return s\n else:\n return eval(s)", "def get_initial(self):\n return self.initial[:]", "def value(self):\n value = super(SpeciesListFilter, self).value()\n if value is None:\n if self.default_value is 
None:\n first_species = Book.objects.order_by('title').first()\n value = None if first_species is None else first_species.id\n self.default_value = value\n else:\n value = self.default_value\n return str(value)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def render_input(env_spec_entry):\n default_value = env_spec_entry[\"default_value\"]\n default_value_state = f'value=\"{default_value}\"' if default_value else \"\"\n\n env_spec_entry_input = (\n f'<input id=\"env_spec_{env_spec_entry[\"name\"].lower()}\" '\n f'name=\"{env_spec_entry[\"name\"].lower()}\" type=\"{env_spec_entry[\"type\"]}\" '\n f'{default_value_state}\" />\\n'\n )\n return env_spec_entry_input", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def format_default(reg):\n\t\tif reg.size == \"accum\":\n\t\t\treturn str(float(reg.default)) + \"k\"\n\t\telse:\n\t\t\treturn str(int(reg.default)) + \"L\"", "def field_value(self):\n return \"{}_{}\".format(self.place.id, self.line_location)", "def Value(self) -> str:", "def get_display_value(self):\n\n\t\treturn self.__display_value", "def default_field_formatter(variable_name: str, field: Field) -> str:\n return \"{{ \" + f\"form.{variable_name}\" + \" }}\"", "def adjust(self):\n if self._adjust is None:\n return \"\"\n return self._adjust" ]
[ "0.69168735", "0.6777391", "0.6163817", "0.5962609", "0.59190995", "0.5825621", "0.5639453", "0.55958575", "0.5588548", "0.55880916", "0.55728984", "0.5547174", "0.55372924", "0.5518307", "0.55125266", "0.54999983", "0.54888153", "0.54888153", "0.54887563", "0.5471209", "0.5466419", "0.545412", "0.54069316", "0.54069316", "0.53839904", "0.5377033", "0.53640187", "0.53620666", "0.5355793", "0.5349789", "0.5343258", "0.5341933", "0.533963", "0.53153485", "0.53153485", "0.5288899", "0.5269197", "0.5263535", "0.5258517", "0.5249449", "0.52473253", "0.5231447", "0.52152646", "0.5197711", "0.5186485", "0.51838565", "0.51748896", "0.5161524", "0.51536065", "0.51321536", "0.51240325", "0.51217145", "0.5121519", "0.5117097", "0.5117097", "0.51054955", "0.51054955", "0.5105423", "0.5100754", "0.50976294", "0.5095106", "0.50944275", "0.50924397", "0.508725", "0.5084116", "0.5081867", "0.50796103", "0.5072638", "0.5071877", "0.5069576", "0.50672466", "0.50647104", "0.5051511", "0.5050451", "0.50481766", "0.5046123", "0.49976718", "0.49971545", "0.49888214", "0.49814808", "0.4975419", "0.4968391", "0.4959143", "0.49534622", "0.4947233", "0.49469805", "0.4945034", "0.49448776", "0.4943174", "0.49430725", "0.493905", "0.49366254", "0.49283075", "0.49213216", "0.4913436", "0.49123442", "0.49113816", "0.49082282", "0.49070314", "0.4901602" ]
0.7327655
0
Return a list of choices for setting the init default
def initDefaultChoices(self):
    return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def get_choices(cls):\n return cls.values.items()", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def choices(self):\n return tuple(self._choices)", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def as_choices(cls, key_type=None):\n if key_type is None:\n key_type = cls.get_default_choice_type()\n return cls.enum_class.as_choices(key_type)", "def _set_default_suits(self):\n # set up suits\n suit_types = [('Spades', 1), ('Hearts', 2), ('Diamonds', 3), ('Clubs', 4)]\n # populate the list of suits\n suit_list = list()\n for s in suit_types:\n suit_list.append(Suit(s[0], s[1]))\n\n return suit_list", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = 
self.get_field_info_key('choices')", "def get_setting_choices(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n choices = setting.get('choices', None)\n\n if callable(choices):\n # Evaluate the function (we expect it will return a list of tuples...)\n return choices()\n\n return choices", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def create_default_repo_choice(self, default_repo):\n return (default_repo, default_repo)", "def get_template_base_dir_choices() -> list[tuple[str, str]]:\n # handle predefined choices\n choices, seen = [], set()\n for template_name in TemplateName:\n choices.append((template_name.value, template_name.label))\n seen.add(template_name.value)\n\n # handle custom choices via settings\n for template_name, display_name in getattr(settings, \"CAST_CUSTOM_THEMES\", []):\n if template_name not in seen:\n choices.append((template_name, display_name))\n seen.add(template_name)\n\n # search for template base directories\n template_directories = get_template_directories()\n template_base_dir_candidates = get_template_base_dir_candidates(template_directories)\n for candidate in template_base_dir_candidates:\n if candidate not in seen:\n choices.append((candidate, candidate))\n\n return choices", "def initialise_options():\r\n default_options = list(range(NUMBER_OF_TILES))\r\n default_weights = [1/NUMBER_OF_TILES]*NUMBER_OF_TILES\r\n return default_options, default_weights", "def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. 
Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list", "def default_variation(random, candidates, args):\r\n return candidates", "def default_variation(random, candidates, args):\r\n return candidates", "def get_default_options():\n return GROUPS_.values()", "def __init__(self, *initial):\n self.prompt_list = list(initial)", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def choices(self, var):\r\n return (self.curr_domains or self.domains)[var]", "def choices(self, choices):\n\n self._choices = choices", "def get_choices_for_var(self, var):\n return self.choices[var]", "def get_options(self):\n return []", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)", "def default_value_list(sources: List[str] = None):\n if not default:\n return list()\n if not sources:\n return [default]\n else:\n return sources", "def _get_target_choices():\n apps = [('public', _(\"Public website\"))]\n for model, entity in registry.registry.items():\n if entity.menu:\n appname = model._meta.app_label.lower()\n apps.append((appname, unicode(entity.label)))\n return tuple(apps)", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def get_default_is_selected_index(self, choicesdata):\n\n return 0", "def _create_defaults(self):\n return DefaultCommandOptionValues(\n min_confidence=3, output_format='vs7')", "def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))", "def create_options(self):\n return []", "def Choices(cls):\n attr = '_choice_attr_' + cls.__name__\n if hasattr(cls, attr):\n return getattr(cls, attr)\n\n choices = set()\n for (k, v) in cls.__dict__.items():\n if not k.startswith('_') and issubclass(type(v), (str, unicode)):\n choices.add(v)\n for base in cls.__bases__:\n if issubclass(base, ChoiceBase) and base is not ChoiceBase:\n choices = set.union(choices, base.Choices())\n setattr(cls, attr, choices)\n\n return choices", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def form_SelectChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options)\n form['mySelect'].default = 2\n return form", "def season_choices():\n return [(s, s) for s in range(0, 3)]", "def is_a_list_of_choices(self):\n pass", "def setChoices(self, choices):\n self.getGtkObject('property_liststore').clear()\n for choice in choices:\n self.getGtkObject('property_liststore').append([str(choice)])", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in 
context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def episode_choices():\n return [(e, e) for e in range(0, 2)]", "def setAll(self):\n self.setValue(self._choices_)", "def configure_list_of_choices_type_question(self, question_data):\n self.driver.find_radio_button(LIST_OF_CHOICE_RB).click()\n index = 1\n for choice in fetch_(CHOICE, from_(question_data)):\n if index > 1:\n self.driver.find(ADD_CHOICE_LINK).click()\n self.driver.find_text_box(by_xpath(CHOICE_XPATH_LOCATOR + \"[\" + str(index) + \"]\" + CHOICE_TB_XPATH_LOCATOR)).enter_text(choice)\n index += 1\n choice_type = fetch_(ALLOWED_CHOICE, from_(question_data))\n if ONLY_ONE_ANSWER == choice_type:\n self.driver.find_radio_button(ONLY_ONE_ANSWER_RB).click()\n elif MULTIPLE_ANSWERS == choice_type:\n self.driver.find_radio_button(MULTIPLE_ANSWER_RB).click()\n return self", "def get_init_list(self):\n\n return self.convert_compartments_to_list(self.init_compartments)", "def __init__(self, choiceList=None, prompt=DEFAULT_PROMPT, title=DEFAULT_TITLE):\n self.choice = None\n \n wpf.LoadComponent(self, GUI_XAML_FILE)\n \n self.Title = title\n self.lblPrompt.Content = prompt\n \n self.choicesBox.ItemsSource = choiceList", "def initDefaults(self):\n return _libsbml.Species_initDefaults(self)", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def get_defaults(self):\n\t\treturn self.__defaults", "def choices(self):\n self._choices = self.getChoices()\n return len(self._choices)", "def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)", "def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def default_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"default_values\")", "def test_get_prior_string_list(self):\n categories = list(range(10))\n categories[0] = \"asdfa\"\n categories[2] = \"lalala\"\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert 
dim.get_prior_string() == (\n \"choices(['asdfa', 1, 'lalala', 3, 4, 5, 6, 7, 8, 9], \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def choose_option(self, state):\n options = [o for o in self.options if o.initiation_set[state] == 1]\n return random.choice(options)", "def setUp(self):\n current_date = date.today()\n name = 'name'\n possible_meals = [Meal(date=current_date, name=name)]\n self.possible_meals_choices = [(possible_meal.id, possible_meal.name)\n for possible_meal in possible_meals]", "def all_options():\n return _OptionRegistry.values()", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "async def choices(self, ctx, *, options):\n choices = options.split('-')\n choice = random.choice(choices)\n await ctx.send(f'My choice is\\\"{choice}\\\"')", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def initialize_options(self):", "def initDefaults(self):\n return _libsbml.Reaction_initDefaults(self)", "def calendar_choices(self):\n if not self._calendars:\n if self.authenticated:\n default = self.account.schedule().get_default_calendar()\n # {\n # \"default\" : <DEFAULT_CALENDAR>,\n # \"<CALENDAR_NAME>: <CALENDAR>,\n # ...\n # }\n self._calendars = {\n DEFAULT_CALENDAR: default,\n **{\n c.name: c\n for c in self.account.schedule().list_calendars() if c.name != default.name\n }\n }\n\n return self._calendars", "def get_options(self):\r\n return self._option_values", "def getOptionsNames(self) -> List[unicode]:\n ...", "def default_args(self) -> Optional[list[str]]:\n _args: list[Arg] = []\n _ctx = self._select(\"defaultArgs\", _args)\n return _ctx.execute_sync(Optional[list[str]])", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = 
self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def available_binary_choices() -> Iterable[str]:\n for name, _ in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n if name.startswith('Binary'):\n yield name", "def initDefaults(self):\n return _libsbml.Event_initDefaults(self)", "def form_CheckboxMultiChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('multiChoice', schemaish.Sequence(schemaish.Integer()))\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['multiChoice'].widget = formish.CheckboxMultiChoice(options)\n form['multiChoice'].default = [2]\n return form", "def default_selection(random, population, args):\r\n return population", "def form_SequenceOfStringsWithDefault(request):\n schema = schemaish.Structure()\n schema.add( 'myList', schemaish.Sequence( schemaish.String() ))\n\n form = formish.Form(schema, 'form')\n form.defaults = {'myList': ['a','b']}\n return form", "def test_model_choices_all_models(self):\n unique_action_admin = UniqueActionAdmin(UniqueAction, self.site)\n\n self.assertFalse(getattr(unique_action_admin, '_model_choices', False))\n\n model_choices = unique_action_admin.model_choices()\n\n self.assertTrue(getattr(unique_action_admin, '_model_choices'))\n self.assertTrue(isinstance(model_choices, list))", "def sel_prep(self):\n sel_blob = []\n for sel in self.blob['options']:\n if self.blob['defaultValue'] == sel['name']:\n sel_blob.append({'value': sel['name'], 'selected': 'true'})\n else:\n sel_blob.append({'value': sel['name'], 'selected': 'false'})\n\n return sel_blob", "def test_default(self):\n for n in range(1, 5):\n for prefix in ['', 'git-', 'gbp-']:\n parser = GbpOptionParser('%scmd%d' % (prefix, n))\n self.assertEqual(parser.config['default_option'], 'default_default1')", "def setChoices(self,report):\n\t\tif report is not None:\n\t\t\tbrowser = report[1]['objects']\n\n\t\t\tif browser is not None:\n\t\t\t\tbrowserChoices = list()\n\t\n\t\t\t\t#compute select list\n\t\t\t\tfor b in browser:\n\t\t\t\t\tif \"chrome\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_CHROME\n\t\t\t\t\telif \"firefox\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_FF\n\t\t\t\t\telif \"thunderbird\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_TH\n\n\t\t\t\t\tfor p in b['profiles']:\n\t\t\t\t\t\tformValue = str(formString)+\"_\"+p['profileName']\t\n\t\t\t\t\t\tbrowserChoices.append((formValue,b['name']+\" - \"+p['profileName']))\n\t\t\t\n\t\t\t\tch = forms.ChoiceField(label=\"Profile\",widget=forms.Select(attrs={'class':'form-control'}),choices=browserChoices)\n\t\t\t\tself.fields['choices'] = ch", "def form_SelectWithOtherChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n form['mySelect'].default = 2\n return form" ]
[ "0.83096915", "0.8089902", "0.7565213", "0.7451019", "0.699929", "0.699929", "0.680488", "0.67091656", "0.66209406", "0.65692645", "0.6532258", "0.6486172", "0.64289325", "0.6406578", "0.63146526", "0.62376446", "0.62375015", "0.62119025", "0.61605716", "0.6160515", "0.6089932", "0.6064072", "0.60535115", "0.60409874", "0.6025764", "0.6001356", "0.5992603", "0.5973309", "0.59606636", "0.5928593", "0.59253234", "0.59120667", "0.59013265", "0.5882774", "0.5882774", "0.58603424", "0.5836189", "0.58113027", "0.57965106", "0.5786334", "0.57581234", "0.5740283", "0.573565", "0.57340217", "0.57094455", "0.5690138", "0.56835073", "0.56539315", "0.5648747", "0.5648359", "0.5643329", "0.56336606", "0.5628389", "0.5607492", "0.5601162", "0.55952716", "0.5583834", "0.5582097", "0.55678433", "0.5567291", "0.5554405", "0.55435175", "0.5521484", "0.5509103", "0.549984", "0.5486964", "0.54801327", "0.5473168", "0.54703456", "0.5448587", "0.5415777", "0.5399514", "0.5390045", "0.5388922", "0.5384503", "0.5379113", "0.53730917", "0.53626585", "0.5349482", "0.53474087", "0.53474087", "0.534685", "0.5342996", "0.5342234", "0.5339548", "0.533718", "0.53333235", "0.5328711", "0.5322346", "0.53161764", "0.53090143", "0.5302724", "0.52999085", "0.52886415", "0.52831566", "0.5275521", "0.5271917", "0.52677983", "0.52644336", "0.525422" ]
0.8791058
0
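The positive document in the row above is a stub returning an empty list, while its top-scoring negatives derive the defaults from the edit choices. A minimal runnable sketch of that pattern, assuming a hypothetical `ChoiceFormat` class with a `formatList` and a `getEditChoices` returning `(text, annotation)` pairs as in the negative snippets (these names are illustrative, not taken from the positive document):

# Illustrative sketch only; class and method names are assumed from the
# negative snippets above, not defined by the positive document.
class ChoiceFormat:
    formatList = ["yes", "no"]

    def getEditChoices(self, currentText=''):
        # mirrors a negative: pair each format entry with an empty annotation
        return [(text, '') for text in self.formatList]

    def initDefaultChoices(self):
        # default choices are the first element of each edit-choice pair
        return [entry[0] for entry in self.getEditChoices()]

print(ChoiceFormat().initDefaultChoices())  # ['yes', 'no']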
Return value to be compared for sorting and conditionals
def sortValue(self, data):
    storedText = data.get(self.name, '')
    return storedText.lower()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare(self, value: int, /) -> None:", "def compare(self) -> int:", "def compareFunction( self, first, second ):\n for ascending,column in self.sortOrder:\n aValue,bValue = column.get(first),column.get(second)\n diff = cmp(aValue,bValue)\n if diff:\n if not ascending:\n return - diff \n else:\n return diff \n return 0", "def cmpValue(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n return val1 > val2", "def cmpValue(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n return val1 > val2", "def compare(a, b):\n if a > b:\n return a\n return b", "def _get_comparison_func(self, adjective):\n return self.SONG_ADJECTIVES.get(adjective, {}).get(\"comparison\")", "def item_comparer(self):\n return self.item_comparer_value", "def compare(a,b):\r\n if a>b:\r\n return 1\r\n elif a==b:\r\n return 0\r\n else:\r\n return -1", "def compare(self, variable):\n if (not self.lower and variable > self.value) or \\\n (self.lower and variable < self.value):\n return pt.common.Status.SUCCESS\n return pt.common.Status.FAILURE", "def operator(self, sort):\r\n return None", "def comparison(self):\n return self._comparison", "def _cmp(a, b): # pylint: disable=invalid-name\n return (a > b) - (a < b)", "def less_than_or_equal(self) -> global___Expression:", "def __cmp__(self,o):\n\t\tif o != None:\n\t\t\treturn cmp(self.value,o.value)\n\t\telse:\n\t\t\treturn cmp(self.value,0)", "def comparison(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"comparison\")", "def __cmp__(self, x):\n if self.score < x.score: return -1\n elif self.score == x.score: return 0\n else: return 1", "def _compare(self, value, target):\n result = getattr(self.reg, target) - value\n self.reg.N = result >> 7\n self.reg.C = getattr(self.reg, target) >= value\n self.reg.Z = result == 0", "def __ge__( self, value ):\r\n\t\treturn ( self > value ) or ( self == value )", "def test_key_predicate(datum):\n return 0 < datum", "def cmp(x, y):\n return (x > y) - (x < y)", "def _less_than_or_equal_to_op(spec):", "def cmp(a, b):\n return (a > b) - (a < b)", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def cmp(x, y):\n return (x > y) - (x < y)", "def less(value, other):\n return value > other", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def _default_eval_func(a, b):\n emphasis = \"r2\"\n a_value = getattr(a, emphasis)\n b_value = getattr(b, emphasis)\n return a_value > b_value", "def comparison(self) -> str:\n return self._values.get('comparison')", "def greater_than_or_equal(self) -> global___Expression:", "def cmp ( self, object1, object2 ):\n return cmp( self.get_raw_value( object1 ),\n self.get_raw_value( object2 ) )", "def _test_method_sorter(_, x, y):\n if x == 'test_gc':\n return 1\n if y == 'test_gc':\n return -1\n if x > y:\n return 1\n if x < y:\n return -1\n return 0", "def greater(value, other):\n return value < other", "def _greater_than_or_equal_to_op(spec):", "def compare(a, b, larger_is_better):\n\n if larger_is_better:\n return a > b\n else:\n return a < b", "def cmp(x, y):\n if x == y:\n return 0\n elif x is None:\n if y is None:\n return 0\n else:\n return -1\n elif y is None:\n return 1\n else:\n # TODO: consider casting the values to string or int or floats?\n # note that this is the minimal replacement function\n return (x > y) - (x < y)", "def compare(x, y):\n if x >= y:\n return 1.0\n 
else:\n return 0.0", "def __gt__(self, value):\n self = self.__ge__(value)\n return self.__invert__()", "def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other", "def _custom_sorter(self, key1, key2):\n\n col = self._col\n ascending = self._colSortFlag[col]\n real = self.get_real_col(col)\n item1 = self.itemDataMap[key1][real]\n item2 = self.itemDataMap[key2][real]\n\n # Internationalization of string sorting with locale module\n if isinstance(item1, str) and isinstance(item2, str):\n cmpVal = locale.strcoll(item1, item2)\n elif isinstance(item1, bytes) or isinstance(item2, bytes):\n cmpVal = locale.strcoll(str(item1), str(item2))\n else:\n cmpVal = cmp(item1, item2)\n\n # If the items are equal, then pick something else to make the sort value unique\n if cmpVal == 0:\n cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))\n\n if ascending:\n return cmpVal\n else:\n return -cmpVal", "def _comparison_function(comp, value=0.0, **kwargs):\n if comp == 'g' or comp == '>':\n func = np.greater\n elif comp == 'ge' or comp == '>=':\n func = np.greater_equal\n elif comp == 'l' or comp == '<':\n func = np.less\n elif comp == 'le' or comp == '<=':\n func = np.less_equal\n elif comp == 'e' or comp == '=' or comp == '==':\n func = np.equal\n elif comp == 'ne' or comp == '!=':\n func = np.not_equal\n else:\n raise ValueError(\"Unrecognized comparison '{}'.\".format(comp))\n\n def comp_func(xx):\n return func(xx, value, **kwargs)\n\n return comp_func", "def adjustedCompareValue(self, value):\n return value", "def foo_2(x, y):\n\tif x > y:\n\t\treturn x\n\treturn y", "def cmp(x, y):\n if x + y > y + x: return 1\n elif x + y == y + x: return 0\n else: return -1", "def test_get_sort_value_with_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object.get_sort_value(after_object=self.test.datum_type1)\n expected = 10101\n self.assertEqual(expected, actual)", "def statusCompare (x, y):\n xs = db.status.get(x, 'order')\n ys = db.status.get(y, 'order')\n c = float(xs) - float(ys)\n if c >= 0.0: \n return int(c)\n else:\n return -int(abs(c))", "def sortValue(self, data):\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''", "def _default_eval_func(a, b):\n emphasis = \"accuracy\"\n a_value = getattr(a, emphasis)\n b_value = getattr(b, emphasis)\n return a_value > b_value", "def __cmp__(self, other) :\n if self.strength > other.strength:\n return 1;\n elif self.strength == other.strength :\n if self.rank > other.rank :\n return 1;\n elif self.rank == other.rank :\n return 1 if self.kickers > other.kickers else -1 if self.kickers < other.kickers else 0;\n return -1;", "def best_value(self):\r\n return self._best_value", "def get_result(mishkaScore: int, chrisScore: int) -> bool:\n if mishkaScore > chrisScore:\n return \"M\"\n if mishkaScore < chrisScore:\n return \"C\"\n return \"D\"", "def getValue(self):\n if self.left.getValue() >= self.right.getValue():\n return self.left.getValue()\n\n return self.right.getValue()", "def compare(num1, num2):\n if num1 > num2:\n return num1, num2\n return num2, num1", "def decide():", "def compare(self, *args):\n return _ida_hexrays.creturn_t_compare(self, *args)", "def lt_success_func(target, result):\n if result is None:\n return False\n return result < target", "def getValue(self):\n r = 1 if self.left.getValue() > self.right.getValue() else 0\n return r", "def test_get_sort_value_without_after_object(self):\n 
test_object = self.test.datum_type2\n actual = test_object.get_sort_value()\n expected = 10101\n self.assertEqual(expected, actual)", "def compare(self, operator, value, **kw):\n\n return operator(self.comparator, value)", "def assembly_compare(x, y) :\n if x.kinf() < y.kinf() :\n return 1\n elif x.kinf() == y.kinf() :\n return 0\n else : #x.resultType < y.resultType\n return -1", "def _cmp(x, y):\n if x[1].count > y[1].count:\n return CmpRelation.GREATER\n if x[1].count < y[1].count:\n return CmpRelation.LESS\n if x[1].ptn_length < y[1].ptn_length:\n return CmpRelation.GREATER\n if x[1].ptn_length > y[1].ptn_length:\n return CmpRelation.LESS\n return CmpRelation.EQUAL", "def comparator(self):\n return self.get_scores()", "def getValue(self):\n if self.left.getValue() <= self.right.getValue():\n return self.left.getValue()\n\n return self.right.getValue()", "def __cmp__(self, other):\n \n result = cmp(self.value, other.value)\n if result == 0:\n \"\"\"Values are identical, suits differ. Doesn't affect ranking in\n any way.\"\"\"\n result = cmp(self.suit, other.suit)\n return result", "def gt_success_func(target, result):\n if result is None:\n return False\n return result > target", "def getValue(self):\n r = 1 if self.left.getValue() <= self.right.getValue() else 0\n return r", "def compare(a, b):\n return a - b", "def getValue(self):\n r = 1 if self.left.getValue() >= self.right.getValue() else 0\n return r", "def getValue(self):\n r = 1 if self.left.getValue() < self.right.getValue() else 0\n return r", "def compare(self, comp_els):\n return min(comp_els, key= lambda x: x[1])[0]", "def compare(self, *args):\n return _ida_hexrays.cwhile_t_compare(self, *args)", "def comparator_converter(self, val):\r\n return val", "def _greater_than_op(spec):", "def cmp(a, b):\n if a is None and b is None:\n return 0\n elif a is None:\n return -1\n elif b is None:\n return 1\n else:\n return (a > b) - (a < b)", "def _slack_get_value(slack_response, search_value, search_field, return_field, classifier):\n if not slack_response['ok']:\n return False\n for item in slack_response[classifier]:\n if search_field in item and search_value == item[search_field] and return_field in item:\n return item[return_field]", "def item_comparer(self, value):\n self.item_comparer_value = value", "def _less_than_op(spec):", "def compare(self, comp_els):\n return max(comp_els, key=lambda x: x[1])[0]", "def compare_to(self, other) -> int:\n if self.id == other.id:\n return 0\n if self.status != other.status:\n return -1 if self.status < other.status else 1\n if self.last_played != other.last_played:\n return -1 if self.last_played < other.last_played else 1\n return -1 if self.id < other.id else 1", "def value(self) -> bool:", "def compare_entities(e1, e2):\n sp1 = e1.sorting_priority\n sp2 = e2.sorting_priority\n if sp1 > sp2:\n return 1\n elif sp1 == sp2:\n return 0\n else:\n return -1", "def try_compare(obj, key, comparison, search_value, override_value=\"\"):\n value = override_value if override_value else obj[key]\n try:\n return getattr(value, comparison)(search_value)\n except KeyError:\n return False\n except Exception as e:\n logging.warning('The following exception was ignored in {0}: {1}'.format(try_compare.__name__, e))", "def compare(first, second):\n for i in data:\n if(i['name'] == first ):\n first_num = i['follower_count']\n if(i['name'] == second):\n second_num = i['follower_count']\n if first_num > second_num:\n return 'a'\n else:\n return 'b'", "def compare(self, *args):\n return 
_ida_hexrays.fnumber_t_compare(self, *args)", "def fn_if(self, value):\n\n condition_name, true_value, false_value = value\n if self.parser.conditions.evaluate(condition_name):\n return true_value\n else:\n return false_value", "def compare(self, *args):\n return _ida_frame.stkpnt_t_compare(self, *args)", "def compare(self, *args):\n return _ida_hexrays.cnumber_t_compare(self, *args)", "def __cmp__(self, other):\n \n result = cmp(self.rank(), other.rank())\n if (result == 0):\n # Compare hand values\n for i in range(len(self.values())):\n result = cmp(self.values()[i], other.values()[i])\n if (result != 0):\n return result\n return result", "def __cmp__(self, other):\n if options.rank_by.lower() != \"money\":\n \"\"\"flags ▲, money ▲, hints ▼, time ▼\"\"\"\n this, that = len(self.flags), len(other.flags)\n if this == that:\n this, that = self.money, other.money\n if this == that:\n this, that = len(other.hints), len(self.hints)\n if this == that:\n this, that = other.last_scored(), self.last_scored()\n else:\n \"\"\"money ▲, hints ▼, time ▼, flags ▲\"\"\"\n this, that = self.money, other.money\n if this == that:\n this, that = len(other.hints), len(self.hints)\n if this == that:\n this, that = other.last_scored(), self.last_scored()\n if this == that:\n this, that = len(self.flags), len(other.flags)\n if this < that:\n return 1\n elif this == that:\n return 0\n else:\n return -1", "def ge_success_func(target, result):\n if result is None:\n return False\n return result >= target", "def le(self, val):\n\t\treturn LessOrEquals(self, val)", "def compareAUTOR(offense1, offense2):\n \n if (offense1 == offense2):\n return 0\n elif (offense1 > offense2):\n return 1\n else:\n return -1", "def ge(self, val):\n\t\treturn GreaterOrEquals(self, val)", "def __lt__(self, rs):\n Number.comparisons += 1\n result = self.data < rs.data\n return result", "def __lt__(self, value):\n return self.name < value.name", "def comparator(self) -> typing.Callable[[Vec, Vec, Term], bool]:\n pass", "def answer_sorter(thing):\r\n try:\r\n return float(thing[0])\r\n except ValueError:\r\n # Put all non-numerical answers first.\r\n return float('-inf')", "def __value_of(sentiment):\n if sentiment == 'positive': return 1\n if sentiment == 'negative': return -1\n return 0", "def comparison(op):\n def comp(*args):\n if args:\n item = args[0]\n for o in args[1:]:\n if op(item, o):\n item = o\n else:\n return Boolean(False)\n return Boolean(True)\n else:\n return Boolean(True)\n return comp", "def GetPriorityValue(self, *args, **kwargs):\n pass" ]
[ "0.6866802", "0.6862163", "0.6673221", "0.63514173", "0.63514173", "0.63162124", "0.6138884", "0.6054708", "0.60404193", "0.5911979", "0.5881882", "0.5874233", "0.58493686", "0.58137196", "0.5800126", "0.57923204", "0.57919735", "0.5789459", "0.57723606", "0.576029", "0.57464844", "0.5740192", "0.5739131", "0.57377875", "0.5727295", "0.5722804", "0.572151", "0.572151", "0.5721253", "0.57123834", "0.56671613", "0.5662787", "0.56535935", "0.564566", "0.5643461", "0.56243473", "0.55913526", "0.55684245", "0.55416465", "0.55376697", "0.55260545", "0.5520376", "0.550396", "0.5501438", "0.55007446", "0.54968375", "0.5496444", "0.54888177", "0.5479773", "0.5475158", "0.54647714", "0.546428", "0.5462475", "0.5460215", "0.5457367", "0.5456048", "0.54536116", "0.54472333", "0.5445657", "0.54444236", "0.54395616", "0.5425802", "0.54251575", "0.54206574", "0.5419941", "0.54156464", "0.5415602", "0.5401689", "0.5400704", "0.5400345", "0.53997445", "0.5399354", "0.53986514", "0.5389632", "0.5386925", "0.5379262", "0.53642875", "0.5363486", "0.5362223", "0.5340353", "0.5338446", "0.5338289", "0.53378546", "0.53312", "0.53285456", "0.53116953", "0.53059", "0.5297315", "0.529591", "0.52924097", "0.5286062", "0.52838206", "0.5283629", "0.5281948", "0.5263634", "0.52635086", "0.52569175", "0.5256176", "0.5250123", "0.5241499", "0.52405554" ]
0.0
-1
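A short usage sketch for the `sortValue` positive above, assuming a minimal field class whose `name` attribute indexes a per-row `data` dict, consistent with the conventions visible in the snippets (the `TextField` name is hypothetical):

# Hypothetical field class to exercise sortValue; the name is assumed.
class TextField:
    def __init__(self, name):
        self.name = name

    def sortValue(self, data):
        # case-insensitive key used for sorting and conditionals
        storedText = data.get(self.name, '')
        return storedText.lower()

field = TextField('title')
rows = [{'title': 'beta'}, {'title': 'Alpha'}, {'title': 'gamma'}]
rows.sort(key=field.sortValue)
print([r['title'] for r in rows])  # ['Alpha', 'beta', 'gamma']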
Return conditional comparison value with realtime adjustments, used for date and time types' 'now' value
def adjustedCompareValue(self, value):
    return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjustedCompareValue(self, value):\n if value.startswith('now'):\n return repr(GenTime())\n return value", "def adjustedCompareValue(self, value):\n if value.startswith('now'):\n return repr(GenDate())\n return value", "def condition(self):\n HH = str(time.localtime().tm_hour)\n MM = str(time.localtime().tm_min)\n return eval(self._cond_str)", "def get_state_by_time(python_time):\n present = datetime.now()\n\n if python_time <= present:\n return 2\n else:\n return 1", "def now(self):\n return conditional_now() + self.timedelta(**self.now_shift_kwargs)", "def check(self, comparison, value, value_type, second_value=None):\n now = datetime.now()\n if value_type == \"WEEKDAY\":\n if comparison not in [\"NE\", \"E\", \"WEEKDAY\", \"WEEKEND\"]:\n raise Exception(f\"Comparison {comparison} \"\n \"not valid for WEEKDAY\")\n if comparison == \"E\":\n return now.weekday() == value\n elif comparison == \"NE\":\n return now.weekday() != value\n elif comparison == \"WEEKDAY\":\n return now.weekday() < 5 # ISO counts from 0\n else:\n return now.weekday() > 4 # so Sat,Sun are 5,6\n if value_type == \"DATE\":\n dt = datetime.strptime(value, DATE_FMT)\n dt = dt.date()\n now = now.date()\n elif value_type == \"TIME\":\n dt = datetime.strptime(value, TIME_FMT)\n dt = dt.time()\n now = now.time()\n else:\n dt = datetime.strptime(value, DATETIME_FMT)\n if comparison == \"LE\":\n return now <= dt\n elif comparison == \"E\":\n return now == dt\n elif comparison == \"GE\":\n return now >= dt\n # At this point, we're doing either IN or OUT, so read second time\n # format\n if value_type == \"DATE\":\n second = datetime.strptime(second_value, DATE_FMT)\n second = second.date()\n elif value_type == \"TIME\":\n second = datetime.strptime(second_value, TIME_FMT)\n second = second.time()\n else:\n second = datetime.strptime(second_value, DATETIME_FMT)\n if comparison == \"IN\":\n return now >= dt and now <= second\n elif comparison == \"OUT\":\n return now <= dt or now >= second", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def get(self):\n now = datetime.datetime.utcnow()\n if now > self.time_of_next_update:\n self._update_value()\n return self.value", "def set_when(day, today):\n if day < today:\n return \"past\"\n if day == today:\n return \"present\"\n return \"future\"", "def greater_than_or_equal(self) -> global___Expression:", "def __cmp__(self, other):\n return (self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False", "def test_expression_dates(self):\n import datetime\n import time\n time1 = datetime.datetime.now()\n time.sleep(0.01)\n time2 = datetime.datetime.now()\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = 
BooleanExpression(\"NORMAL\", models.Network.updated_at < time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")", "def less_than_or_equal(self) -> global___Expression:", "def current_time(cls) -> float:", "def next_update_in(self, now):\n # Never updated: NOW!\n if self.last_tested is None:\n return 0.0\n\n # Was updated\n seconds_ago = (now - self.last_tested).total_seconds()\n delay = self.real_period - seconds_ago\n return max(delay, 0.0) # don't allow it to be negative", "def after(v1,v2):\n return v1.time_left>v2.time_left", "def check_time_since_last_data(device_origin):\n actual_time = time.time()\n sec_since_last_data = actual_time - mon_item.read_device_status_values(device_origin)[1]\n min_since_last_data = sec_since_last_data / 60\n min_since_last_data = int(min_since_last_data)\n latest_data_hr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(latest_data))\n return min_since_last_data", "def test_process_filter_value():\n now = dt.utcnow()\n now_ts = now.timestamp()\n filter_ = {'column': \"ts_created_at\", 'value': now_ts, type: 'leq'}\n assert process_filter_value(filter_) == now\n\n filter_ = {'column': \"created_at\", 'value': now_ts, type: 'leq'}\n assert process_filter_value(filter_) == now_ts", "def compare(x, y):\n if x >= y:\n return 1.0\n else:\n return 0.0", "def newer(a, b):\n\treturn modtime(a) < modtime(b) # smaller is earlier", "def test_larger_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 3, 45)\n rhs = datetime(2012, 9, 20, 2, 45)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def test_get_current_time_is_constant() -> None:\n time_provider = TimeProvider()\n current_time_1 = time_provider.get_current_time()\n current_time_2 = time_provider.get_current_time()\n\n assert current_time_1 == current_time_2", "def _compare(self, value, target):\n result = getattr(self.reg, target) - value\n self.reg.N = result >> 7\n self.reg.C = getattr(self.reg, target) >= value\n self.reg.Z = result == 0", "def search_cond(ts):\n ts = convert(ts, mode='timestamp')\n at = [\"year\", \"month\", \"day\", \"hour\", \"minute\"]\n if all(getattr(ts, a) == getattr(upper_bound, a) for a in at):\n return 0\n elif ts < upper_bound:\n return -1\n elif ts > upper_bound:\n return 1", "def time_before(time_a, time_b=None) -> bool:\n if time_b is None:\n time_b = time_now()\n\n # make sure both times are floats\n time_a = float(date_to_epoch(time_a))\n time_b = float(date_to_epoch(time_b))\n return time_a < time_b", "def native_value(self) -> float:\n if (self.coordinator.data is None) or (self._last_updated is not 
None and \"last_updated\" in self.coordinator.data and self._last_updated > self.coordinator.data[\"last_updated\"]):\n self._attributes[\"last_updated_timestamp\"] = self._last_updated\n return self._state\n \n self._attributes[\"last_updated_timestamp\"] = self.coordinator.data[\"last_updated\"]\n self._state = self.coordinator.data[\"charge_limit_weekday\"]\n \n return self._state", "def less_than(self) -> global___Expression:", "def _comparison_function(comp, value=0.0, **kwargs):\n if comp == 'g' or comp == '>':\n func = np.greater\n elif comp == 'ge' or comp == '>=':\n func = np.greater_equal\n elif comp == 'l' or comp == '<':\n func = np.less\n elif comp == 'le' or comp == '<=':\n func = np.less_equal\n elif comp == 'e' or comp == '=' or comp == '==':\n func = np.equal\n elif comp == 'ne' or comp == '!=':\n func = np.not_equal\n else:\n raise ValueError(\"Unrecognized comparison '{}'.\".format(comp))\n\n def comp_func(xx):\n return func(xx, value, **kwargs)\n\n return comp_func", "def report_status(scheduled_time, estimated_time):\n if scheduled_time == estimated_time:\n return 'on time'\n elif scheduled_time > estimated_time:\n return 'early'\n else:\n return 'delayed'", "def is_before(self,other_date):", "def REAL_TIME_ADVANCE(dt):", "def __gt__(self, other):\n self_list = self.date.split(\"/\")\n other_list = other.date.split(\"/\")\n if self_list[2] > other_list[2]:\n return True\n else:\n if self_list[2] == other_list[2]:\n if self_list[1] > other_list[1]:\n return True\n elif self_list[1] == other_list[1]:\n if self_list[0] > other_list[0]:\n return True\n return False", "def __gt__(self, other):\n return self._metric_value > other.metric_value()", "def last_checked(self):\n\t\treturn self.current().time", "def greater_than(self) -> global___Expression:", "def ge(self, val):\n\t\treturn GreaterOrEquals(self, val)", "def when(self):\n\n # current UTC time\n now = datetime.datetime.utcnow()\n # calculate timedelta and return\n return now - self.creation_time", "def _greater_than_or_equal_to_op(spec):", "def test_since(self):\n import datetime\n dt1 = datetime.datetime(2013, 12, 15, 10, 10, 10)\n dt2 = datetime.datetime(2013, 12, 15, 10, 11, 10)\n\n check_list = health.CheckList(refresh=1)\n check_list._refreshed_at = dt1\n\n mock_datetime = self.mocker.replace(datetime)\n mock_datetime.datetime.now()\n self.mocker.result(dt2)\n self.mocker.replay()\n\n self.assertEqual(check_list.since(), '0:01:00')", "def comparison(self):\n return self._comparison", "def __gt__(self, other):\n if isinstance(other, float):\n return self.floatvalue > other\n else:\n return not self.negative and not self == other", "def __gt__(self, value):\n self = self.__ge__(value)\n return self.__invert__()", "def deciding(self):\n\n if not self.db.cacheEmpty():\n cacheMsgs = self.db.getCacheMsgs()\n prev = datetime.datetime.min\n prev_location = \"FOO LOCATION\"\n for msg in cacheMsgs:\n neutrinoTime = msg[\"neutrino_time\"]\n # go through messages to check if any two or more are within the time threshold\n if neutrinoTime - datetime.timedelta(seconds=self.coinc_threshold) <= prev:\n # verify the locations are different\n if msg[\"location\"] != prev_location:\n return True\n prev = neutrinoTime\n prev_location = msg[\"location\"]\n return False\n\n # return not self.db.cacheEmpty()", "def acceptable(self):\n now = datetime.datetime.now()\n origin = datetime.datetime.combine(self.date, datetime.time.min)\n start = origin + datetime.timedelta(hours=6)\n end = origin + datetime.timedelta(days=1)\n morning = 
end + datetime.timedelta(hours=6)\n if now < origin or now > morning:\n return 0\n if now >= end or now <= start:\n return 1\n return 3", "def time_after(time_a, time_b=None) -> bool:\n if time_b is None:\n time_b = time_now()\n\n # make sure both times are floats\n time_a = float(date_to_epoch(time_a))\n time_b = float(date_to_epoch(time_b))\n return time_a > time_b", "def debugTest(self):\n startTime = datetime.today()\n serverTzInfo = self.serverTimeZone\n startTime = startTime.replace(tzinfo=serverTzInfo)\n self.notify.info('startTime = %s' % startTime)\n serverTime = self.getCurServerDateTime()\n self.notify.info(\"serverTime = %s\" % serverTime)\n result = startTime <= serverTime\n self.notify.info(\"start < serverTime %s\" % result)\n startTime1MinAgo = startTime + timedelta(minutes = -1)\n self.notify.info('startTime1MinAgo = %s' % startTime1MinAgo)\n result2 = startTime1MinAgo <= serverTime\n self.notify.info(\"startTime1MinAgo < serverTime %s\" % result2)\n serverTimeForComparison = self.getCurServerDateTimeForComparison()\n self.notify.info(\"serverTimeForComparison = %s\" % serverTimeForComparison)\n result3 = startTime1MinAgo <= serverTimeForComparison\n self.notify.info(\"startTime1MinAgo < serverTimeForComparison %s\" % result3)", "def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other", "def compare(self, value: int, /) -> None:", "def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()", "def check_last_update(self):\n now = self.get_clock().now()\n diff_L = (now - self.last_stamp_L).nanoseconds * 1e-9\n diff_R = (now - self.last_stamp_R).nanoseconds * 1e-9\n if diff_L > 0.1:\n self.duty_left = 0.0\n if diff_R > 0.1:\n self.duty_right = 0.0", "def __gt__(self, other):\n return self.__f > other.get_f()", "def is_after(t1,t2):\n return (t1.hour, t1.minute, t1.second) > (t2.hour, t2.minute, t2.second)", "def curr_time():\r\n try:\r\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n except Exception as e:\r\n print(e)\r\n curr_time = False\r\n return curr_time", "def __ge__( self, value ):\r\n\t\treturn ( self > value ) or ( self == value )", "def evaluate_stopping_condition(self, current_value: Union[float, int, np.float64, np.ndarray]):\n\n if self.__reference_value is not None:\n\n if type(current_value) in [float, int, np.float64]:\n if not self.__smaller_value_required:\n if not self.__equal_required:\n return current_value > self.__reference_value\n else:\n return current_value >= self.__reference_value\n else:\n if not self.__equal_required:\n return current_value < self.__reference_value\n else:\n return current_value <= self.__reference_value\n\n elif type(current_value) == np.ndarray:\n if not self.__smaller_value_required:\n if not self.__equal_required:\n return (current_value > self.__reference_value).all()\n else:\n return (current_value >= self.__reference_value).all()\n else:\n if not self.__equal_required:\n return (current_value < self.__reference_value).all()\n else:\n return (current_value <= self.__reference_value).all()\n\n else:\n raise NotImplementedError\n\n 
else:\n return False", "def statusCompare (x, y):\n xs = db.status.get(x, 'order')\n ys = db.status.get(y, 'order')\n c = float(xs) - float(ys)\n if c >= 0.0: \n return int(c)\n else:\n return -int(abs(c))", "def test_larger_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 2, 59)\n rhs = datetime(2012, 9, 20, 3, 00)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)", "def update_waiting(self):\n if self.get_value(0) is not None and self.get_value(1) is not None:\n if self.name == \"greater\":\n self.set_value(self.get_value(0) > self.get_value(1), 0)\n if self.name == \"greater or equal\":\n self.set_value(self.get_value(0) >= self.get_value(1), 0)\n if self.name == \"less\":\n self.set_value(self.get_value(0) < self.get_value(1), 0)\n if self.name == \"less or equal\":\n self.set_value(self.get_value(0) <= self.get_value(1), 0)\n if self.name == \"not equal\":\n self.set_value(self.get_value(0) != self.get_value(1), 0)\n if self.name == \"xor\":\n self.set_value(bool(self.get_value(0)) ^ bool(self.get_value(1)), 0)\n self.state = ACTIVE", "def _get_current_time_if_none(given_time):\n\t\treturn given_time or time.time()", "def value_equal_keyvalue(attr, current_time=False):\n anim_val = get_anim_value_at_current_frame(attr)\n if current_time:\n val = cmds.getAttr(attr, time=current_time)\n else:\n val = cmds.getAttr(attr)\n if anim_val == val:\n return True", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def compare_dates(date1, date2, flag):\n if date1 > date2:\n if flag == \"l\":\n return date1\n return date2\n if flag == \"l\":\n return date2\n return date1", "def _get_comparison_func(self, adjective):\n return self.SONG_ADJECTIVES.get(adjective, {}).get(\"comparison\")", "def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)", "def __le__(self, other):\n return self.timestamp <= other.timestamp", "def getValueAt(self, time):\n for tvp in self.timeValuePairs:\n if time <= tvp[0]:\n return tvp[1]\n return self.defaultValue", "def __cmp__(self, other):\n if not isinstance(other, datetime):\n types = (type(other), datetime)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return (self._cmp(self._days, other._days)\n or self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def dynamic_comparison(v1, op, v2):\n assert op in ['gt', 'lt']\n\n operator_map = {'gt': operator.gt,\n 'lt': operator.lt}\n\n return operator_map[op](v1, v2)", "def ge(self, y):\n return 1 - self.lt(y)", "def less_equal(value, other):\n return value >= other", "def test_equal_inputs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = rhs = datetime(2012, 9, 20, 2, 59)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)\n self.assertIs(lhs, result)", "def check_compare(change, reference_value):\n rounded_change = round(change, 2)\n compare_values(reference_value, rounded_change)", "def _greater_than_op(spec):", "def _get_delta(self, now, then):\n if now.__class__ is not then.__class__:\n now = datetime.date(now.year, now.month, now.day)\n then = datetime.date(then.year, then.month, then.day)\n if now < then:\n raise ValueError(\"Cannot determine moderation rules because date field is set to a value in the 
future\")\n return now - then", "def currentTime(*args, update: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[time, Any]:\n pass", "def lessThanEqualTo(self, t):\n if t is None:\n return False\n if isinstance(t, (float, int)):\n return self._micros <= long(t * 1000000)\n else:\n return self._micros <= t._micros", "def get_now():\n return datetime.now()", "def get_now():\n return datetime.now()", "def __ge__(self, other):\n self.conds.append((self.name, '>=', other))\n return self\n return self.name, '>=', other", "def __get_timeval():\n return convert_timeval(time.time())", "def absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def ComputeTimeReward(self, currentTime, expectedTime):\r\n return (expectedTime - currentTime) * 1 if currentTime < expectedTime else (expectedTime - currentTime) * 1", "def _get_half_time(self):\n return self.__half_time", "def lessThan(self, t):\n if t is None:\n return False\n if isinstance(t, (float, int)):\n return self._micros < long(t * 1000000)\n else:\n return self._micros < t._micros", "def new_value(self):\n on_val = get_usable_value(self._momentary_mode_on_prop)\n follow_val = get_usable_value(self._momentary_follow_sense_prop)\n on_off_val = get_usable_value(self._momentary_on_off_trigger_prop)\n\n new_value = _calc_relay_mode(on_val, follow_val, on_off_val)\n if new_value == self.value:\n return None\n return new_value", "def __gt__(self, other):\n return self.greaterThan(other)", "def __ge__(self, other):\n # self >= other\n return self.runtime.greater_than_equal(self, other)", "def is_after(t1, t2):\n return (t1.hour, t1.minute, t1.second) > (t2.hour, t2.minute, t2.second)", "def match(self, dt):\n logic_map = {\n CLOSED_CLOSED: ((self.start is None or dt >= self.start) and\n (self.end is None or dt <= self.end)),\n CLOSED_OPEN: ((self.start is None or dt >= self.start) and\n (self.end is None or dt < self.end)),\n OPEN_CLOSED: ((self.start is None or dt > self.start) and\n (self.end is None or dt <= self.end)),\n OPEN_OPEN: ((self.start is None or dt > self.start) and\n (self.end is None or dt < self.end)),\n }\n return logic_map[self.interval]", "def __gt__(self, *args):\n return _ida_hexrays.cdo_t___gt__(self, *args)", "def check(self):\r\n boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))\r\n\r\n if self.hourly and not self.last_executed:\r\n return 0\r\n \r\n if self.daily and not self.last_executed:\r\n if int(self.hour) == self.now.hour:\r\n return 0\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60\r\n\r\n delta = self.now - self.last_executed\r\n if self.hourly:\r\n if delta.seconds >= 60*60:\r\n return 0\r\n else:\r\n return 60*60 - delta.seconds\r\n else:\r\n if int(self.hour) == self.now.hour:\r\n if delta.days >= 1:\r\n return 0\r\n else:\r\n return 82800 # 23 hours, just to be safe\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60", "def _get_detection_time_multiplier(self):\n return self.__detection_time_multiplier", "def test_details_time(self):\n self.assertLess(self.details.time, datetime.now(timezone.utc))", "def compare_datetime(self_datetime, other_datetime):\n # pylint: disable=superfluous-parens\n if (isinstance(self_datetime and other_datetime, (datetime, type(None)))):\n return (\n (self_datetime == other_datetime\n if all(str(_.time()) != 
\"00:00:00\"\n for _ in [self_datetime, other_datetime])\n else self_datetime.date() == other_datetime.date())\n if self_datetime and other_datetime\n else self_datetime == other_datetime)\n else:\n Representation.attrs_values_types_error(\n self_attr=self_datetime, other_attr=other_datetime,\n expected_types=(datetime.__name__, type(None).__name__))", "def comparison(self) -> str:\n return self._values.get('comparison')", "def compare(date1,date2):\n d1,m1,y1 = breakdate(date1)\n d2,m2,y2 = breakdate(date2)\n if y2>y1:\n return -1\n elif y1>y2:\n return 1\n else:\n if m2>m1:\n return -1\n elif m1>m2:\n return 1\n else:\n if d2>d1:\n return -1\n elif d1>d2:\n return 1\n else:\n return 0", "def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )" ]
[ "0.71395195", "0.68972015", "0.61192656", "0.6031985", "0.60295343", "0.5849544", "0.5812679", "0.5812679", "0.58069235", "0.56918633", "0.56755346", "0.5637127", "0.55914676", "0.558945", "0.557699", "0.55746406", "0.55669403", "0.5496197", "0.5487749", "0.54704833", "0.54651785", "0.54432815", "0.5394511", "0.5365986", "0.5359473", "0.53566545", "0.5320096", "0.5315645", "0.5310563", "0.5301557", "0.527551", "0.5273069", "0.5261676", "0.52586067", "0.52555126", "0.52424544", "0.52319825", "0.5230108", "0.52275896", "0.5226086", "0.52235067", "0.5222052", "0.5216998", "0.519802", "0.51860225", "0.51825964", "0.51818466", "0.51808065", "0.51784295", "0.5174853", "0.5174165", "0.51591575", "0.51574826", "0.51509833", "0.5137403", "0.5121277", "0.5118871", "0.511412", "0.5112014", "0.5104849", "0.5103911", "0.51010597", "0.5092621", "0.5087296", "0.5081318", "0.507838", "0.507586", "0.5069268", "0.5066894", "0.5063684", "0.50515413", "0.5049069", "0.5039244", "0.5037714", "0.50368106", "0.50357425", "0.5035702", "0.5033839", "0.50336504", "0.50320554", "0.50320554", "0.5030757", "0.50246775", "0.50211287", "0.50192016", "0.5017037", "0.5002551", "0.50016844", "0.50009704", "0.49993572", "0.4998586", "0.4993339", "0.4989118", "0.49890694", "0.49812198", "0.49782676", "0.49733946", "0.4968706", "0.49659705", "0.49650618" ]
0.517292
51
Return what we need to write into an XSL file for this type
def xslText(self):
    return u'<xsl:if test="normalize-space(./%s)">%s'\
           '<xsl:value-of select="./%s"/>%s</xsl:if>' % \
           (self.name, xslEscape(self.prefix), self.name, xslEscape(self.suffix))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, f):\n tree = f.build_etree(lxml=True)\n return self.xslt(tree)", "def process(self):\n try:\n f = StringIO.StringIO(self.content)\n dom = XTree.parse(f)\n xslt = XTree.parse(self.stylesheet)\n transform = XTree.XSLT(xslt)\n newdom = transform(dom)\n except IOError:\n print \"Xml or Xsl file not found!\"\n return False\n return XTree.tostring(newdom, pretty_print=True)", "def xslText(self):\n return TextFormat.xslText(self)", "def convert(self):\n self._convert()\n self._write_docx()", "def toxml(self) :\n\t\treturn self.doc.toxml()", "def _ooxml(self):\n # LOG: processing_type property\n self.set_property('processing_type', 'ooxml')\n try:\n doc = officedissector.doc.Document(self.src_path)\n except Exception:\n self.make_dangerous('invalid ooxml file')\n return\n # There are probably other potentially malicious features:\n # fonts, custom props, custom XML\n if doc.is_macro_enabled or len(doc.features.macros) > 0:\n self.make_dangerous('macro')\n if len(doc.features.embedded_controls) > 0:\n self.make_dangerous('activex')\n if len(doc.features.embedded_objects) > 0:\n # Exploited by CVE-2014-4114 (OLE)\n self.make_dangerous('embedded obj')\n if len(doc.features.embedded_packages) > 0:\n self.make_dangerous('embedded pack')", "def xslText(self):\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)", "def xslText(self):\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name", "def _write_docx(self):\n with ZipFile(self.out_file, 'w') as f:\n self._write_content_types(f)\n self._write_app(f)\n self._write_core(f)\n self._write_rels(f)\n self._write_document(f)\n self._write_fonts(f)\n self._write_document_rels(f)\n self._write_settings(f)\n self._write_styles(f)", "def create_output_file(self):\r\n self.output_file = openpyxl.Workbook()", "def _create_oai_xsl_template():\n oai_xsl_template = OaiXslTemplate()\n oai_xsl_template = _set_oai_xsl_template_fields(oai_xsl_template)\n\n return oai_xsl_template", "def createXML(whatToCreate):\n\n XMLSerializer = serializers.get_serializer(\"xml\")\n xml_serializer = XMLSerializer()\n if whatToCreate == \"allAccount\":\n path_fullToOutputFile = os.path.join(settings.PDF_OUTPUT_ROOT, \"accounts.xml\")\n objectsToSerialize = Account.objects.all()\n else:\n raise ProgrammingError(\n _(\"During XML Export it was not correctly specified which data that has to be exported\"))\n out = open(os.path.join(settings.PDF_OUTPUT_ROOT, \"accounts.xml\"), \"w\")\n if objectsToSerialize == '':\n raise NoObjectsToBeSerialzed(_(\"During XML Export it was not correctly specied data has to be exported\"))\n else:\n xml_serializer.serialize(objectsToSerialize, stream=out, indent=3)\n out.close()\n return path_fullToOutputFile\n\n # TODO def importAllAccountsXML(self):", "def export_to_xml(self, resource_fs):\r\n raise NotImplementedError('Modules must implement export_to_xml to enable xml export')", "def writeXml(self):\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if 
self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField,\n treedoc.escDict)\n return text", "def xslText(self):\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name", "def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')", "def _set_path_to_xml(self):\n\n self._path_to_xml = Path(__file__).parent / Path(XML_TABLE_PATH.format(self._instrument,\n self._original_file_type))\n\n if not self._path_to_xml.exists():\n raise ValueError(INVALID_FILE_TYPE_ERROR.format(self._original_file_type, self._instrument))", "def getXml(self):\n return _SALOMERuntime.InputXmlPort_getXml(self)", "def generateXML(self):\n return self.formatEval(\n self.TEMPLATES[self.attrs['name']]['XML'],\n self.attrs\n )", "def storeAndReturnXML(self):\n self._storeItems()\n return self.toXML()", "def get_xml(self):\n return etree.tostring(self.get_etree())", "def definition_to_xml(self, resource_fs):\r\n raise NotImplementedError(\r\n \"%s does not implement definition_to_xml\" % self.__class__.__name__)", "def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False", "def to_xml_file(self, xml_file_path):\n s = self.to_xml()\n with open(xml_file_path, \"w+b\") as f:\n f.write(s)", "def saveToXml(self) -> org.jdom.Element:\n ...", "def getXML(self):\n\n def _getElementForMappingEntry(entry, mappingStyle):\n xmlDocTmp = 
Document()\n element = xmlDocTmp.createElement(mappingStyle)\n for k, v in viewitems(entry):\n # ignore empty, None or compiled regexp items into output\n if not v or (k == \"path-match-expr\"):\n continue\n element.setAttribute(k, str(v))\n return element\n\n xmlDoc = Document()\n root = xmlDoc.createElement(\"storage-mapping\") # root element name\n for mappingStyle, mappings in viewitems(self):\n for mapping in mappings:\n mapElem = _getElementForMappingEntry(mapping, mappingStyle)\n root.appendChild(mapElem)\n return root.toprettyxml()", "def write(self):\n temp_string = minidom.parseString(ET.tostring(self.root)).toprettyxml(encoding=\"UTF-8\")\n with open(self.xml_file, 'w') as f:\n f.write(temp_string)\n # f = open(self.xml_file, \"w\")\n # f.write(temp_string)\n # f.close()", "def write(self):\n filters = {\n 'MSGUID': lambda x: ('{%s}' % x).upper(),\n 'relslnfile': lambda x: os.path.relpath(x, os.path.dirname(self.FileName))\n }\n context = {\n 'sln': self\n }\n return self.render(self.__jinja_template__, self.FileName, context, filters)", "def generate(self):\n xml = self.template()\n fp = BytesIO()\n gzip = GzipFile(self.filename, \"wb\", 9, fp)\n if isinstance(xml, str):\n xml = xml.encode(\"utf8\")\n gzip.write(xml)\n gzip.close()\n data = fp.getvalue()\n fp.close()\n return data", "def to_xml(self) -> str:\n # default name and stuff setup\n element_root, xml_tree = super()._add_basics()\n element_root = element_root.find('elementProp')\n element_root = element_root.find('collectionProp')\n for element in list(element_root):\n try:\n if element.attrib['name'] == 'influxdbUrl':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.influx_db_url:\n elem.text = self.influx_db_url\n elif element.attrib['name'] == 'application':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.application:\n elem.text = self.application\n elif element.attrib['name'] == 'measurement':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.measurement:\n elem.text = self.application\n elif element.attrib['name'] == 'summaryOnly':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value':\n elem.text = str(self.summary_only).lower()\n elif element.attrib['name'] == 'samplersRegex':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.samplers_regexp:\n elem.text = self.samplers_regexp\n elif element.attrib['name'] == 'percentiles':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.percentiles:\n elem.text = self.percentiles\n elif element.attrib['name'] == 'testTitle':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.test_title:\n elem.text = self.test_title\n elif element.attrib['name'] == 'eventTags':\n for elem in list(element):\n if elem.attrib['name'] == 'Argument.value' and self.event_tags:\n elem.text = self.event_tags\n except Exception:\n raise Exception(f'Unable to render xml from {type(self).__class__}')\n return tree_to_str(xml_tree, hashtree=True)", "def test_assemble_xml_file_write(self):\n self.maxDiff = None\n\n fh = StringIO()\n worksheet = Worksheet()\n worksheet._set_filehandle(fh)\n cell_format = Format({\"xf_index\": 1})\n\n # No format. 
Should be ignored.\n worksheet.write(0, 0, None)\n\n worksheet.write(1, 2, None, cell_format)\n\n worksheet.select()\n worksheet._assemble_xml_file()\n\n exp = _xml_to_list(\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <worksheet xmlns=\"http://schemas.openxmlformats.org/spreadsheetml/2006/main\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\">\n <dimension ref=\"C2\"/>\n <sheetViews>\n <sheetView tabSelected=\"1\" workbookViewId=\"0\"/>\n </sheetViews>\n <sheetFormatPr defaultRowHeight=\"15\"/>\n <sheetData>\n <row r=\"2\" spans=\"3:3\">\n <c r=\"C2\" s=\"1\"/>\n </row>\n </sheetData>\n <pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.3\"/>\n </worksheet>\n \"\"\"\n )\n\n got = _xml_to_list(fh.getvalue())\n\n self.assertEqual(got, exp)", "def getXSLURL(self, output='page'):\n mn_props = getToolByName(self, \"portal_properties\")['metnav_properties']\n output = output.lower().strip()\n if output == 'page':\n return mn_props.getProperty('XSL_PAGE', '')\n if output == 'portlet':\n return mn_props.getProperty('XSL_PORTLET', '')\n if output == 'dict':\n return mn_props.getProperty('XSL_DICT', '')\n if output == 'count':\n return mn_props.getProperty('XSL_COUNT', '')\n if output == 'rss':\n return mn_props.getProperty('XSL_RSS', '')\n if output == 'class_list':\n return mn_props.getProperty('XSL_CLASS_LIST', '')\n if output == 'table':\n return mn_props.getProperty('XSL_TABLE', '')\n if output == 'year_list':\n return mn_props.getProperty('XSL_YEAR_LIST', '')\n raise \"%s is not a valid value for an XSL output\" % output", "def export_to_xml(self, resource_fs):\r\n\r\n # Set up runtime.export_fs so that it's available through future\r\n # uses of the pure xblock add_xml_to_node api\r\n self.runtime.export_fs = resource_fs\r\n\r\n # Get the definition\r\n xml_object = self.definition_to_xml(resource_fs)\r\n self.clean_metadata_from_xml(xml_object)\r\n\r\n # Set the tag so we get the file path right\r\n xml_object.tag = self.category\r\n\r\n # Add the non-inherited metadata\r\n for attr in sorted(own_metadata(self)):\r\n # don't want e.g. data_dir\r\n if attr not in self.metadata_to_strip and attr not in self.metadata_to_export_to_policy:\r\n val = serialize_field(self._field_data.get(self, attr))\r\n try:\r\n xml_object.set(attr, val)\r\n except Exception:\r\n logging.exception(\r\n u'Failed to serialize metadata attribute %s with value %s in module %s. 
This could mean data loss!!!',\r\n attr, val, self.url_name\r\n )\r\n\r\n for key, value in self.xml_attributes.items():\r\n if key not in self.metadata_to_strip:\r\n xml_object.set(key, value)\r\n\r\n if self.export_to_file():\r\n # Write the definition to a file\r\n url_path = name_to_pathname(self.url_name)\r\n filepath = self._format_filepath(self.category, url_path)\r\n resource_fs.makedir(os.path.dirname(filepath), recursive=True, allow_recreate=True)\r\n with resource_fs.open(filepath, 'w') as file:\r\n file.write(etree.tostring(xml_object, pretty_print=True, encoding='utf-8'))\r\n\r\n # And return just a pointer with the category and filename.\r\n record_object = etree.Element(self.category)\r\n else:\r\n record_object = xml_object\r\n\r\n record_object.set('url_name', self.url_name)\r\n\r\n # Special case for course pointers:\r\n if self.category == 'course':\r\n # add org and course attributes on the pointer tag\r\n record_object.set('org', self.location.org)\r\n record_object.set('course', self.location.course)\r\n\r\n return etree.tostring(record_object, pretty_print=True, encoding='utf-8')", "def transform_s3_xsl(**kwargs):\n access_id = kwargs.get(\"access_id\")\n access_secret = kwargs.get(\"access_secret\")\n bucket = kwargs.get(\"bucket\")\n dest_prefix = kwargs.get(\"destination_prefix\")\n source_prefix = kwargs.get(\"source_prefix\")\n if kwargs.get(\"dag\"):\n run_id = kwargs.get(\"dag\").dag_id\n else:\n run_id = \"no-dag-provided\"\n\n saxon = prepare_saxon_engine()\n transformed = etree.Element(\"collection\")\n transformed.attrib[\"dag-id\"] = run_id\n transformed.attrib[\"dag-timestamp\"] = kwargs.get(\"timestamp\", \"no-timestamp-provided\")\n xsl = \"https://raw.github.com/{repo}/{branch}/{filename}\".format(\n repo=kwargs.get(\"xsl_repository\", \"tulibraries/aggregator_mdx\"),\n branch=kwargs.get(\"xsl_branch\", \"main\"),\n filename=kwargs.get(\"xsl_filename\")\n )\n\n for s3_key in process.list_s3_content(bucket, access_id, access_secret, source_prefix):\n logging.info(\"Transforming File %s\", s3_key)\n s3_content = process.get_s3_content(bucket, s3_key, access_id, access_secret)\n s3_xml = etree.fromstring(s3_content)\n for record in s3_xml.iterchildren():\n record_id = record.get(\"airflow-record-id\")\n logging.info(\"Transforming Record %s\", record_id)\n result_str = subprocess.check_output([\"java\", \"-jar\", saxon, \"-xsl:\" + xsl, \"-s:-\"], input=etree.tostring(record, encoding=\"utf-8\"))\n result = etree.fromstring(result_str)\n result.attrib[\"airflow-record-id\"] = record_id\n transformed.append(result)\n filename = s3_key.replace(source_prefix, dest_prefix)\n transformed_xml = etree.tostring(transformed, encoding=\"utf-8\")\n process.generate_s3_object(transformed_xml, bucket, filename, access_id, access_secret)", "def writeXMLDecl(self):\n return _libsbml.XMLOutputStream_writeXMLDecl(self)", "def setXSLTMode(self,value):\n self.PDFreactorConfiguration.in1[\"XSLTMode\"] = value", "def save(self, filename=None):\n f = filename if filename else self.path\n etree.register_namespace('', TEI)\n etree.register_namespace('mith', MITH)\n self.doc.write(f, xml_declaration=True, encoding='utf-8', method='xml')", "def toXML(self):\n return self._xmlpre+\"\\n\".join(map(lambda f:f.toXML(),self._items))+self._xmlpost", "def to_xml(self):\n # lines = super(FileCatNoEmpty, self).cat(filepath)\n structure = super(Point, self).to_xml()\n\n\n coords = GeometryTopologyData.__to_xml_vector__(self.coordinate, self.format)\n # description_str = ''\n # if 
self.description is not None:\n # description_str = '<Description>%s</Description>' % self.description\n\n return '<Point>%s<Coordinate>%s</Coordinate></Point>' % (structure, coords)", "def write(klass, document, target=None, stylesheet=\"\"):\n writer = LatexWriter(document, target, stylesheet)\n return writer.go()", "def to_xml(self, file_name=None):\n if file_name:\n xml_file = open(file_name, 'w')\n results = ['<schema name=\"%s\">' % self.name]\n for schema_object in list(self.schema.keys()):\n results.append(self.schema[schema_object].to_xml())\n results.append('</schema>')\n if file_name:\n xml_file.write('\\n'.join(results))\n xml_file.close()\n else:\n return '\\n'.join(results)", "def write_xosc(self, generated_xml):\n reparsed_xml = minidom.parseString(generated_xml).toprettyxml(indent=\" \")\n xosc_file = open(self._filepath, \"w\")\n xosc_file.write(reparsed_xml)\n xosc_file.close()\n\n msg = QMessageBox()\n if self._warning_message:\n msg.setIcon(QMessageBox.Warning)\n text = f\"Exported OpenSCENARIO file {self._filepath} has warnings!\\n\\n\"\n text += \"\\n\".join(self._warning_message)\n else:\n msg.setIcon(QMessageBox.Information)\n text = f\"Successfully exported OpenSCENARIO file to {self._filepath}\"\n msg.setText(text)\n msg.setWindowTitle(\"OpenSCENARIO Export\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.exec()", "def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()", "def start_serialization(self):\n self.xml = SimplerXMLGenerator(self.stream, self.options.get(\"encoding\", settings.DEFAULT_CHARSET))\n self.xml.startDocument()\n self.xml.startElement(\"xliff\", {\n \"version\": \"1.2\",\n \"xmlns\": \"urn:oasis:names:tc:xliff:document:1.2\",\n \"xmlns:d\": \"https://docs.djangoproject.com/\"\n })", "def XML_EC_PL(Name, InputsFile, OutputFile, emin,emax):\n\n\t#On commence par afficher ce qu'on fait\r\n\tprint \" Build xml file \"\r\n\r\tprint InputsFile\n\t#ouverture du fichier dans lequel on place le source model\n\ttry:\n\t\tfresult = open(OutputFile, 'w')\n\texcept:\n\t\tprint \"Coucou\"\r\n \t#ecriture des premieres lignes invariantes\n\tfresult.write('<?xml version=\"1.0\" ?>')\r\n\tfresult.write(\"<source_library title=\\\"source library\\\">\\n\")\n\r\n \t#ouverture du fichier avec les entrees\r\n\tf = open(InputsFile,\"r\")\r\n\tlines = f.readlines()\r\n\t\r\n \t#Ajout des sources detectees dans le catalogue\n\t#Pour chaque ligne du fichier d'entree\r\n\tfor line in range(len(lines)):\n\t\t#Lire les donnees de la ligne\t\t\r\n\t\tdata = lines[line].split()\r\n\t\tname = data[0]\n\n\t\t#Verification : est on en train de traiter la source que l'on veut etudier ou une autre ?\r\n\t\tif str(name) == Name :\r\n\t\t\tmysource = 1\r\n\t\telse:\r\n\t\t\tmysource = 0\n\n\t\t#recuperation des donnees\r\n\t\tRA = data[1]\r\n\t\tDEC = data[2]\r\n\t\tIntegral = float(data[3])*float(Frac)\r\n\t\tGamma= data[4]\n\n\t\t\r\n\t\ttry:\n\t\t\t#essai de definition des donnees pour un PL avec ExpCut\n\t\t\tPrefactor = float(data[5])*float(Frac)\r\n\t\t\tEnergy = float(data[6])\r\n\t#\t\tPrefactor = Prefactor/pow(Energy/100., float(Gamma)) #Densite de flux calculee a Epivot\r\n\t#\t\tPrefactor = Prefactor*pow(1000./100., float(Gamma)) #We do the calculation with (E/1000.)^Gamma\n\t\t\tvariabilite=float(data[8])\n\n#\t\t\tprint variabilite\n\n\n\n\r\n\t\t\tcut = float(data[7]) # Cut est la variable qui nous permettra de savoir si il faut utiliser un cut off (1) ou une loi de puissance normale 
(2)\r\n\t\texcept:\r\n\t\t\ttry:\r\n\t\t\t\tcut = float(data[5])\r\n\t\t\texcept:\r\n\t\t\t\tprint \" Wrong size of list \"\r\n\t\t\t\tsys.exit()\r\n \t#Si on considere un ccut off exponentiel pour la source :\r\n\t\tif cut == 1:\n\t\t\t#ecriture du nom de la source consideree\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\r\n\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\r\n\t\t\tspectrum_type = \"PLSuperExpCutoff\"\n\t\t\t#Utilisation de la modelisation PLSuperExpCutoff car plus simple et plus intuitive pour nous et pour la modelisation des pulsars si il faut en modeliser\n\r\n\t\t\t#definition des parametres spectraux a prendre en comtpe et de la chaine de caractere a integrer\r\n\n\n\n\t\t\tif variabilite==0.0 or variabilite==2.0:\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"10000000.0\\\" min=\\\"0.0000001\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\r\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.001\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\n\r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\t\t\telif variabilite==1.0 :\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10000000.0\\\" min=\\\"0.0\\\"\"\n\n\t\t\t\t#d'ou vient ce 1e-12\r\n\t\t\t\tIntegral = float(Prefactor)*1.0e10\r\n\t\t\t\tscale = 1.0e-10\n\n\t\t\t\tspectrum_lines += \" name=\\\"Prefactor\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index1\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"20000.0\\\" min=\\\"1.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Scale\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Energy)+\"\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"100.0\\\" min=\\\"0.0001\\\"\"\r\t\t\t\tspectrum_lines += \" name=\\\"Cutoff\\\" scale=\\\"1000.0\\\" value=\\\"30.0\\\"/>\\n\"\r\n \r\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\t\tspectrum_lines += \" name=\\\"Index2\\\" scale=\\\"1.0\\\" value=\\\"1.0\\\"/>\\n\"\n\n\r\n \r\n\n# <spectrum type=\"PLSuperExpCutoff\">\n# <parameter free=\"1\" max=\"100000\" min=\"0\" name=\"Prefactor\" scale=\"1e-10\" value=\"Prefactor*1e-10\"/>\n# <parameter free=\"1\" max=\"0\" min=\"5\" name=\"Index1\" scale=\"-1\" value=\"valeur du catalogue\"/>\n# <parameter free=\"0\" max=\"20000\" min=\"1.0\" name=\"Scale\" 
scale=\"1\" value=\"Epivot\"/>\n# <parameter free=\"1\" max=\"300000\" min=\"100\" name=\"Cutoff\" scale=\"1\" value=\"3000\"/>\n# <parameter free=\"0\" max=\"5\" min=\"0\" name=\"Index2\" scale=\"1\" value=\"1.5\"/>\n# </spectrum>\n\n\r\n\t\telse:\n\t\t#Sinon (si on considere une loi de puissance simple)\n\t\t#definition de la chaine de caractere comportant le nom de la source\r\n\t\t\tresult_line=\" <source \"\r\n\t\t\tresult_line += \"name=\\\"\"+name+\"\\\"\"\n\t\t\tif mysource == 0:\r\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\n\t\t\telse:\n\t\t\t\tresult_line += \" type=\\\"PointSource\\\">\\n\"\t\t\t\t\n\n\t\t\t#definition de la chaine de caractere correspondant a la forme de fit que l'on souhaite utiliser (Loi de puissance)\r\n\t\t\tspectrum_type = \"PowerLaw2\"\r\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre Integrale\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\r\n\t\t\telse:\n\t\t\t#sinon on le libere\r\n\t\t\t\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"1000000.0\\\" min=\\\"0.0\\\"\"\n\n\n\n\n\n\t\t\t#Toujours ce facteur....\r\n\t\t\tIntegral = float(Integral)*1e10\r\n\t\t\tscale = 1e-10\n\n\n\t\n\r\n\t\t\tspectrum_lines += \" name=\\\"Integral\\\" scale=\\\"\"+str(scale)+\"\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Integral)+\"\\\" />\\n\"\n\r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t\t\t\t#si ce n'est pas la source que l'on etudie on fige le parametre gamma\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\r\n\t\t\telse:\n\t\t\t\t#si c'est pas la source que l'on etudie on le laisse libre\r\n\t\t \t\tspectrum_lines += \" <parameter free=\\\"1\\\" max=\\\"5.0\\\" min=\\\"0.\\\"\"\n\n\t\t\t#fin de la chaine de parametres sur le modele spectral\r\n\t\t\tspectrum_lines += \" name=\\\"Index\\\" scale=\\\"-1.0\\\" value=\\\"\"\r\n\t\t\tspectrum_lines += str(Gamma)+\"\\\"/>\\n\"\r\n \r\n\t\t\tif mysource == 0 and variabilite!=1.0:\n\t \n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"1000.0\\\"/>\\n\"\r\n \r\n\t\t\t spectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"1000000.0\\\" min=\\\"20.0\\\"\"\r\n\t\t\t spectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\t\t\telse:\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"200000.0\\\" min=\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"LowerLimit\\\" scale=\\\"1.0\\\" value=\\\"100\\\"/>\\n\"\n\n\t\t\t\tspectrum_lines += \" <parameter free=\\\"0\\\" max=\\\"100000.0\\\" Min =\\\"20.0\\\"\"\n\t\t\t\tspectrum_lines += \" name=\\\"UpperLimit\\\" scale=\\\"1.0\\\" value=\\\"100000.0\\\"/>\\n\"\n\n \t\t#ajout du modele spectral a la liste de parametres \r\n\t\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\t\tresult_line += spectrum_lines\r\n\t\tresult_line += \" </spectrum>\\n\"\n\n\t\t\n\n\t\tif mysource==0 and variabilite!=1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"90\\\" 
min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telif mysource==0 and variabilite==1.0:\n \t\t\t#ajout du modele spatial a la liste de parametres \r\n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\r\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\r\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\r\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\telse:\n #ajout du modele spatial a la liste de parametres \n\t\t\tresult_line += \" <spatialModel type=\\\"SkyDirFunction\\\">\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"360\\\" min=\\\"-360\\\"\"\n\t\t\tresult_line += \" name=\\\"RA\\\" scale=\\\"1\\\" value=\\\"\"+RA+\"\\\"/>\\n\"\n\t\t\tresult_line += \" <parameter free=\\\"1\\\" max=\\\"90\\\" min=\\\"-90\\\"\"\n\t\t\tresult_line += \" name=\\\"DEC\\\" scale=\\\"1\\\" value=\\\"\"+DEC+\"\\\"/>\\n\"\n\t\t\tresult_line += \" </spatialModel>\\n\"\n\t\t\t\n\t\tresult_line += \" </source>\\n\"\r\n\t\tfresult.write(result_line+\"\\n\")\r\n #Ajout du fond diffus galactique\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"gal_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"ConstantValue\"\r\n\r\n\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Value\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n\r\n\tresult_line += \" <spatialModel file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/gll_iem_v02.fit\\\" type=\\\"MapCubeFunction\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"1000.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Normalization\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\r\n \t#Ajout du fond diffus extragalactique\r\n\tresult_line=\" <source \"\r\n\tresult_line += \"name=\\\"eg_v02\\\"\"\r\n\tresult_line += \" type=\\\"DiffuseSource\\\">\\n\"\r\n\tspectrum_type = \"FileFunction\"\r\n\r\tspectrum_lines = \" <parameter free=\\\"1\\\" max=\\\"10.0\\\" min=\\\"0\\\"\"\r\n\tspectrum_lines += \" name=\\\"Normalization\\\" scale=\\\"1.0\\\" value=\\\"\"+str(Frac)+\"\\\" />\\n\"\r\n\r\n\tresult_line += \" <spectrum file=\\\"/nfs/farm/g/glast/u31/marianne/VelaX/July09_Pointed/isotropic_iem_v02.txt\\\" type=\\\"\"+spectrum_type+\"\\\">\\n\"\r\n\tresult_line += spectrum_lines\r\n\tresult_line += \" </spectrum>\\n\"\r\n \r\n\tresult_line += \" <spatialModel type=\\\"ConstantValue\\\">\\n\"\r\n\tresult_line += \" <parameter free=\\\"0\\\" max=\\\"100.0\\\" min=\\\"0.0\\\"\"\r\n\tresult_line += \" name=\\\"Value\\\" scale=\\\"1\\\" value=\\\"1.0\\\"/>\\n\"\r\n\tresult_line += \" </spatialModel>\\n\"\r\n\tresult_line += \" </source>\\n\"\r\n\tfresult.write(result_line+\"\\n\")\r\n\n \t#Fermeture des fichiers \r\n\tf.close() \r\n\tfresult.write(\"\\n</source_library>\\n\")\r\n\tfresult.close()\r\n\treturn", "def get(self):\n return 
_SALOMERuntime.OutputXmlPort_get(self)", "def toXML(self):\n return _libsbml.Layout_toXML(self)", "def main():\n\n parser = argparse.ArgumentParser(description='NetXML to XSLX')\n parser.add_argument(\n 'input', nargs='+',\n help='One or more netxml files, space separated.'\n )\n parser.add_argument(\n '-o', '--output', metavar='output', default='netxml.xlsx',\n help='Output file path. Defaults to \"./netxml.xslx\" if left blank.'\n )\n parser.add_argument(\n '-d', '--dir', action='store_true', default=False,\n help=(\n 'Use when supplying a directory of netxml files instead of direct '\n 'file references.'\n )\n )\n args = parser.parse_args()\n\n out_path = Path(args.output).resolve()\n if out_path.is_file():\n print(f'{out_path} already exists. Quitting...')\n return\n\n input_paths = []\n for i in args.input:\n try:\n input_path = Path(i).resolve()\n except Exception as e:\n print(\n f'Could not resolve the file path for {i}. It will be skipped'\n )\n if args.dir and input_path.is_dir():\n netxml_glob = input_path.glob('*.netxml')\n for path in netxml_glob:\n input_paths.append(path)\n elif not input_path.is_file():\n thing = 'directory' if args.dir else 'file'\n print(f'{i} is not a {thing}. Skipping...')\n continue\n else:\n input_paths.append(input_path)\n # Create the in-memory Excel Workbook\n wb = Workbook()\n networks_sheet = wb.active\n networks_sheet.title = 'Wireless Networks'\n # Add the title row for the networks WorkSheet\n networks_sheet.append(\n [\n 'BSSID', 'ESSID', 'Hidden', 'Channel', 'Signal Strength', 'Open',\n 'WEP', 'WPA', 'WPA2', 'WPS', 'Auth', 'TKIP', 'AES', 'Manufacturer',\n 'No. Clients', 'Latitude', 'Longitude'\n ]\n )\n clients_sheet = wb.create_sheet(title='Clients')\n # Add the title row for the clients WorkSheet\n clients_sheet.append(\n ['MAC', 'Manufacturer', 'Signal Strength', 'BSSID', 'ESSID']\n )\n networks_list = []\n clients_list = []\n for input_path in input_paths:\n _parse_netxml(input_path, networks_list, clients_list)\n # Ensure no duplicates end up in the spreadsheets.\n unique_networks = set(networks_list)\n unique_clients = set(clients_list)\n\n # Add the results of all files to the spreadsheets\n for row in unique_networks:\n networks_sheet.append(row)\n\n # Add total row\n for row in unique_clients:\n clients_sheet.append(row)\n # Turn the resulting tables in to Excel \"Tables\"\n _create_table(networks_sheet, 'Networks')\n _create_table(clients_sheet, 'Clients')\n # Create totals WorkSheet\n totals_sheet = wb.create_sheet(title='Totals')\n _populate_totals(totals_sheet, networks_sheet)\n _create_table(totals_sheet, 'Totals')\n\n wb.save(str(out_path))", "def write(self):\r\n for prop in self.prpnames:\r\n elem = SubElement(self._root, prop)\r\n data = self.__getattribute__(prop)\r\n if self.prpnames[prop]['type'] == \"text\":\r\n elem.text = data\r\n elif self.prpnames[prop]['type'] == 'list':\r\n for x in data:\r\n SubElement(elem, 'regel').text = x\r\n elif self.prpnames[prop]['type'] == 'attr':\r\n elem.set(self.prpnames[prop]['naam'], data)\r\n tree = ElementTree(self._root)\r\n tree.write(self._fn)\r\n if not self.exists:\r\n self.exists = True", "def write(self, file_or_filename):\n etMap = revert(self)\n xmlTree = ET.ElementTree(etMap)\n xmlTree.write(file_or_filename)", "def save_to_xml(self, xwriter):\r\n\r\n xwriter.WriteStartElement(\"Profile\")\r\n xwriter.WriteAttributeString(\"Name\", self.Name)\r\n xwriter.WriteStartAttribute(\"Version\")\r\n xwriter.WriteValue(self.Version)\r\n xwriter.WriteEndAttribute()\r\n\r\n for 
var_name in self.__dict__:\r\n var_type = type(getattr(self, var_name))\r\n\r\n if var_type is str and var_name != \"Name\":\r\n self.write_string_to_xml(var_name, xwriter)\r\n\r\n elif var_type is bool:\r\n self.write_bool_to_xml(var_name, xwriter)\r\n\r\n elif var_type is dict:\r\n self.write_dict_to_xml(var_name, xwriter)\r\n\r\n elif var_type is list and var_name != \"ExcludeRules\":\r\n self.write_list_to_xml(var_name, xwriter)\r\n\r\n xwriter.WriteStartElement(\"ExcludeRules\")\r\n xwriter.WriteAttributeString(\"Operator\", self.ExcludeOperator)\r\n xwriter.WriteAttributeString(\"ExcludeMode\", self.ExcludeMode)\r\n for rule in self.ExcludeRules:\r\n if rule:\r\n rule.save_xml(xwriter)\r\n xwriter.WriteEndElement()\r\n \r\n xwriter.WriteEndElement()", "def xmlWrite(self, xmlWriter, font, value, name, attrs):\n raise NotImplementedError(self)", "def writeXMLNS(self, *args):\n return _libsbml.ASTBasePlugin_writeXMLNS(self, *args)", "def write_output_file(self, xml_text, xml_file):\n xml_fo = open(xml_file, 'w')\n xml_fo.write(xml_text+'</xml>')\n xml_fo.close()\n return", "def _generate_xml(self, body, destn_dir, nodes=True):\n fn = ''.join([random.choice(string.ascii_letters) for _ in range(12)])\n fn += '.xml'\n\n _dir = os.path.dirname(os.path.abspath(__file__))\n _tmpl = 'multi_node.template' if nodes else 'single_node.template'\n _env = Environment(autoescape=False,\n loader=FileSystemLoader(_dir),\n trim_blocks=False)\n\n with open(fn, 'w+') as f:\n o = _env.get_template(_tmpl).render(body)\n f.write(o)\n\n _d = destn_dir + '/' + fn\n self._remote_copy(fn, _d)\n # Remove the XML file created locally\n os.remove(fn)\n\n return _d", "def dumps(records, xslt_filename=None, **kwargs):\n root = dumps_etree(records=records, xslt_filename=xslt_filename)\n return etree.tostring(\n root,\n pretty_print=True,\n xml_declaration=True,\n encoding='UTF-8',\n **kwargs\n )", "def xml(self):\n raise NotImplementedError('This api does not return xml')", "def _writeXML(self,output,outputDictionary):\n if self.dynamic:\n outputInstance = xmlUtils.DynamicXmlElement('MetricPostProcessor', pivotParam=self.pivotParameter)\n else:\n outputInstance = xmlUtils.StaticXmlElement('MetricPostProcessor')\n if self.dynamic:\n for key, values in outputDictionary.items():\n assert(\"|\" in key)\n metricName, nodeName = key.split('|')\n for ts, pivotVal in enumerate(self.pivotValues):\n if values.shape[0] == 1:\n outputInstance.addScalar(nodeName, metricName,values[0], pivotVal=pivotVal)\n else:\n outputInstance.addScalar(nodeName, metricName,values[ts], pivotVal=pivotVal)\n else:\n for key, values in outputDictionary.items():\n assert(\"|\" in key)\n metricName, nodeName = key.split('|')\n if len(list(values)) == 1:\n outputInstance.addScalar(nodeName, metricName, values[0])\n else:\n self.raiseAnError(IOError, \"Multiple values are returned from metric '\", metricName, \"', this is currently not allowed\")\n return outputInstance", "def writeXml(self):\n curdir = os.getcwd()\n os.chdir(self.Imagedir)\n allImageLists = [self.sciImlist, self.ctxImlist, self.wgtImlist, self.rmsImlist]\n \n for imlist in allImageLists:\n for im in imlist:\n file = xmlUtil.markupImage(im,dataset=self.obsName)\n \n # Don't write these images as output of this module, which\n # really doesn't have any.\n \n #if file not in self.outputList.keys():\n # self.outputList[file] = [im]\n \n os.chdir(curdir)\n return", "def docType():\n return (u'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'\n u'<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 
'\n u'Transitional//EN\" '\n u'\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\\n')", "def to_xml(self, scene_dir: str) -> Tuple[Et.Element, bool]:\n raise NotImplementedError", "def save_xml_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n step = len(column_names)\n\n save_name = asksaveasfilename(title=\"Select file\", filetypes=((\"XML\", \"*.xml\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.xml')\n data = import_lst\n\n if len(data[0]) == step:\n pass\n else:\n data = import_lst[step::]\n\n data2 = list(map(list, zip(*data)))\n\n data3 = {key: value for key, value in zip(column_names, data2)}\n\n column = list(data3.keys())\n\n df = pd.DataFrame(data3, columns=column)\n\n data_dict = df.to_dict(orient=\"records\")\n with open('output.json', \"w+\") as f:\n json.dump(data_dict, f, indent=4)\n\n xml_data = dicttoxml(data_dict).decode()\n with open(save_name, \"w+\") as f:\n f.write(xml_data)\n\n data.clear()\n data2.clear()\n data3.clear()", "def write(self, filename, agg_dir):\n super().write(filename)\n\n if self.aggregation:\n agg = self.aggregation\n abs_subdir = os.path.join(agg_dir, agg.sub_dir)\n if not os.path.isdir(abs_subdir):\n os.makedirs(abs_subdir)\n\n agg.xml_element.write(os.path.join(abs_subdir, agg.basename))", "def Write(self):\n f = open(self.project_path, 'wt')\n self.doc.writexml(f,\n encoding='Windows-1252',\n addindent=' ',\n newl='\\n')\n f.close()", "def to_sbml(self, params):\n files = {}\n _id, cobra_model = self._ws_obj_to_cobra(params['input_ref'])\n files['file_path'] = os.path.join(params['destination_dir'], _id + \".xml\")\n cobra.io.write_sbml_model(cobra_model, files['file_path'])\n\n return _id, files", "def write2file(self, save_to):\n headerstyle = xlwt.easyxf(self.header_style.get_style_string())\n missing_val_style = xlwt.easyxf(\n self.missing_value_style.get_style_string())\n row_styles = [xlwt.easyxf(self.first_style.get_style_string()),\n xlwt.easyxf(self.second_style.get_style_string())]\n\n properties, sections, table = self._build_table()\n\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet(self.sheet_name)\n\n if os.path.splitext(save_to)[-1] == '':\n save_to += '.xls'\n\n max_col_len = []\n\n if (self.switch):\n\n for i, prop in enumerate([''] + properties):\n sheet.write(0, i, prop, headerstyle)\n max_col_len.append(len(str(prop)))\n\n for row_num, sec in enumerate(sections):\n sheet.write(row_num + 1, 0, sec, headerstyle)\n if len(str(sec)) > max_col_len[0]:\n max_col_len[0] = len(str(sec))\n\n for row_num, row in enumerate(table):\n for col_num, elem in enumerate(row):\n\n if elem is None:\n style = missing_val_style\n cell_content = \"\"\n else:\n style = row_styles[row_num % 2]\n cell_content = elem\n\n if isinstance(cell_content, datetime.datetime):\n style.num_format_str = \"DD-MM-YYYY HH:MM:SS\"\n elif isinstance(cell_content, datetime.date):\n style.num_format_str = \"DD-MM-YYYY\"\n elif isinstance(cell_content, datetime.time):\n style.num_format_str = \"HH:MM:SS\"\n else:\n style.num_format_str = \"\"\n\n sheet.write(row_num + 1, col_num + 1, cell_content, style)\n if len(str(cell_content)) > max_col_len[col_num+1]:\n max_col_len[col_num+1] = len(str(cell_content))\n\n else:\n\n for i, sec in enumerate([''] + sections):\n sheet.write(0, i, sec, headerstyle)\n max_col_len.append(len(str(sec)))\n\n for row_num, prop in enumerate(properties):\n sheet.write(row_num + 1, 0, prop, 
headerstyle)\n if len(str(prop)) > max_col_len[0]:\n max_col_len[0] = len(str(prop))\n\n for col_num, col in enumerate(table):\n for row_num, elem in enumerate(col):\n\n if elem is None:\n style = missing_val_style\n cell_content = \"\"\n else:\n style = row_styles[row_num % 2]\n cell_content = elem\n\n if isinstance(cell_content, datetime.datetime):\n style.num_format_str = \"DD-MM-YYYY HH:MM:SS\"\n elif isinstance(cell_content, datetime.date):\n style.num_format_str = \"DD-MM-YYYY\"\n elif isinstance(cell_content, datetime.time):\n style.num_format_str = \"HH:MM:SS\"\n else:\n style.num_format_str = \"\"\n\n sheet.write(row_num + 1, col_num + 1, cell_content, style)\n if len(str(cell_content)) > max_col_len[col_num+1]:\n max_col_len[col_num+1] = len(str(cell_content))\n\n # adjust width of he columns\n for col_id, col_len in enumerate(max_col_len):\n sheet.col(col_id).width = (256 * (col_len+1))\n\n workbook.save(save_to)", "def writeToTempXml(self):\n name = self.fileToProcess.name\n all_tokens = ET.Element(\"tokens\")\n for token in self.tokensTable:\n if token.getType() == KEYWORD:\n keyword = ET.SubElement(all_tokens, \"keyword\")\n keyword.text = ' '+token.getValue()+' '\n elif token.getType() == IDENTIFIER:\n identifier = ET.SubElement(all_tokens, \"identifier\")\n identifier.text = ' '+token.getValue()+' '\n elif token.getType() == SYMBOL:\n symbol = ET.SubElement(all_tokens, \"symbol\")\n symbol.text = ' '+token.getValue()+' '\n elif token.getType() == STRING_CONST:\n stringConstant = ET.SubElement(all_tokens, \"stringConstant\")\n stringConstant.text = ' '+token.getValue()+' '\n elif token.getType() == INT_CONST:\n integerConstant = ET.SubElement(all_tokens, \"integerConstant\")\n integerConstant.text = ' '+token.getValue()+' '\n tree = ET.ElementTree(all_tokens)\n tree.write(name + 'T' + '.xml')", "def xslText(self):\n return u'<xsl:for-each select = \"./%s\">%s<xsl:choose>'\\\n '<xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\">'\\\n '<xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise>'\\\n '<a href=\"%s{.}\"><xsl:value-of select=\".\"/></a>'\\\n '</xsl:otherwise></xsl:choose>%s</xsl:for-each>' % \\\n (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix))", "def getXML(self):\n nodes = list(self.nodes(data=True))\n nodes.sort()\n node_string = ''\n for n in nodes:\n attribute_string = ''\n keys = list(n[1].keys())\n keys.sort()\n for k in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(k, n[1][k], k)\n modification_string = ''\n modified_by = self.predecessors(n[0])\n if modified_by:\n for mod in modified_by:\n modification_string += \"\"\"<modified_by>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifyingNode> %s </modifyingNode>\\n\"\"\"%mod.getTagID()\n modification_string += \\\n \"\"\"<modifyingCategory> %s </modifyingCategory>\\n\"\"\"%mod.getCategory()\n modification_string += \"\"\"</modified_by>\\n\"\"\"\n modifies = self.successors(n[0])\n if modifies:\n for modified in modifies:\n modification_string += \"\"\"<modifies>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifiedNode> {0} </modifiedNode>\\n\"\"\".format(modified.getTagID())\n modification_string += \\\n \"\"\"</modifies>\\n\"\"\"\n node_string += \\\n NODE_XML_SKEL.format(attribute_string+\"{0}\".format(n[0].getXML()) +\\\n modification_string)\n edges = list(self.edges(data=True))\n edges.sort()\n edge_string = ''\n for edge in edges:\n keys = list(edge[2].keys())\n keys.sort()\n attribute_string = ''\n for key in keys:\n attribute_string += 
\"\"\"<{0}> {1} </{2}>\\n\"\"\".format(key, edge[2][key], key)\n edge_string += \"{0}\".format(EDGE_XML_SKEL.format(edge[0].getTagID(),\n edge[1].getTagID(),\n attribute_string))\n\n return CONTEXT_MARKUP_XML_SKEL.format(xmlScrub(self.getRawText()),\n xmlScrub(self.getText()),\n node_string,\n edge_string)", "def writeXMLNS(self, *args):\n return _libsbml.MultiASTPlugin_writeXMLNS(self, *args)", "def _write_context_to_file(self, context):\n om.out.debug('[xml_file.flush()] Starting _write_context_to_file()')\n\n template = self._jinja2_env.get_template('root.tpl')\n\n # We use streaming as explained here:\n #\n # http://flask.pocoo.org/docs/0.12/patterns/streaming/\n #\n # To prevent having the whole XML in memory\n # pylint: disable=E1101\n report_stream = template.stream(context)\n report_stream.enable_buffering(3)\n # pylint: enable=E1101\n\n # Write everything to a temp file, this is useful in two cases:\n #\n # * An external tool will always see a valid XML in the output,\n # and not just a partially written XML document.\n #\n # * If w3af is killed in the middle of writing the XML report,\n # the report file will still be valid -- if xml_file.flush() was\n # run successfully at least once\n tempfh = NamedTemporaryFile(delete=False,\n prefix='w3af-xml-output',\n suffix='.xml')\n\n om.out.debug('[xml_file.flush()] write_context_to_file() created'\n ' template.stream and NamedTemporaryFile')\n\n try:\n # Write each report section to the temp file\n for report_section in report_stream:\n tempfh.write(report_section.encode(DEFAULT_ENCODING))\n except Exception:\n # No exception handling is done here, we just raise the exception\n # so that the core can handle it properly\n raise\n else:\n # Close the temp file so all the content is flushed\n tempfh.close()\n\n om.out.debug('[xml_file.flush()] write_context_to_file() starting to'\n ' copy temp file to destination')\n\n # Copy to the real output file\n report_file_name = os.path.expanduser(self._file_name)\n\n cmd = 'cp %s %s' % (tempfh.name, report_file_name)\n subprocess.call(cmd, shell=True)\n\n om.out.debug('[xml_file.flush()] write_context_to_file() finished copy'\n ' operation.')\n\n stat_info = os.stat(report_file_name)\n om.out.debug('The XML output file size is %s bytes.' 
% stat_info.st_size)\n\n finally:\n os.remove(tempfh.name)\n\n om.out.debug('[xml_file.flush()] write_context_to_file() finished')", "def run_xslt(xml_filename, xsl_filename):\n import subprocess\n from misc.localexec import LocalExec\n from distutils.spawn import find_executable\n\n def seekout_xsltproc():\n XSLTPROC_BIN = 'xsltproc'\n executable = find_executable(XSLTPROC_BIN)\n if executable:\n return os.path.abspath(executable)\n raise SignerError(\"it has not found {} binary\".format(XSLTPROC_BIN))\n\n le = LocalExec(err_mute=True)\n exe = seekout_xsltproc()\n exe_args = [ xsl_filename, xml_filename ]\n\n try:\n return le([exe] + exe_args, cmd_timeout=20, ign_rcs=None)\n except subprocess.CalledProcessError as e:\n msg = \"Command raised exception\\nOutput: \" + str(e.output)\n raise Exception(msg)", "def to_workbook(self) -> tuple:\n\n # Initialize the bytestream\n f = io.BytesIO()\n wb = xw.Workbook(f, {\"in_memory\": True})\n\n # Open a workbook\n self._book = wb\n self._book.set_properties({\"category\": \"atomica:databook\"})\n self._formats = standard_formats(self._book)\n self._references = {} # Reset the references dict\n\n # Write the contents\n self._write_pops()\n self._write_tdve()\n self._write_interpops()\n self._write_transfers()\n\n # Clean internal variables related to writing the worbkook\n self._book = None\n self._formats = None\n self._references = None\n\n return f, wb", "def format_xml(self,query_results):\n results=query_results.data\n factory=factory_xml()\n dump=factory.dumps({'data':results})\n print(dump)\n # TODO return output for this\n return \"\"", "def write_to_xml(dictData, metadata, xmlfile):\n\tfout = codecs.open(xmlfile, 'w', 'utf-8')\n\tfout.write('<?xml version = \"1.0\" encoding = \"UTF-8\" standalone = \"no\" ?>\\n')\n\tfout.write('<?xml-stylesheet type=\"text/xsl\" href=\"maketable.xsl\"?>\\n')\n\tfout.write('<root>\\n')\n\tfout.write('<meta>\\n')\n\tfor key, value in metadata.items():\n\t\tfout.write('<' + key + '>' + value + '</' + key + '>\\n')\n\tfout.write('</meta>\\n')\n\tfout.write('<content>\\n')\n\tfor (hw, meanings, verse, verseNumDetails, pageNumDetails) in dictData:\n\t\txmlline = ''\n\t\txmlline += '<word><headword>' + hw + '</headword><meanings>'\n\t\tfor meaning in meanings:\n\t\t\txmlline += '<m>' + meaning + '</m>'\n\t\txmlline += '</meanings>'\n\t\txmlline += '<verse>'\n\t\tlines = verse.split('<BR>')\n\t\tfor line in lines:\n\t\t\txmlline += '<line>' + line + '</line>'\n\t\txmlline += '</verse>'\n\t\txmlline += '<verseNumber>' + verseNumDetails + '</verseNumber>'\n\t\txmlline += '<pageNumber>' + pageNumDetails + '</pageNumber></word>'\n\t\t# Write in babylon format. <BR><BR> is to separate verses.\n\t\tfout.write(xmlline + '\\n')\n\t\txmlline = ''\n\tfout.write('</content>\\n</root>')\n\tfout.close()\n\n\t# Give some summary to the user\n\tprint('XML file generated. 
Success!')\n\tprint('{} metadata lines and {} content lines written to XML file.'.format(len(metadata), len(dictData)))", "def xml(self):\n return self._xml", "def xml(self):\n return self._xml", "def writeSBMLToFile(self, *args):\n return _libsbml.SBMLWriter_writeSBMLToFile(self, *args)", "def _create_xml_report(self, test, xml_obj):\n xml_report_path = os.path.join(test.work_dir,\n self.XML_REPORT_PATH)\n with open(xml_report_path, 'w') as xml_report:\n xml_report.write(etree.tostring(xml_obj, pretty_print=True))", "def xml2html(self):\n handler = open(self.xml_doc).read()\n soup = BeautifulSoup(handler, 'xml')\n\n fw = open(self.filename_out, 'w')\n\n fw.write(\"<!DOCTYPE html>\" + os.linesep)\n fw.write(\"<html>\" + os.linesep)\n fw.write(\"<head>\" + os.linesep)\n fw.write('<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\">' + os.linesep)\n fw.write(\"<link rel=\\\"stylesheet\\\" href=\\\"%s\\\" type=\\\"text/css\\\" />\" % self.stylesheet_name + os.linesep)\n fw.write(\"<title></title>\" + os.linesep)\n fw.write(\"</head>\" + os.linesep)\n fw.write(\"<body>\" + os.linesep)\n\n # Load styles in dictionaries\n for style in soup.find_all(\"style\"):\n style_name = style.get(\"style:name\")\n #print \"style: %s children: %s descendants: %s\" % (str(style_name), str(len(list(style.children))), len(list(style.descendants)))\n for style_child in style.children:\n fs = style_child.get(\"fo:font-style\")\n if fs:\n self.style_fontstyle[style_name] = fs\n fontw = style_child.get(\"fo:font-weight\")\n if fontw:\n self.style_fontweight[style_name] = fontw\n # read alignment\n txta = style_child.get(\"fo:text-align\")\n if txta:\n self.style_textalignment[style_name] = txta\n # !!!\n tu = style_child.get(\"style:text-underline-type\")\n if tu:\n self.style_textunderline[style_name] = \"underlined\"\n # page break\n break_before = style_child.get(\"fo:break-before\")\n if break_before:\n self.style_break_before[style_name] = break_before\n\n\n # Navigate down the document through h and p tags\n #\n for text in soup.find_all(re.compile(\"^h|^p\")):\n\n # From bs4 docs: If a tag has only one child, and that child is a NavigableString, the child is made available as .string:\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"P9\">- Any text here!</text:p>\n #\n # To do:\n #\n # Beware of this case:\n # - <text:p text:style-name=\"P8\">\n # <text:span text:style-name=\"T4\">\n #\n\n # Get the attributes so the styles and the outlines\n text_attrs = dict(text.attrs)\n\n # Get the styles, if any\n try:\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n if text.string:\n t = unicode(text.string)\n if t:\n fw.write(self.outliner(self.stylizer(t, t_style), t_outline_level, t_style).encode('utf-8'))\n\n # e.g. 
page breaks come as a node with no children whose style contains fo:break-before:\"page\"\n elif len(list(text.children)) == 0:\n fw.write(self.outliner(unicode(\"\"), t_outline_level, t_style).encode('utf-8'))\n\n # This covers the following case (e.g.):\n #\n # <text:p text:style-name=\"Textbody\">\n # jkjksk skjkjkjs dhh\n # <text:s />\n # <text:span text:style-name=\"T3\">Bold</text:span>\n # <text:s />\n # </text:p>\n #\n # else drill down one level\n else:\n buffer = unicode(\"\")\n t = buffer\n u = buffer\n t_outline_level = \"paragraph\"\n t_style = \"\"\n for i in text.children:\n # Get the attributes so the styles\n try:\n text_attrs = dict(i.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n # whenever the element has no style\n # take the parent's one\n try:\n text_attrs = dict(i.parent.attrs)\n t_style = text_attrs[\"text:style-name\"]\n except:\n t_style = \"nostyle\"\n\n # Get the outline-levels, if any\n try:\n t_outline_level = text_attrs[\"text:outline-level\"]\n except:\n t_outline_level = \"paragraph\"\n\n # if the current tag has only one child, and that child is a NavigableString\n if i.string:\n t = unicode(i.string)\n\n # space\n elif i.name == \"s\":\n t = unicode(\"&nbsp;\")\n\n # else drill down another level\n else:\n t = unicode(\"\")\n for j in i.children:\n if j.string:\n u = unicode(j.string)\n elif j.name == \"s\":\n u = unicode(\"&nbsp;\")\n else:\n u = unicode(\"\")\n if u:\n t = t + self.stylizer(u, t_style)\n\n # build up a unicode string containing the whole paragraph\n if t:\n buffer = buffer + self.stylizer(t, t_style)\n\n # outline the buffered unicode string and write it to the output file\n fw.write(self.outliner(buffer, t_outline_level, t_style).encode('utf-8'))\n\n fw.write(\"</body>\" + os.linesep)\n fw.write(\"</html>\" + os.linesep)\n fw.close()", "def writexml(file):\n OUTFILE=open(file,\"w\")\n doc = xml.dom.minidom.Document()\n\n # Create the <dec_reg_list> base element\n decl_reg_list = doc.createElement(\"decl_reg_list\")\n doc.appendChild(decl_reg_list)\n\n regname_old=\"\"\n rows.pop(0)\n for row in rows:\n (regdesc,regname,offset,default,regtype,expose_reg,depth,incsz,bitdesc,bitname,loc,bittype)= row\n if regname != regname_old:\n # Create the register element\n register = doc.createElement(\"register\")\n register.setAttribute(\"name\", regname)\n register.setAttribute(\"offset\", offset)\n if default != \"\" : register.setAttribute(\"default\", default)\n register.setAttribute(\"type\", regtype)\n if expose_reg == \"1\": register.setAttribute(\"usr\", expose_reg)\n if depth != \"\": register.setAttribute(\"size\", depth)\n if incsz != \"\": register.setAttribute(\"incsz\", incsz)\n text = doc.createTextNode(regdesc)\n register.appendChild(text)\n decl_reg_list.appendChild(register)\n \n # Create the field element\n if bitname != \"\":\n field = doc.createElement(\"field\")\n field.setAttribute(\"name\", bitname)\n if loc !=\"\": field.setAttribute(\"loc\", addcolon(loc))\n if bittype != \"\": field.setAttribute(\"type\", bittype)\n if bitdesc != \"\":\n text = doc.createTextNode(bitdesc)\n field.appendChild(text)\n register.appendChild(field)\n regname_old = regname\n\n\n # Print our newly created XML\n #print doc.toprettyxml(indent=\" \")\n #OUTFILE.write(doc.saveXML(decl_reg_list))\n OUTFILE.write(doc.toprettyxml(indent=\" \"))\n OUTFILE.close()", "def write(self, filename):\n \n return self.model.write(filename,xml_declaration=True, encoding='utf-8')", "def export_to_file(self, filename):\n if 
len(filename.split(\".\")) == 1:\n filename += \".xml\"\n xmlstring = self._dommodel.toprettyxml(\" \", \"\\n\")\n with open(filename, \"w\") as f:\n f.write(xmlstring)", "def save_as(self, fname, base = None, indent = '', topns = True, namespaces = {}):\n with codecs.open(fname, \"w\", encoding=\"utf-8\") as outf:\n self.serialize_xml(outf.write, base=base, indent=indent, topns=topns, namespaces=namespaces)", "def write(self):\n return JavaMLWriter(self)", "def CreateXMLTransformer() -> TransformerFunc:\n return partial(traverse, transformation_table=XML_AST_transformation_table.copy())", "def wrez2xml(self,newdoc,newroot):\n\t\twrez = newdoc.createElement('wrez')\n\t\twrez.setAttribute('hasChanged', str(self.hasChanged))\n\t\tnewroot.appendChild(wrez)\n\n\t\tpath = newdoc.createElement('path')\n\t\tpath.setAttribute('value', self.path)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('init_str')\n\t\tpath.setAttribute('value', self.init_str)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('hash_sha512')\n\t\tpath.setAttribute('value', self.hash_sha512)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('src_rip')\n\t\tpath.setAttribute('value', self.src_rip)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('quality')\n\t\tpath.setAttribute('value', self.quality)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('codec')\n\t\tpath.setAttribute('value', self.codec)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('language')\n\t\tpath.setAttribute('value', self.language)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('audio')\n\t\tpath.setAttribute('value', self.audio)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('encoder')\n\t\tpath.setAttribute('value', self.encoder)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('version')\n\t\tpath.setAttribute('value', self.version)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('extension')\n\t\tpath.setAttribute('value', self.extension)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('release_year')\n\t\tpath.setAttribute('value', self.release_year)\n\t\twrez.appendChild(path)\n\t\n\t\tpath = newdoc.createElement('title')\n\t\tpath.setAttribute('value', self.title)\n\t\twrez.appendChild(path)\n\n\t\tpath = newdoc.createElement('size')\n\t\tpath.setAttribute('value', str(self.size))\n\t\twrez.appendChild(path)\n\t\treturn wrez", "def toXMLElement(self):\n property_element = xml.etree.ElementTree.Element('property')\n property_element.set('concept', self.concept_ref)\n\n if self.is_parent:\n property_element.set('isParent', 'true')\n\n return property_element", "def xmlwrite(self, doc, filename):\n pathname = os.path.join(self.session.session_dir, filename)\n f = open(pathname, \"w\")\n doc.writexml(writer=f, indent=\"\", addindent=\" \", newl=\"\\n\", encoding=\"UTF-8\")\n f.close()", "def _get_eps_xml(self):\n format_path = os.path.join(os.path.dirname(__file__), \"formats\")\n\n # loop through files where filename starts with \"eps_ascat\".\n for filename in fnmatch.filter(os.listdir(format_path), \"eps_ascat*\"):\n doc = etree.parse(os.path.join(format_path, filename))\n file_extension = doc.xpath(\"//file-extensions\")[0].getchildren()[0]\n\n format_version = doc.xpath(\"//format-version\")\n for elem in format_version:\n major = elem.getchildren()[0]\n minor = elem.getchildren()[1]\n\n # return the xml file matching the metadata of the datafile.\n if major.text == 
self.mphr[\"FORMAT_MAJOR_VERSION\"] and \\\n minor.text == self.mphr[\"FORMAT_MINOR_VERSION\"] and \\\n self.mphr[\n \"PROCESSING_LEVEL\"] in file_extension.text and \\\n self.mphr[\"PRODUCT_TYPE\"] in file_extension.text:\n return os.path.join(format_path, filename)", "def get_xml(self):\n xml = svgwrite.etree.etree.Element(self.elementname)\n if self.debug:\n self.validator.check_all_svg_attribute_values(self.elementname, self.attribs)\n for attribute, value in self.attribs.items():\n # filter 'None' values\n if value is not None:\n value = self.value_to_string(value)\n if value: # just add not empty attributes\n xml.set(attribute, value)\n \n for element in self.elements:\n xml.append(element)\n return xml", "def makexmlfunc(healpix,ra,dec,week1,week2,distance):\n\t\n\tif week1!=week2:\n\t\tidentity=\"%06d_%d_%d_w%03d_w%03d\" %(healpix,ra,dec,week1,week2)\n\t\tltcube=\"%s/lat_ltcube_weekly_w%03d_w%03d_p203_v001.fits\" %(cfg.home,week1,week2)\n\t\tspacecraft=\"%s/w%03d_w%03d_newspacecraft.fits\" %(cfg.ispace,week1,week2)\n\telse:\n\t\tidentity=\"%06d_%d_%d_w%03d\" %(healpix,ra,dec,week1)\n\t\tltcube=\"%s/lat_spacecraft_weekly_w%03d_p203_v001_ltcube.fits\" %(cfg.home,week1)\n\t\tspacecraft=\"%s/lat_spacecraft_weekly_w%03d_p202_v001.fits \" %(cfg.ispace,week1)\n\n\tregion_filtered=\"%s_region_filtered_gti.fits\" %(identity)\n\tfermisources=\"%s_fermisources_model.xml\" %(identity)\n\tinputmodel=\"%s_input_model.xml\" %(identity)\n\tfermis=\"%s_fermis.xml\" %identity\n\tresponse=\"P7REP_SOURCE_V15\"\n\tmakexmllog=\"%s_output_makexml.log\" %identity\n\tglobal extendedsource\n\tglobal numberofextendedsources\n\textendedlog=\"%s_number_of_extendedsources.log\" %identity\n\tExtendedList=\"ExtendedList.txt\"\n\tOthersList=\"OthersList.txt\"\n\n\t\n\twith open (makexmllog,'r') as outputFile: #opens the makexmllog file from makesyfunc. 
This document contains info about the extended sources.\n\t\t\n\t\tfor line in outputFile:\n\t\t\t\n\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\tif line.startswith('Added')==True:\n\t\t\t\t\ta,b=line.split('and ')\t\n\t\t\t\t\tb1,b2,b3=b.split(' ')\n\t\t\t\t\n\t\t\t\t\tnumberofextendedsources=int(b1) #b1 is the number of extended sources\n\toutputFile.close()\n\toutputFile=open(inputmodel, 'w')\n\tprint numberofextendedsources\n\n\tif numberofextendedsources==1: #if there is an extended source\n\t\twith open (makexmllog,'r') as outputFile:\n\t\t\n\t\t\tfor line in outputFile:\n\t\t\t\n\t\t\t\twith open (makexmllog,'r') as File:\n\t\t\t\t\tif line.startswith('Extended')==True:\n\t\t\t\t\t\tprint line\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tc,d=line.split(' in')\n\t\t\t\t\t\n\t\t\t\t\t\tc1,c2,c3,c4=c.split(' ')\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t\textendedsource=str(c3) #extracts the name of the extended source from makexmllog\n\t\n\n\t\t\n\n\n\t\toutputFile.close()\t\n\n\n\t\n\n\t\twith open(\"%s\" %fermisources) as thefile: #opens the xml file that was created from makesyfunc\n\t\t\tfor line in thefile:\n\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==True:\n\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tspecial=str.replace(line,'%s.fits'%extendedsource,'%s/%s.fits' %(cfg.homesy,extendedsource)) \n\t\t\t\t\tprint special #replace with the correct path to the extendedsource(Templates folder)\n\t\t\t\n\t\t\t\t\tspecial1=str.replace(special,'type=\"SpatialMap\"','type=\"SpatialMap\" map_based_integral=\"true\"')\n\t\t\t\t\tprint special1 #instruction from fermi tutorial, you must add map_based...\n\t\t\t\t\toutputFile=open(fermis, 'w') #write to fermis, the original xml with the right path to the extended source\n\t\t\t\t\twith open(\"%s\" %fermisources,'r') as infile:\n\t\t\t\t\t\tfor line in infile:\n\t\t\t\t\t\t\tif line.startswith('\t<spatialModel file=\"%s.fits\"' %(extendedsource))==False:\n\t\t\t\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toutputFile.write(special1)\n\t\t\t\t\toutputFile.close()\n\t\t\t\t\t\t\t\t\t\n\n\n\t\t\t\n\t\toutputFile=open(inputmodel, 'w') #final xml file. 
contains the right path and the source info of \"your\" source.\n\t\twith open(fermis,'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\t\t\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\t\t\t\n\n\t\toutputFile.close()\n\t\n\t\twith open(\"%s_diffrsp.log\" % (identity), 'w') as outsyputFile: #run diffrsp if you have an extended source.\n\t\t\tsubprocess.call(['%s' %(cfg.pythoncommand),'gtdiffrsp.py', '%s' %(region_filtered),'%s' %(spacecraft), '%s' %inputmodel, '%s' %(response),'%s' %identity ],stdout=outsyputFile)\n\t\t\t\n\t\twith open(ExtendedList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\t\t\t\t\n\tif numberofextendedsources==0: #if there is no extended source\n\t\toutputFile=open('%s' %(inputmodel), 'w') #write to inputmodel, \"your\" source\n\t\twith open('%s' %(fermisources),'r') as infile:\n\t\t\tfor line in infile:\n\t\t\t\tif line.startswith('</source_library>')==False:\n\t\t\t\t\toutputFile.write(line)\n\t\t\t\t\t\n\t\t\t\n\n\t\toutputFile.write('\\n\\\n\t\t\t<!-- My sources -->\\n\\\n\t\t\t<source name=\"%f_%f\" type=\"PointSource\">\\n\\\n\t\t\t<spectrum type=\"PowerLaw\">\\n\\\n\t\t\t<parameter free=\"1\" max=\"1000.0\" min=\"0.001\" name=\"Prefactor\" scale=\"1e-09\" value=\"10\"/>\\n\\\n\t\t\t<parameter free=\"1\" max=\"-1.0\" min=\"-5.0\" name=\"Index\" scale=\"1.0\" value=\"-2.1\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"2000.0\" min=\"30.0\" name=\"Scale\" scale=\"1.0\" value=\"100.0\"/>\\n\\\n\t\t\t</spectrum>\\n\\\n\t\t\t<spatialModel type=\"SkyDirFunction\">\\n\\\n\t\t\t<parameter free=\"0\" max=\"360\" min=\"-360\" name=\"RA\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t<parameter free=\"0\" max=\"90\" min=\"-90\" name=\"DEC\" scale=\"1.0\" value=\"%f\"/>\\n\\\n\t\t\t</spatialModel>\\n\\\n\t\t\t</source>\\n\\\n\t\t\t</source_library>\\n' % (ra,dec,ra,dec))\n\n\t\toutputFile.close()\n\tif numberofextendedsources>1:\n\t\twith open(OthersList,\"a+\") as outsyFile:\n\t\t\toutsyFile.write(\"%d %f %f %d %d %f\\n\" %(healpix,ra,dec,week1,week2,distance))\n\t\n\tif numberofextendedsources==1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\\n\\\n \t%s\"%(numberofextendedsources,extendedsource))\n\t\toutsyputFile.close()\n\n\tif numberofextendedsources !=1:\n\t\toutsyputFile=open(extendedlog,'w') #write the number of extended sources and name in a file\n\t\toutsyputFile.write(\"%s\" %(numberofextendedsources))\n\t\toutsyputFile.close()", "def _toFile(self):\n pass", "def to_xml(self):\n xml_strings = ['<code_object name=\"%s\">' % 
self.name]\n xml_strings.append(' <type value=\"%s\" />' % self.object_type)\n xml_strings.append(' <source>')\n for source_line in self.source:\n xml_strings.append(source_line[1])\n xml_strings.append(' </source>')\n xml_strings.append('</code_object>')\n return \"\".join(xml_strings)", "def toGML(self):\n raise NotImplementedError", "def xml_path(self):\n return self.__xml_path", "def addXSLTStyleSheet(self, content, URI):\n if self.PDFreactorConfiguration.in1[\"XSLTStyleSheets\"] == None:\n self.PDFreactorConfiguration.in1[\"XSLTStyleSheets\"] = []\n stylesArray = {'content':content, 'URI':URI}\n self.PDFreactorConfiguration.in1[\"XSLTStyleSheets\"].append(stylesArray)\n else:\n stylesArray = {'content':content, 'URI':URI}\n self.PDFreactorConfiguration.in1[\"XSLTStyleSheets\"].append(stylesArray)", "def generate_xml(self, provisioning):\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n return self.provisioning2xml(provisioning)", "def write(self, filename=None, as_type='json'):\n if not filename:\n filename = self.uri\n self.create_output_dir(filename)\n if as_type == 'json':\n with open(filename, 'w') as outfile:\n outfile.write(self.transform_data(outformat=formats.JSON))\n elif as_type == 'shapefile':\n self.data.to_file(filename)\n else:\n raise NotImplementedError('{} not a valid type'.format(as_type))\n return self.uri", "def exportXml ( w, xml ):\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n rawText = xml\n pattern = re.compile (r'[^\\S ]+')\n text = re.sub ( pattern, \"\", rawText )\n reparsed = MD.parseString ( text )\n w.write ( reparsed.toprettyxml ( indent = \"\\t\", encoding = \"UTF-8\" ) )" ]
[ "0.63645554", "0.61521673", "0.5836575", "0.58352655", "0.58135927", "0.56414604", "0.5629727", "0.56010044", "0.55846775", "0.55454504", "0.552621", "0.5517293", "0.55145675", "0.5513024", "0.54559094", "0.54535353", "0.5428844", "0.5306545", "0.52981937", "0.52909887", "0.5273646", "0.5273093", "0.52350694", "0.5203055", "0.5196621", "0.5184456", "0.5181642", "0.5176734", "0.5165694", "0.5157047", "0.5149118", "0.51351523", "0.5134564", "0.5129334", "0.5102372", "0.50735724", "0.50731003", "0.50602835", "0.5060231", "0.5058721", "0.505865", "0.5041597", "0.50304365", "0.502216", "0.500992", "0.5005544", "0.5003966", "0.49984807", "0.4978735", "0.49759305", "0.49617678", "0.49560267", "0.49416786", "0.4941091", "0.48928908", "0.48899034", "0.4876396", "0.4873777", "0.48664466", "0.48653346", "0.48529604", "0.4847998", "0.4846511", "0.48457986", "0.4830783", "0.48295173", "0.48291942", "0.4823059", "0.4819926", "0.48160383", "0.48131976", "0.48079303", "0.48054472", "0.48024672", "0.48019445", "0.4796694", "0.4796694", "0.47783184", "0.47743228", "0.47683898", "0.47650102", "0.4760641", "0.47599688", "0.47595832", "0.47556973", "0.47491798", "0.47482026", "0.4747561", "0.47429985", "0.4734219", "0.47341862", "0.4733793", "0.4726138", "0.4723482", "0.47102904", "0.47020122", "0.4701303", "0.46902362", "0.4684389", "0.4683553" ]
0.54010373
17
Return XSL file test for data existence
def xslTestText(self):
    return u'normalize-space(./%s)' % self.name
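For context (an illustrative note, not part of the dataset row): the expression this method returns, normalize-space(./field), is the XPath test that the xslTestText() family emits; used inside an <xsl:if test="..."> it is truthy only when the named child element contains non-whitespace text. A minimal sketch, assuming an invented <item>/<name> structure and using lxml, which evaluates the same test directly:

# Minimal sketch -- the XML layout and the field name 'name' are invented
# for illustration; only the XPath expression comes from the row above.
from lxml import etree

doc = etree.fromstring(
    "<root>"
    "<item><name>Alpha</name></item>"
    "<item><name>   </name></item>"
    "</root>"
)

test = "normalize-space(./name)"  # what xslTestText() would return for 'name'
for item in doc.findall("item"):
    # xpath() evaluates the string expression; normalize-space() of a
    # whitespace-only field is "", which is falsy, so that item fails
    # the existence test while the populated one passes.
    print(bool(item.xpath(test)))
# -> True, False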
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self):\n try:\n f = StringIO.StringIO(self.content)\n dom = XTree.parse(f)\n xslt = XTree.parse(self.stylesheet)\n transform = XTree.XSLT(xslt)\n newdom = transform(dom)\n except IOError:\n print \"Xml or Xsl file not found!\"\n return False\n return XTree.tostring(newdom, pretty_print=True)", "def test_01_FindXml(self):", "def test_input_output(self, filename):\n with open(sample_xml(filename), \"rb\") as xml_file:\n xml_output_expected = xml_file.read()\n root, doctype_dict = xmlio.parse(sample_xml(filename), return_doctype_dict=True)\n self.assertEqual(xmlio.output(root, None, doctype_dict), xml_output_expected)", "def test_parse_source_xml(self):\n\n # Path to the source xml\n file = self.xmlfilepath + os.sep + \"sourcefile.xml\"\n\n # Path to non existent source file\n in_file = self.xmlfilepath + os.sep + \"sourcefile.pwg\"\n\n # Test for correct data\n # NOTE : For this test case to pass the source xml file should be\n # present in the download path\n self.assertEqual(\n parse_source_xml(file),\n (\n \"DLTINS_20210117_01of01.zip\",\n \"http://firds.esma.europa.eu/firds/DLTINS_20210117_01of01.zip\",\n ),\n )\n\n # Test for incorrect data\n self.assertEqual(parse_source_xml(in_file), None)", "def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)", "def test_hasLocation(self):\n cases = [\n (self.test_eac + \"NE00601.xml\", False),\n (self.test_eac + \"NE00100.xml\", True),\n (self.test_eac + \"NE00201.xml\", True),\n (self.test_eac + \"NE01302.xml\", True),\n (self.test_eac + \"NE01101.xml\", False),\n (self.test_eac + \"NE00916.xml\", False),\n (self.test_eac + \"NE00201.xml\", True),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source,'http://www.example.com')\n self.assertNotEqual(doc, None)\n result = doc.hasLocation()\n self.assertNotEqual(result, None)\n self.assertEqual(result, expected)", "def test_xml_files_with_missing_info():\n\n # Test when k is missing from constant type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/k_const.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when A is missing from Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/A_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when E is missing from Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/E_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when A is missing from modified Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/A_mod_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when b is missing from modified Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/b_mod_arr.xml\"\n parser = XMLParser(xml_filename)\n\n # Test when E is missing from modified Arrhenius type reaction\n with pytest.raises(ValueError):\n xml_filename = \"tests/test_xml_files/E_mod_arr.xml\"\n parser = XMLParser(xml_filename)", "def test_does_validate_valid_xml_file(self):\n xml_file = join(\n getcwd(), 'testdata', 'newstest2019-defr-src-ts.de.FIXED.xml'\n )\n doc = valitest.ValidatableTestSet(xml_file)\n self.assertEqual(doc.setid, 
\"newstest2019\")\n self.assertEqual(doc.srclang, \"any\")", "def __call__(self, f):\n tree = f.build_etree(lxml=True)\n return self.xslt(tree)", "def testFilesExist(self):\n \n for year in range(2007,2013):\n self.assertTrue(os.path.exists(\"./IncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./LogIncomeHistogram_\"+ str(year)+\".pdf\"), \"A histogram didn't save to output.\")\n self.assertTrue(os.path.exists(\"./IncomeBoxplot(log)_\"+ str(year)+\".pdf\"), \"A boxplot didn't save to output.\") \n self.assertTrue(os.path.exists(\"./results.txt\"), \"Results file doesn't exist.\")", "def test_defaultFile(self):\n found = cesmEnvLib.checkFile(\"./test_checkXMLvar.py\", \"read\")\n self.assertTrue(found)", "def test_xml_file(self):\n response = client.result(False, 'xml', 'unittest', file = 'test_file.csv')\n root = ET.fromstring(response)\n first_name = root[0][0][0].text\n self.assertEqual(first_name,'John', 'Should print John')\n nationality = '<nationality>' in response\n self.assertFalse(nationality, 'Nationality should not be present')", "def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n # sprint(PrettyFormatAny.form(self.m_root_xml, 'A3-01-A - Entire Xml'))\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n # print(PrettyFormatAny.form(self.m_xml.controller_sect, 'A3-01-B - All Controllers Xml'))\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')\n # print(PrettyFormatAny.form(self.m_xml.controller, 'A3-01-C - First Controller Xml'))", "def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def test_case18(self):\n\n result = self.graph1.studentExists(\"student1\")\n\n self.assertTrue(result)", "def test_getExistDates(self):\n cases = [\n (self.test_eac + \"NE01201.xml\",\"1858-01-01T00:00:00Z\",\"1935-08-21T00:00:00Z\"),\n (self.test_eac + \"NE00300.xml\",\"1960-01-01T00:00:00Z\",\"1977-12-31T00:00:00Z\"),\n (self.test_eac + \"NE01500.xml\",\"1981-01-01T00:00:00Z\",\"1981-12-31T00:00:00Z\")\n ]\n for case in cases:\n source, expected_from_date, expected_to_date = case\n doc = EacCpf.EacCpf(source, 'http://www.example.com')\n self.assertNotEqual(doc, None)\n fromDate, toDate = doc.getExistDates()\n self.assertEqual(fromDate, expected_from_date)\n self.assertEqual(toDate, expected_to_date)", "def test_case17(self):\n\n result = self.graph1.studentExists(\"student5\")\n\n self.assertFalse(result)", "def _check_deprecated_data_xml_node(self):\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath(\"/odoo\") \\\n if not isinstance(doc, string_types) else []\n children, data_node = ((odoo_nodes[0].getchildren(),\n odoo_nodes[0].findall('data'))\n if odoo_nodes else ([], []))\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True", "def test_getData(self):\n cases = [\n self.test_eac + 'NE01201.xml',\n self.test_eac + 'NE00201.xml',\n self.test_eac + 'NE00300.xml',\n self.test_eac + 'NE00500.xml',\n ]\n for case in cases:\n doc = EacCpf.EacCpf(case, 'http://www.example.com/metadata.xml', 'http://www.example.com/presentation.html')\n 
self.assertNotEqual(doc, None)\n result = doc.getData()\n self.assertNotEqual(result, None)\n self.assertGreater(len(result), 0)", "def _CheckFileExistsWithData(self, logs, graph):\n self.assertTrue(graph in logs, 'File %s was not output.' % graph)\n self.assertTrue(logs[graph], 'File %s did not contain data.' % graph)", "def checkAllFilesGenerated(self):\n root = get_exhale_root(self)\n containmentFolder = self.getAbsContainmentFolder()\n for node in root.all_nodes:\n if node.kind in [\"enumvalue\", \"group\"]:\n continue\n gen_file_path = os.path.join(containmentFolder, node.file_name)\n self.assertTrue(\n os.path.isfile(gen_file_path),\n \"File for {kind} node with refid=[{refid}] not generated to [{gen_file_path}]!\".format(\n kind=node.kind, refid=node.refid, gen_file_path=gen_file_path\n )\n )", "def test_assemble_xml_file_A1(self):\n self.maxDiff = None\n\n fh = StringIO()\n worksheet = Worksheet()\n worksheet._set_filehandle(fh)\n cell_format = Format({\"xf_index\": 1})\n\n # No format. Should be ignored.\n worksheet.write_blank(\"A1\", None)\n\n worksheet.write_blank(\"C2\", None, cell_format)\n\n worksheet.select()\n worksheet._assemble_xml_file()\n\n exp = _xml_to_list(\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <worksheet xmlns=\"http://schemas.openxmlformats.org/spreadsheetml/2006/main\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\">\n <dimension ref=\"C2\"/>\n <sheetViews>\n <sheetView tabSelected=\"1\" workbookViewId=\"0\"/>\n </sheetViews>\n <sheetFormatPr defaultRowHeight=\"15\"/>\n <sheetData>\n <row r=\"2\" spans=\"3:3\">\n <c r=\"C2\" s=\"1\"/>\n </row>\n </sheetData>\n <pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.3\"/>\n </worksheet>\n \"\"\"\n )\n\n got = _xml_to_list(fh.getvalue())\n\n self.assertEqual(got, exp)", "def resultExist(probName,algoName,fitName,inst,s,c,n,k,q,w,m,t,e):\n if probName == 'NKQ':\n nameOfF = './result/'+probName+'-'+algoName+'-F'+fitName+'-M'+m+'-I'+str(inst)+'-S'+str(s)+'-W'+str(w)+'-N'+str(n)+'-K'+str(k)+'-C'+str(c)+'-Q'+str(q)+'-T'+str(t)+'-E'+str(e)+'.txt'\n elif probName == 'NK' or probName == 'NonNK':\n nameOfF = './result/'+probName+'-'+algoName+'-F'+fitName+'-C'+str(c)+'-I'+str(inst)+'-S'+str(s)+'-W'+str(w)+'-N'+str(n)+'-K'+str(k)+'-E'+str(e)+'.txt'\n\n if os.path.isfile(nameOfF)==True:\n print nameOfF, 'exists!!!'\n return os.path.isfile(nameOfF)", "def test_exists_false(self):\n self.assertFalse(Sample.exists('Not_a_Sample', self.sample_template))", "def test_non_regression(self):\n main(\"Source_mobile.xml\", [[\"engine\", \"A320.xml\", \"A320.csv\"]], \"Resultat.xml\", gui=False)\n compare_xml_results(\"Resultat.xml\", \"Reference.xml\", self)", "def main():\n\n parser = argparse.ArgumentParser(description='NetXML to XSLX')\n parser.add_argument(\n 'input', nargs='+',\n help='One or more netxml files, space separated.'\n )\n parser.add_argument(\n '-o', '--output', metavar='output', default='netxml.xlsx',\n help='Output file path. Defaults to \"./netxml.xslx\" if left blank.'\n )\n parser.add_argument(\n '-d', '--dir', action='store_true', default=False,\n help=(\n 'Use when supplying a directory of netxml files instead of direct '\n 'file references.'\n )\n )\n args = parser.parse_args()\n\n out_path = Path(args.output).resolve()\n if out_path.is_file():\n print(f'{out_path} already exists. 
Quitting...')\n return\n\n input_paths = []\n for i in args.input:\n try:\n input_path = Path(i).resolve()\n except Exception as e:\n print(\n f'Could not resolve the file path for {i}. It will be skipped'\n )\n if args.dir and input_path.is_dir():\n netxml_glob = input_path.glob('*.netxml')\n for path in netxml_glob:\n input_paths.append(path)\n elif not input_path.is_file():\n thing = 'directory' if args.dir else 'file'\n print(f'{i} is not a {thing}. Skipping...')\n continue\n else:\n input_paths.append(input_path)\n # Create the in-memory Excel Workbook\n wb = Workbook()\n networks_sheet = wb.active\n networks_sheet.title = 'Wireless Networks'\n # Add the title row for the networks WorkSheet\n networks_sheet.append(\n [\n 'BSSID', 'ESSID', 'Hidden', 'Channel', 'Signal Strength', 'Open',\n 'WEP', 'WPA', 'WPA2', 'WPS', 'Auth', 'TKIP', 'AES', 'Manufacturer',\n 'No. Clients', 'Latitude', 'Longitude'\n ]\n )\n clients_sheet = wb.create_sheet(title='Clients')\n # Add the title row for the clients WorkSheet\n clients_sheet.append(\n ['MAC', 'Manufacturer', 'Signal Strength', 'BSSID', 'ESSID']\n )\n networks_list = []\n clients_list = []\n for input_path in input_paths:\n _parse_netxml(input_path, networks_list, clients_list)\n # Ensure no duplicates end up in the spreadsheets.\n unique_networks = set(networks_list)\n unique_clients = set(clients_list)\n\n # Add the results of all files to the spreadsheets\n for row in unique_networks:\n networks_sheet.append(row)\n\n # Add total row\n for row in unique_clients:\n clients_sheet.append(row)\n # Turn the resulting tables in to Excel \"Tables\"\n _create_table(networks_sheet, 'Networks')\n _create_table(clients_sheet, 'Clients')\n # Create totals WorkSheet\n totals_sheet = wb.create_sheet(title='Totals')\n _populate_totals(totals_sheet, networks_sheet)\n _create_table(totals_sheet, 'Totals')\n\n wb.save(str(out_path))", "def test_report_definition(self):\n self.model = self.scan.model\n self.model.save()\n new_model = pycotools3.tasks.CopasiMLParser(self.copasi_file).xml\n reports = new_model.find('{http://www.copasi.org/static/schema}ListOfReports')\n check = False\n for report in reports:\n if report.attrib['name'] == 'parameter_estimation':\n check = True\n self.assertTrue(check)", "def test_ifFileExists():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"file\" in testConfig.config and \"file_locations\" in testConfig.config:\n print \"File In Location: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfFileExistsInPossibleLocations, testConfig.config\n elif \"file\" in testConfig.config:\n print \"File: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfFileExists, testConfig.config", "def test_report_definition(self):\n self.model = self.scan.model\n self.model.save()\n new_model = pycotools3.tasks.CopasiMLParser(self.copasi_file).xml\n reports = new_model.find('{http://www.copasi.org/static/schema}ListOfReports')\n check = False\n for report in reports:\n if report.attrib['name'] == 'Time-Course':\n check = True\n self.assertTrue(check)", "def test_exists_false(self):\n self.assertFalse(PrepSample.exists('Not_a_Sample', self.prep_template))", "def test_exists_true(self):\n self.assertTrue(SampleTemplate.exists(self.test_study.id))", "def test_exists_false(self):\n self.assertFalse(SampleTemplate.exists(self.new_study.id))", "def test_xml_exist(xml_parser):\n\n assert xml_parser.get_dict()", "def are_evaluations_created(path, number_of_evaluations):\n evaluation_ids = []\n try:\n 
with open(\"%s%sevaluations\" % (path, os.sep)) as evaluations_file:\n for line in evaluations_file:\n evaluation = line.strip()\n try:\n evaluation_id = bigml.api.get_evaluation_id(evaluation)\n evaluation_ids.append(evaluation_id)\n except ValueError:\n return False, evaluation_ids\n if len(evaluation_ids) == number_of_evaluations:\n return True, evaluation_ids\n else:\n return False, evaluation_ids\n except IOError:\n return False, evaluation_ids", "def validate(file_in) :\n\tname = str(file_in.name)\n\tif name[-4:] != \".xml\" and name[-4:] != \".XML\" :\n\t\treturn False\n\txsd = open('wcdb/WorldCrises.xsd.xml', 'r')\n\txmlFile = open('wcdb/temp.xml', 'w')\n\txmlFile.write(file_in.read())\n\txmlFile = open('wcdb/temp.xml', 'r')\n\ttry:\n\t\tpsvi = pyxsval.parseAndValidate(\"wcdb/temp.xml\",\n\t\t\t\"wcdb/WorldCrises.xsd.xml\", xmlIfClass=pyxsval.XMLIF_ELEMENTTREE)\n\t\ttree = psvi.getTree()\n\texcept pyxsval.XsvalError, e:\n\t\treturn 'Validation aborted. ' + str(e)\n\texcept GenXmlIfError, e:\n\t\treturn 'Parsing aborted. ' + str(e)\n\texcept Exception as e:\n\t\t# catch all\n\t\treturn 'Exception. ' + str(e)\n\t#handle invalid case\n\treturn tree", "def xslText(self):\n return u'<xsl:if test=\"normalize-space(./%s)\">%s'\\\n '<xsl:value-of select=\"./%s\"/>%s</xsl:if>' % \\\n (self.name, xslEscape(self.prefix), self.name,\n xslEscape(self.suffix))", "def test_assemble_xml_file_write(self):\n self.maxDiff = None\n\n fh = StringIO()\n worksheet = Worksheet()\n worksheet._set_filehandle(fh)\n cell_format = Format({\"xf_index\": 1})\n\n # No format. Should be ignored.\n worksheet.write(0, 0, None)\n\n worksheet.write(1, 2, None, cell_format)\n\n worksheet.select()\n worksheet._assemble_xml_file()\n\n exp = _xml_to_list(\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <worksheet xmlns=\"http://schemas.openxmlformats.org/spreadsheetml/2006/main\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\">\n <dimension ref=\"C2\"/>\n <sheetViews>\n <sheetView tabSelected=\"1\" workbookViewId=\"0\"/>\n </sheetViews>\n <sheetFormatPr defaultRowHeight=\"15\"/>\n <sheetData>\n <row r=\"2\" spans=\"3:3\">\n <c r=\"C2\" s=\"1\"/>\n </row>\n </sheetData>\n <pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.3\"/>\n </worksheet>\n \"\"\"\n )\n\n got = _xml_to_list(fh.getvalue())\n\n self.assertEqual(got, exp)", "def example_xml_file43():\n return load_xml('datacite-v4.3-full-example.xml')", "def _get_eps_xml(self):\n format_path = os.path.join(os.path.dirname(__file__), \"formats\")\n\n # loop through files where filename starts with \"eps_ascat\".\n for filename in fnmatch.filter(os.listdir(format_path), \"eps_ascat*\"):\n doc = etree.parse(os.path.join(format_path, filename))\n file_extension = doc.xpath(\"//file-extensions\")[0].getchildren()[0]\n\n format_version = doc.xpath(\"//format-version\")\n for elem in format_version:\n major = elem.getchildren()[0]\n minor = elem.getchildren()[1]\n\n # return the xml file matching the metadata of the datafile.\n if major.text == self.mphr[\"FORMAT_MAJOR_VERSION\"] and \\\n minor.text == self.mphr[\"FORMAT_MINOR_VERSION\"] and \\\n self.mphr[\n \"PROCESSING_LEVEL\"] in file_extension.text and \\\n self.mphr[\"PRODUCT_TYPE\"] in file_extension.text:\n return os.path.join(format_path, filename)", "def _check_file_exists_helper(self, report_path, filename):\n\n if not check_data_exists(report_path, [filename]):\n raise AssertionError(\n \"{} does not exist in 
location {}\".format(\n filename, report_path\n )\n )", "def _check_xml_syntax_error(self):\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((\n xml_file, result.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True", "def verified_excel_file(store, institute_list, temp_excel_dir):\n document_lines = []\n written_files = 0\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n LOG.info('Creating verified variant document..')\n\n for cust in institute_list:\n verif_vars = store.verified(institute_id=cust)\n LOG.info('Found {} verified variants for customer {}'.format(len(verif_vars), cust))\n\n if not verif_vars:\n continue\n unique_callers = set()\n for var_type, var_callers in CALLERS.items():\n for caller in var_callers:\n unique_callers.add(caller.get('id'))\n cust_verified = export_verified_variants(verif_vars, unique_callers)\n\n document_name = '.'.join([cust, '_verified_variants', today]) + '.xlsx'\n workbook = Workbook(os.path.join(temp_excel_dir,document_name))\n Report_Sheet = workbook.add_worksheet()\n\n # Write the column header\n row = 0\n for col,field in enumerate(VERIFIED_VARIANTS_HEADER + list(unique_callers)):\n Report_Sheet.write(row,col,field)\n\n # Write variant lines, after header (start at line 1)\n for row, line in enumerate(cust_verified,1): # each line becomes a row in the document\n for col, field in enumerate(line): # each field in line becomes a cell\n Report_Sheet.write(row,col,field)\n workbook.close()\n\n if os.path.exists(os.path.join(temp_excel_dir,document_name)):\n written_files += 1\n\n return written_files", "def datafileexist(filename):\n filePath = os.path.join(pathtofolder(), \"datas\", filename)\n fileFormat = '.csv'\n return os.path.exists(f'{filePath+fileFormat}')", "def _valid_rootnode_file(self, xml_filepath: str) -> bool:\n\t\troot_name: str = get_xml_file(xml_filepath).tag\n\t\t# Iterate over RootNodes\n\t\tfor rootnode in self.root_nodes:\n\t\t\t# Check is filenames are identical\n\t\t\tif root_name == rootnode.name:\n\t\t\t\trootnode.subfiles.add(xml_filepath)\n\t\t\t\t# Return False, match is found\n\t\t\t\treturn False\n\n\t\t# Return True if no match found.\n\t\treturn True", "def test_get_workflow_definition_xml(self):\n pass", "def test_01_FindXml(self):\n self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)\n self.assertEqual(self.m_xml.controller_sect.tag, 'ControllerSection', 'XML - No Controllers section')\n self.assertEqual(self.m_xml.controller.tag, 'Controller', 'XML - No Controller section')", "def test_validate_invalid(self):\r\n self.assertEqual(get_tree_and_validate(self.invalid_xml, open(self.SCHEMA, 'r').read()), 0)", "def test_schema_exists(self):\n return exclusions.open()", "def example_xml_file41():\n return load_xml('datacite-v4.1-full-example.xml')", "def test_generate_report():\n # Calling helper function to create data\n data = helper_create_data()\n cat_vars = ['C1', 'C2', 'C3', 'C4']\n num_vars = ['N1', 'N2', 'N3']\n\n # Positive test case: Checking whether the function runs properly or not\n assert eda.generate_report(data, cat_vars, num_vars), \\\n \"Expected True but False returned\"\n\n # Negative test case: Checking whether the function returns False\n # fr wrong output\n assert not eda.generate_report(data, cat_vars, \"String Input\"), \\\n \"Expected False but True returned\"", "def 
test_report_definition(self):\n self.model = self.scan.model\n self.model.save()\n new_model = pycotools3.tasks.CopasiMLParser(self.copasi_file).xml\n reports = new_model.find('{http://www.copasi.org/static/schema}ListOfReports')\n check = False\n for report in reports:\n if report.attrib['name'] == 'profile_likelihood':\n check = True\n self.assertTrue(check)", "def test_xml_direct(self): \n response = client.result(True, 'xml', 'unittest', test_data = self.test_data)\n root = ET.fromstring(response)\n first_name = root[0][0][0].text\n self.assertEqual(first_name,'John', 'Should print John')\n nationality = '<nationality>' in response\n self.assertFalse(nationality, 'Nationality should not be present')", "def check_xshear_output(self):\n lens_nchunk=self['lens_conf']['nchunk']\n tilenames=scat.get_tilenames(self['source_conf']['scat_table'])\n\n ntile=len(tilenames)\n for lens_chunk in xrange(lens_nchunk):\n print(\" checking chunk: %d/%d\" % (lens_chunk+1, lens_nchunk))\n for i,tilename in enumerate(tilenames):\n # first check if this source catalog exists\n if self._scat_exists(tilename):\n job=XShearWQJob(self['run'],\n lens_chunk,\n tilename)\n info=job.get_info()\n if not os.path.exists(info['output_file']):\n print(\"missing output:\",info['output_file'])", "def test_assemble_xml_file(self):\n self.maxDiff = None\n\n fh = StringIO()\n worksheet = Worksheet()\n worksheet._set_filehandle(fh)\n worksheet.str_table = SharedStringTable()\n worksheet.select()\n cell_format1 = Format({\"xf_index\": 1})\n cell_format2 = Format({\"xf_index\": 2})\n\n worksheet.merge_range(\"B3:C3\", \"Foo\", cell_format1)\n worksheet.merge_range(\"A2:D2\", \"\", cell_format2)\n\n worksheet.select()\n worksheet._assemble_xml_file()\n\n exp = _xml_to_list(\n \"\"\"\n <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n <worksheet xmlns=\"http://schemas.openxmlformats.org/spreadsheetml/2006/main\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\">\n <dimension ref=\"A2:D3\"/>\n <sheetViews>\n <sheetView tabSelected=\"1\" workbookViewId=\"0\"/>\n </sheetViews>\n <sheetFormatPr defaultRowHeight=\"15\"/>\n <sheetData>\n <row r=\"2\" spans=\"1:4\">\n <c r=\"A2\" s=\"2\"/>\n <c r=\"B2\" s=\"2\"/>\n <c r=\"C2\" s=\"2\"/>\n <c r=\"D2\" s=\"2\"/>\n </row>\n <row r=\"3\" spans=\"1:4\">\n <c r=\"B3\" s=\"1\" t=\"s\">\n <v>0</v>\n </c>\n <c r=\"C3\" s=\"1\"/>\n </row>\n </sheetData>\n <mergeCells count=\"2\">\n <mergeCell ref=\"B3:C3\"/>\n <mergeCell ref=\"A2:D2\"/>\n </mergeCells>\n <pageMargins left=\"0.7\" right=\"0.7\" top=\"0.75\" bottom=\"0.75\" header=\"0.3\" footer=\"0.3\"/>\n </worksheet>\n \"\"\"\n )\n\n got = _xml_to_list(fh.getvalue())\n\n self.assertEqual(got, exp)", "def xslTestText(self):\n return u'normalize-space(ancestor::*/%s)' % self.name", "def test_exist(self):\n with self.assertRaises(IncompetentQiitaDeveloperError):\n MetadataTemplate.exists(self.study)", "def missing_results(self):\n self.calc_progress()\n\n def no_result_exists(x):\n return not os.path.isfile(\n os.path.join(self.location, \"results\", RSLT_NM.format(x))\n )\n\n return tuple(filter(no_result_exists, range(1, self.num_batches + 1)))", "def ruleset_create_success_hr():\n with open(\n os.path.join(TEST_DATA_DIRECTORY, \"create_ruleset_success_hr.md\")\n ) as file:\n f = file.read()\n return f", "def test_validation_no_schema(self):\n TT = TEITransformer(scenario='drama')\n tei_path = \"tests/test_user_schema/tei.xml\"\n TT.load_tei(tei_path)\n with self.assertRaises(ValueError):\n 
TT.transform(output_format=\"html\")", "def example_xml_file42():\n return load_xml('datacite-v4.2-full-example.xml')", "def xslTestText(self):\n return u'normalize-space(child::*/%s)' % self.name", "def validate_file(self):\n print \"\\n******\"\n print \" Done creating file. Validation messages follow.\"\n missing_nodes = {'group': [], 'dataset': []}\n custom_nodes = {'group': [], 'dataset': []}\n for ns in self.id_lookups:\n for id in self.id_lookups[ns]:\n for path in self.id_lookups[ns][id]:\n qty = self.id_lookups[ns][id][path]['qty']\n type = self.id_lookups[ns][id][path]['type']\n count = len(self.id_lookups[ns][id][path]['created'])\n if qty in ('!', '+') and count == 0:\n missing_nodes[type].append(\"%s:%s/%s\" % (ns, path, id))\n for path, node_list in self.all_nodes.iteritems():\n for root_node in node_list:\n self.validate_nodes(root_node, missing_nodes, custom_nodes)\n self.report_problems(missing_nodes, \"missing\")\n self.report_problems(custom_nodes, \"custom\")\n if self.custom_attributes:\n count = len(self.custom_attributes)\n print \"%i nodes with custom attributes\" % len(self.custom_attributes)\n if count > 20:\n print \"Only first 20 shown;\"\n names = self.custom_attributes.keys()[0:min(20, count)]\n nlist = []\n for name in names:\n nlist.append(name+ \"->\" +str(self.custom_attributes[name]))\n print nlist\n else:\n print \"No custom attributes. Good.\"", "def generate_expected_file(self, expected_file, xml_name):\n\t\tlogging.info('Gerando arquivo de documentos esperados')\n\t\tcontent = self.read_xml(xml_name)\n\n\t\twith open(expected_file, 'w', newline='') as csvfile:\n\t\t\tfieldnames = ['QueryNumber', 'DocNumber', 'DocVotes']\n\t\t\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t\t\twriter.writeheader()\n\t\t\tfor index in range(0, len(content['QueryNumber'])):\n\t\t\t\tcount_results = 0\n\t\t\t\tlogging.info('Escrevendo documentos da consulta '+str(index+1)+'/'+str(len(content['QueryNumber'])))\n\t\t\t\tfor result in content['Records'][index]:\n\t\t\t\t\twriter.writerow({'QueryNumber': content['QueryNumber'][index], 'DocNumber': result[0], \n\t\t\t\t\t\t\t\t\t 'DocVotes': result[1]})\n\t\t\t\t\tcount_results += 1\n\t\t\t\t\tif count_results == int(content['Results'][index]): break", "def example_xml_file():\n return load_xml('datacite-v3.1-full-example.xml')", "def test_predicate1(self):\n xpb = XPathBuilder()\n xp = xpb.action.source[xpb.attr('project') == 'bar']\n exp = '/action/source[@project = \"bar\"]'\n self.assertEqual(xp.tostring(), exp)", "def test_xml_safety_flag(self):\r\n\r\n self._setstaff_login()\r\n response = self._add_edx4edx()\r\n self.assertIn('GIT_IMPORT_WITH_XMLMODULESTORE', response.content)\r\n\r\n def_ms = modulestore()\r\n course = def_ms.courses.get('{0}/edx4edx_lite'.format(\r\n os.path.abspath(settings.DATA_DIR)), None)\r\n self.assertIsNone(course)", "def test_incomplete_xml(self):\n self.__opener.contents = '<Report></Report>>'\n self.assertEqual(-1, self.__uft.failed_tests('url'))", "def check_data():\n check_docs(\"Training\")\n check_docs(\"dev\")\n check_docs(\"Test\")", "def is_good_enough_xml(self, resp):\n content_type = resp.headers['Content-Type'].lower()\n \n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('xml') > -1)", "def test_XmlDump_compare_single(self):\n self._compare_variants(False)", "def sniff( self, filename ):\n\n f = open( filename, \"r\" )\n firstlines = \"\".join( f.readlines(5) )\n f.close()\n\n if \"phyloxml\" in firstlines:\n return True\n 
return False", "def _run_test_and_get_xml(self, flag):\n\n xml_fhandle, xml_fname = tempfile.mkstemp()\n os.close(xml_fhandle)\n\n try:\n binary = self._get_helper()\n args = [binary, flag, '--xml_output_file=%s' % xml_fname]\n ret = subprocess.call(args)\n self.assertEqual(ret, 0)\n\n xml = ElementTree.parse(xml_fname).getroot()\n finally:\n os.remove(xml_fname)\n\n return xml", "def load_data_from_xsl(file_name):\n\tnlp_data = pd.read_excel(file_name, sheet_name=0, header=0, usecols=[1, 2, 3],\n\t converters={'bug_id': str, 'summary': str, 'description': str})\n\tnlp_data.fillna(' ', inplace=True)\n\n\t# nlp_data['description'] = nlp_data['description'].map(lambda x: clean_str(x+''))\n\n\treturn nlp_data", "def test_predicate2(self):\n xpb = XPathBuilder()\n xp = xpb.action.source.where(xpb.attr('project').equals('bar'))\n exp = '/action/source[@project = \"bar\"]'\n self.assertEqual(xp.tostring(), exp)", "def verifyFileExists(self, fileDir, fileName):\n # check that file exists\n fpath = fileDir.child(fileName)\n self.assertTrue(fpath.exists())\n\n # check that the output files have some content\n fcontents = fpath.getContent()\n self.assertTrue(len(fcontents) > 0)\n\n # check that the html files are at least html-ish\n # this is not a terribly rigorous check\n if fpath.path.endswith(\".html\"):\n self.assertIn(b\"<body\", fcontents)", "def evaluate(self, xml_gold_path, xml_output_path):\n\n # Go through all files in xml_gold_path directory\n for file in os.listdir(xml_gold_path):\n\n # Set path to file\n file = xml_gold_path+file\n\n # Open files only, ignore subdirectories\n if os.path.isfile(file) and file.lower().endswith('.xml'):\n\n # Open xml files\n chapter_input_gold = open(file, 'r', encoding='utf8')\n chapter_input_test = open(xml_output_path+os.path.split(file)[-1], 'r', encoding='utf8')\n\n # Check if filenams are the same\n chapter_input_gold_name = os.path.split(chapter_input_gold.name)[-1]\n chapter_input_test_name = os.path.split(chapter_input_test.name)[-1]\n\n if chapter_input_gold_name == chapter_input_test_name:\n\n # Console log\n chapter_input_gold_name = chapter_input_gold.name\n chapter_input_test_name = chapter_input_test.name\n #print('Calculating score for: ' + chapter_input_gold_name + ' and: ' + chapter_input_test_name)\n\n # Process xml input file with BeautifulSoup\n chapter_input_gold = BeautifulSoup(chapter_input_gold, 'xml')\n chapter_input_test = BeautifulSoup(chapter_input_test, 'xml')\n\n # Empty variables for collecting Target scores\n target_precision_scores = 0\n target_recall_scores = 0\n target_f1_scores = 0\n target_jaccard_scores = 0\n\n # Empty variables for collecting Focus scores\n focus_precision_scores = 0\n focus_recall_scores = 0\n focus_f1_scores = 0\n focus_jaccard_scores = 0\n\n # Empty variables for collecting Negated scores\n negated_precision_scores = 0\n negated_recall_scores = 0\n negated_f1_scores = 0\n negated_jaccard_scores = 0\n\n # Empty variables for collecting Scope scores\n scope_precision_scores = 0\n scope_recall_scores = 0\n scope_f1_scores = 0\n scope_jaccard_scores = 0\n\n # Count sentences and frames\n sentence_count = 0\n gold_frames_count = 0\n test_frames_count = 0\n\n scope_gold_frames_count = 0\n #scope_test_frames_count = 0\n\n # Find all Gold and Test Sentences\n sentences_gold = chapter_input_gold.find_all('s')\n sentences_test = chapter_input_test.find_all('s')\n\n #targets_gold = chapter_input_gold.find_all('target')\n #targets_test = chapter_input_test.find_all('target')\n\n scope_gold_frames = 
chapter_input_gold.find_all('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_frames_count = len(scope_gold_frames)\n\n scope_test_frames = chapter_input_test.find_all('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_frames_count = len(scope_test_frames)\n\n # Exit if number of sentences != between Gold and Test files\n if len(sentences_gold) != len(sentences_test):\n raise SystemExit(print('Number of sentences between Gold and Test files does not match.\\nGold:',\n len(sentences_gold), 'Test:', len(sentences_test)))\n\n # Zip Gold and Test Sentences\n for s_gold, s_test in zip(sentences_gold, sentences_test):\n\n sentence_count = sentence_count + 1\n\n gold_frames = s_gold.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n test_frames = s_test.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n\n gold_frames_count = gold_frames_count + len(gold_frames)\n test_frames_count = test_frames_count + len(test_frames)\n\n for item in zip(gold_frames, test_frames):\n\n #print('\\n=========')\n #print('\\nFrame:', item[0].get('id'))\n\n target_gold_list = []\n target_test_list = []\n\n focus_gold_list = []\n focus_test_list = []\n\n negated_gold_list = []\n negated_test_list = []\n\n scope_gold_list = []\n scope_test_list = []\n\n # Flatten a nested list of fenodes\n def flatten(nested_list):\n \"\"\" Flatten a nested list of fenodes \"\"\"\n t_l = []\n for i in nested_list:\n if not isinstance(i, list):\n t_l.append(i)\n else:\n t_l.extend(flatten(i))\n return t_l\n\n # Target\n if item[0].find('target'):\n target_gold = item[0].find('target')\n target_gold_fenode_id = target_gold.find('fenode').get('idref')\n target_gold_word = s_gold.find(id=target_gold_fenode_id).get('word').lower()\n\n try:\n target_test = item[1].find('target')\n target_test_fenode__id = target_test.find('fenode').get('idref')\n target_test_word = s_test.find(id=target_test_fenode__id).get('word').lower()\n except:\n target_test_word = ''\n\n elif item[1].find('target'):\n target_test = item[1].find('target')\n target_test_fenode__id = target_test.find('fenode').get('idref')\n target_test_word = s_test.find(id=target_test_fenode__id).get('word').lower()\n\n try:\n target_gold = item[0].find('target')\n target_gold_fenode_id = target_gold.find('fenode').get('idref')\n target_gold_word = s_gold.find(id=target_gold_fenode_id).get('word').lower()\n except:\n target_gold_word = ''\n\n target_gold_list.append(target_gold_word)\n target_test_list.append(target_test_word)\n\n # Sort lists\n sorted_target_gold_list = sorted(flatten(target_gold_list))\n sorted_target_test_list = sorted(flatten(target_test_list))\n\n #print('\\nTarget [Gold]:', sorted_target_gold_list)\n #print('Target [Test]:', sorted_target_test_list)\n\n\n # Focus\n if item[0].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_gold = item[0].find('fe', {'name' : FOCUS_TAG_NAME})\n try:\n focus_gold_fenode_id = focus_gold.find('fenode').get('idref')\n focus_gold_word = s_gold.find(id=focus_gold_fenode_id).get('word').lower()\n except:\n focus_gold_word = ''\n if item[1].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_test = item[1].find('fe', {'name' : FOCUS_TAG_NAME})\n try:\n focus_test_fenode_id = focus_test.find('fenode').get('idref')\n focus_test_word = s_test.find(id=focus_test_fenode_id).get('word').lower()\n except:\n focus_test_word = ''\n else:\n focus_test_word = ''\n\n elif item[1].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_test = item[1].find('fe', {'name' : FOCUS_TAG_NAME})\n try:\n focus_test_fenode_id = focus_test.find('fenode').get('idref')\n focus_test_word 
= s_test.find(id=focus_test_fenode_id).get('word').lower()\n except:\n focus_test_word = ''\n if item[0].find('fe', {'name' : FOCUS_TAG_NAME}):\n focus_gold = item[0].find('fe', {'name' : FOCUS_TAG_NAME})\n focus_gold_fenode_id = focus_gold.find('fenode').get('idref')\n try:\n focus_gold_word = s_gold.find(id=focus_gold_fenode_id).get('word').lower()\n except AttributeError:\n focus_gold_word = ''\n else:\n focus_gold_word = ''\n\n focus_gold_list.append(focus_gold_word)\n focus_test_list.append(focus_test_word)\n\n # Sort lists\n sorted_focus_gold_list = sorted(flatten(focus_gold_list))\n sorted_focus_test_list = sorted(flatten(focus_test_list))\n\n #print('\\nFocus [Gold]:', sorted_focus_gold_list)\n #print('Focus [Test]:', sorted_focus_test_list)\n\n\n # Negated\n if item[0].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_gold = item[0].find('fe', {'name' : NEGATED_TAG_NAME})\n negated_gold_fenode_id = negated_gold.find('fenode').get('idref')\n try:\n negated_gold_word = s_gold.find(id=negated_gold_fenode_id).get('word').lower()\n except AttributeError:\n negated_gold_word = ''\n if item[1].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_test = item[1].find('fe', {'name' : NEGATED_TAG_NAME})\n try:\n negated_test_fenode_id = negated_test.find('fenode').get('idref')\n negated_test_word = s_test.find(id=negated_test_fenode_id).get('word').lower()\n except:\n negated_test_word = ''\n else:\n negated_test_word = ''\n\n elif item[1].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_test = item[1].find('fe', {'name' : NEGATED_TAG_NAME})\n try:\n negated_test_fenode_id = negated_test.find('fenode').get('idref')\n negated_test_word = s_test.find(id=negated_test_fenode_id).get('word').lower()\n except:\n negated_test_word = ''\n if item[0].find('fe', {'name' : NEGATED_TAG_NAME}):\n negated_gold = item[0].find('fe', {'name' : NEGATED_TAG_NAME})\n negated_gold_fenode_id = negated_gold.find('fenode').get('idref')\n try:\n negated_gold_word = s_gold.find(id=negated_gold_fenode_id).get('word').lower()\n except AttributeError:\n negated_gold_word = ''\n else:\n negated_gold_word = ''\n else:\n negated_test_word = ''\n negated_gold_word = ''\n\n negated_gold_list.append(negated_gold_word)\n negated_test_list.append(negated_test_word)\n\n # Sort lists\n sorted_negated_gold_list = sorted(flatten(negated_gold_list))\n sorted_negated_test_list = sorted(flatten(negated_test_list))\n\n #print('\\nNegated [Gold]:', sorted_negated_gold_list)\n #print('Negated [Test]:', sorted_negated_test_list)\n\n\n # Resolve Terminals if Scope on a complex graph\n def resolve_non_terminals(idref):\n \"\"\" This function resolves a complex gold graph to\n a simple flat list of tokens.\n \"\"\"\n nonterminal = s_gold.find(id=idref)\n edges = nonterminal.find_all('edge')\n edge_words = []\n for edge in edges:\n e_id = edge.get('idref')\n if s_gold.find(id=e_id).get('word') is not None:\n try:\n edge_word = s_gold.find(id=e_id).get('word').lower()\n edge_words.append(edge_word)\n except:\n pass\n if s_gold.find(id=e_id).get('word') is None:\n edge_words.append(resolve_non_terminals(e_id))\n\n return edge_words\n\n def resolve_non_terminals_test(idref):\n \"\"\" This function resolves a complex test graph to\n a simple flat list of tokens.\n \"\"\"\n nonterminal = s_test.find(id=idref)\n edges = nonterminal.find_all('edge')\n edge_words = []\n for edge in edges:\n e_id = edge.get('idref')\n if s_test.find(id=e_id).get('word') is not None:\n try:\n edge_word = s_test.find(id=e_id).get('word').lower()\n 
edge_words.append(edge_word)\n except:\n pass\n if s_test.find(id=e_id).get('word') is None:\n edge_words.append(resolve_non_terminals(e_id))\n\n return edge_words\n\n # Scope\n if item[0].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_gold = item[0].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_fenodes = scope_gold.find_all('fenode')\n for s_g in scope_gold_fenodes:\n s_id = s_g.get('idref')\n if s_gold.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_gold.find(id=s_id).get('word').lower()\n scope_gold_list.append(scope_word)\n except:\n pass\n if s_gold.find(id=s_id).get('word') is None:\n scope_gold_list.append(resolve_non_terminals(s_id))\n else:\n pass\n\n if item[1].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_test = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_fenodes = scope_test.find_all('fenode')\n for s_t in scope_test_fenodes:\n s_id = s_t.get('idref')\n if s_test.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_test.find(id=s_id).get('word').lower()\n scope_test_list.append(scope_word)\n except:\n pass\n elif s_test.find(id=s_id).get('word') is None:\n scope_test_list.append(resolve_non_terminals_test(s_id))\n else:\n scope_test_list.append('')\n\n elif item[1].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_test = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_test_fenodes = scope_test.find_all('fenode')\n for s_t in scope_test_fenodes:\n s_id = s_t.get('idref')\n if s_test.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_test.find(id=s_id).get('word').lower()\n scope_test_list.append(scope_word)\n except:\n pass\n if s_test.find(id=s_id).get('word') is None:\n scope_test_list.append(resolve_non_terminals_test(s_id))\n else:\n pass\n\n if item[0].find('fe', {'name' : SCOPE_TAG_NAME}):\n scope_gold = item[1].find('fe', {'name' : SCOPE_TAG_NAME})\n scope_gold_fenodes = scope_gold.find_all('fenode')\n for s_g in scope_gold_fenodes:\n s_id = s_g.get('idref')\n if s_gold.find(id=s_id).get('word') is not None:\n try:\n scope_word = s_gold.find(id=s_id).get('word').lower()\n scope_gold_list.append(scope_word)\n except:\n pass\n if s_gold.find(id=s_id).get('word') is None:\n scope_gold_list.append(resolve_non_terminals(s_id))\n else:\n pass\n else:\n scope_gold_list.append('')\n\n # Sort lists\n sorted_scope_gold_list = sorted(flatten(scope_gold_list))\n sorted_scope_test_list = sorted(flatten(scope_test_list))\n\n #print('\\nScope [Gold]:', sorted_scope_gold_list)\n #print('Scope [Test]:', sorted_scope_test_list)\n\n # If lists are same length, check if items are same\n if len(sorted_scope_gold_list) == len(sorted_scope_test_list):\n sorted_scope_test_list_intersection = set(sorted_scope_gold_list).intersection(sorted_scope_test_list)\n sorted_scope_test_list_intersection = list(sorted_scope_test_list_intersection)\n if len(sorted_scope_test_list_intersection) < len(sorted_scope_test_list):\n difference = len(sorted_scope_test_list) - len(sorted_scope_test_list_intersection)\n empty_element = 0\n\n while empty_element < difference:\n sorted_scope_test_list_intersection.append('')\n empty_element = empty_element + 1\n \n sorted_scope_test_list = sorted_scope_test_list_intersection\n\n # If lists are different lengths, add empty elements\n elif len(sorted_scope_gold_list) > len(sorted_scope_test_list):\n difference = len(sorted_scope_gold_list) - len(sorted_scope_test_list)\n empty_element = 0\n\n while empty_element < difference:\n sorted_scope_test_list.append('')\n empty_element = empty_element + 1\n\n elif 
len(sorted_scope_test_list) > len(sorted_scope_gold_list):\n difference = len(sorted_scope_test_list) - len(sorted_scope_gold_list)\n empty_element = 0\n\n while empty_element < difference:\n sorted_scope_gold_list.append('')\n empty_element = empty_element + 1\n\n\n # Align items in the lists for sklearn, set 1 for matched items, else set 0\n sorted_target_gold_list_normalized = [1 if element in sorted_target_gold_list and not element == \"\" else 0 for element in sorted_target_gold_list]\n sorted_target_test_list_normalized = [1 if element in sorted_target_gold_list else 0 for element in sorted_target_test_list]\n\n sorted_focus_gold_list_normalized = [1 if element in sorted_focus_gold_list and not element == \"\" else 0 for element in sorted_focus_gold_list]\n sorted_focus_test_list_normalized = [1 if element in sorted_focus_gold_list else 0 for element in sorted_focus_test_list]\n\n sorted_negated_gold_list_normalized = [1 if element in sorted_negated_gold_list and not element == \"\" else 0 for element in sorted_negated_gold_list]\n sorted_negated_test_list_normalized = [1 if element in sorted_negated_gold_list else 0 for element in sorted_negated_test_list]\n\n sorted_scope_gold_list_normalized = [1 if element in sorted_scope_gold_list and not element == \"\" else 0 for element in sorted_scope_gold_list]\n sorted_scope_test_list_normalized = [1 if element in sorted_scope_gold_list else 1 if not element == \"\" else 0 for element in sorted_scope_test_list]\n\n #print(sorted_scope_gold_list_normalized)\n #print(sorted_scope_test_list_normalized)\n\n\n # Sklearn calculations\n #target_precision_scores = target_precision_scores + precision_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n #target_recall_scores = target_recall_scores + recall_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n target_f1_scores = target_f1_scores + f1_score(sorted_target_gold_list_normalized, sorted_target_test_list_normalized, average='weighted')\n #target_jaccard_scores = target_jaccard_scores + jaccard_similarity_score(sorted_target_gold_list, sorted_target_test_list)\n\n #focus_precision_scores = focus_precision_scores + precision_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n #focus_recall_scores = focus_recall_scores + recall_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n focus_f1_scores = focus_f1_scores + f1_score(sorted_focus_gold_list_normalized, sorted_focus_test_list_normalized, average='weighted')\n #focus_jaccard_scores = focus_jaccard_scores + jaccard_similarity_score(sorted_focus_gold_list, sorted_focus_test_list)\n\n #negated_precision_scores = negated_precision_scores + precision_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, average='weighted')\n #negated_recall_scores = negated_recall_scores + recall_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, average='weighted')\n negated_f1_scores = negated_f1_scores + f1_score(sorted_negated_gold_list_normalized, sorted_negated_test_list_normalized, average='weighted')\n #negated_jaccard_scores = negated_jaccard_scores + jaccard_similarity_score(sorted_negated_gold_list, sorted_negated_test_list)\n\n scope_precision_scores = scope_precision_scores + precision_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_recall_scores = 
scope_recall_scores + recall_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_f1_scores = scope_f1_scores + f1_score(sorted_scope_gold_list_normalized, sorted_scope_test_list_normalized, average='weighted')\n scope_jaccard_scores = scope_jaccard_scores + jaccard_similarity_score(sorted_scope_gold_list, sorted_scope_test_list)\n\n\n print('\\n=============================')\n print('====== EVALUATION for:', chapter_input_test_name, '======')\n print('Total Sentences:', sentence_count,\n '\\nNegation Gold frames:', gold_frames_count,\n '\\nNegation Test frames:', test_frames_count, '\\n')\n\n print('----- CUEWORDS -----')\n #print('Precision:\\t', target_precision_scores / gold_frames_count)\n #print('Recall:\\t', target_recall_scores / gold_frames_count)\n print('F1 score:\\t', target_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', target_jaccard_scores / gold_frames_count)\n\n print('\\n----- FOCUS -----')\n #print('Precision:\\t', focus_precision_scores / gold_frames_count)\n #print('Recall:\\t', focus_recall_scores / gold_frames_count)\n print('F1 score:\\t', focus_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', focus_jaccard_scores / gold_frames_count)\n\n print('\\n----- NEGATED -----')\n #print('Precision:\\t', negated_precision_scores / gold_frames_count)\n #print('Recall:\\t', negated_recall_scores / gold_frames_count)\n print('F1 score:\\t', negated_f1_scores / gold_frames_count)\n #print('Jaccard similarity:\\t', negated_jaccard_scores / gold_frames_count)\n\n print('\\n----- SCOPE -----\\nScope Gold frames:', scope_gold_frames_count, '\\nScope Test frames:', scope_test_frames_count, '\\n')\n print('Precision:\\t', scope_precision_scores / scope_test_frames_count)\n print('Recall:\\t', scope_recall_scores / scope_test_frames_count)\n print('F1 score:\\t', scope_f1_scores / scope_test_frames_count)\n print('Jaccard similarity:\\t', scope_jaccard_scores / scope_test_frames_count)\n\n print('Done!')", "def is_failed(doc_dict):\n\n results_xml = os.listdir(os.path.join(config.TOC_OCR_RESULTS, doc_dict['name']))\n \n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR): Results directory contents for {os.path.join(config.TOC_OCR_RESULTS,doc_dict['name'])}:\")\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR): {results_xml}\")\n\n if len(results_xml) == 0:\n raise IOError(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} ERROR (OCR): Result XML files not found in {os.path.join(config.TOC_OCR_RESULTS, doc_dict['name'])}...\")\n\n for item in results_xml:\n # open XML file and parse it as an ordered dict\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR): Found result file: {item}\")\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR): Opening result file {os.path.join(config.TOC_OCR_RESULTS, doc_dict['name'], item)}...\")\n with open(os.path.join(config.TOC_OCR_RESULTS, doc_dict['name'], item), mode='rb') as f:\n xml = xmltodict.parse(xml_input=f)\n # print(\"OCR XML: \", xml)\n\n # find XmlResult in the ordered dictionary created by parsing XML file\n result_generator = utility.find_item_in_response(data=xml, key='@IsFailed')\n\n # find IsFailed property in XmlResult ordered dict\n for found_value in result_generator:\n # is_failed_generator = utility.find_item_in_response(data=result, key='@IsFailed')\n #\n # # check the value of IsFailed property\n # for found_value in is_failed_generator:\n # print(\"IS FAILED: \", found_value)\n if 
found_value == 'true':\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR): TRUE RESULT FOUND VALUE: {found_value}\")\n return True\n else:\n print(f\"{format(datetime.now(), '%Y-%m-%d %H:%M:%S')} INFO (OCR ): FALSE RESULT FOUND VALUE: {found_value}\")\n return False", "def test_install_set_existing(self):\n expected = copy.deepcopy(test_xdata)\n expected.find(\"Text\").text = \"Changed content\"\n self._install([lxml.etree.Element(\"Set\", path=\"Test/Text/#text\",\n value=\"Changed content\")],\n expected)", "def test_load():\n t = PandasTransformer()\n os.makedirs(target_dir, exist_ok=True)\n t.parse(os.path.join(resource_dir, \"x1n.csv\"))\n t.parse(os.path.join(resource_dir, \"x1e.csv\"))\n t.report()\n t.save(os.path.join(target_dir, 'x1copy'))\n # w = GraphMLTransformer(t.graph)\n # w.save(os.path.join(target_dir, \"x1n.graphml\"))", "def test_write(self):\n cases = {\n self.test_eac + \"NE00401.xml\": True,\n self.test_eac + \"NE01501.xml\": False,\n self.test_eac + \"NE01302.xml\": True,\n }\n metadata_url = 'http://www.example.com/metadata.xml'\n presentation_url = 'http://www.example.com/presentation.html'\n for case in cases:\n doc = EacCpf.EacCpf(case, metadata_url, presentation_url)\n self.assertNotEqual(doc, None)\n path = doc.write(self.temp)\n self.assertEquals(os.path.exists(path), True)\n # read the file and try to extract the attributes\n try:\n tree = etree.parse(path)\n ns = {\n EacCpf.DOC_KEY: EacCpf.DOC_NS,\n EacCpf.ESRC_KEY: EacCpf.ESRC_NS,\n }\n # get the url to the metadata file\n metadata = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":metadata\", namespaces=ns)\n self.assertNotEqual(metadata, None)\n self.assertEqual(metadata[0], metadata_url)\n # get the url to the presentation file\n presentation = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":presentation\", namespaces=ns)\n self.assertNotEqual(presentation, None)\n self.assertEqual(presentation[0], presentation_url)\n # get the url to the source file\n source = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":source\", namespaces=ns)\n self.assertNotEqual(source, None)\n self.assertEqual(source[0], case)\n except:\n msg = \"Failed to complete parsing of {0}\".format(case)\n self.log.error(msg, exc_info=True)\n self.fail(msg)", "def recipe12_8():\n from xml.parsers.xmlproc import utils, xmlval, xmldtd\n def validate_xml_file(xml_filename, app=None, dtd_filename=None):\n # build validating parser object with appropriate error handler\n parser=xmlval.Validator()\n parser.set_error_handler(utils.ErrorPrinter(parser))\n if dtd_filename is None:\n # DTD fiel specified, laod and set it as the DTD to use\n dtd=xmldtd.load_dtd(dtd_filename)\n parser.val.dtd = parser.dtd = parser.ent = dtd\n if app is not None:\n # Application processing requested, set application object\n parser.set_application(app)\n # everything being set correctly, finally perform the parsing\n parser.parse_resource(xml_filename) \n # if XML data is in a string s, use instead\n # parser.feed(s)\n # parser.close(s)", "def test_missing_shx(self):\n path = os.path.join(BASE_DIR, \"tests\", \"fixtures\", \"missing_shx.zip\")\n zip_file = zipfile.ZipFile(path)\n\n with self.assertRaises(MissingFiles) as context:\n get_shapefile(zip_file)\n the_exception = context.exception\n self.assertEqual(MISSING_FILE, the_exception.message)", "def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, 
dataset_name, file)):\r\n return False\r\n return True", "def test_exists_false(self):\n self.assertFalse(PrepTemplate.exists(2))", "def create_xml_regression(lfiles, lsbj, foxml):\n\n impl = xml.dom.minidom.getDOMImplementation()\n doc = impl.createDocument(None, \"some_tag\", None)\n top_element = doc.documentElement\n\n e = doc.createElement('subject')\n e.setAttribute('id', 'case')\n\n for i, fn in enumerate(lfiles):\n v = doc.createElement('visit')\n v.setAttribute('id', \"subj{}\".format(i))\n\n f = doc.createElement('filename')\n f.setAttribute('object_id', \"face\")\n t = doc.createTextNode(fn)\n f.appendChild(t)\n\n a = doc.createElement('age')\n x = doc.createTextNode(str(lsbj[i][\"age\"]))\n a.appendChild(x)\n\n\n v.appendChild(f)\n v.appendChild(a)\n e.appendChild(v)\n\n top_element.appendChild(e)\n\n with open(foxml, \"w\") as fo:\n fo.write(doc.toprettyxml())", "def _set_path_to_xml(self):\n\n self._path_to_xml = Path(__file__).parent / Path(XML_TABLE_PATH.format(self._instrument,\n self._original_file_type))\n\n if not self._path_to_xml.exists():\n raise ValueError(INVALID_FILE_TYPE_ERROR.format(self._original_file_type, self._instrument))", "def isExist(data):\n return True/False", "def test_output_exists():\n global out_dir, cor_dir\n assert(path.exists(path.join(out_dir, 'oshea_similarity.json')))", "def test_resourcesXML(self):\n fileName = self.mktemp()\n fp = FilePath(fileName)\n fp.setContent(oldResourcesFormat)\n upgradeResourcesXML(fp)\n self.assertEquals(fp.getContent(), newResourcesFormat)", "def test_generate_sample_sheet(self):\n pass", "def test_negative_file_and_xml(self):\n xml_object = ET.parse(self.xmlfile)\n with self.assertRaises((IOError, OSError)):\n glymur.jp2box.XMLBox(filename=self.xmlfile, xml=xml_object)", "def example_xml43(example_xml_file41):\n return etree.fromstring(example_xml_file43.encode('utf-8'))", "def test_duplicate_standard_name(self):\n # Setup test\n infilename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_simple.xml\")\n filename = os.path.join(_TMP_DIR, \"reg_duplicate_standard_name.xml\")\n out_source_name = \"physics_types_duplicate_standard_name\"\n out_source = os.path.join(_TMP_DIR, out_source_name + '.F90')\n out_meta = os.path.join(_TMP_DIR, out_source_name + '.meta')\n remove_files([out_source, out_meta])\n tree, root = read_xml_file(infilename)\n # Change output filename\n for obj in root:\n oname = obj.get('name')\n if (obj.tag == 'file') and (oname == 'physics_types_simple'):\n obj.set('name', out_source_name)\n new_var = ET.SubElement(obj, \"variable\")\n new_var.set(\"local_name\", \"french_fries\")\n new_var.set(\"standard_name\", \"latitude\")\n new_var.set(\"units\", \"radians\")\n new_var.set(\"type\", \"real\")\n new_var.set(\"kind\", \"kind_phys\")\n dims_elem = ET.SubElement(new_var, \"dimensions\")\n dims_elem.text = 'horizontal_dimension'\n break\n # End if\n # End for\n tree.write(filename)\n\n # Run test\n with self.assertRaises(ValueError) as verr:\n _ = gen_registry(filename, 'eul', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # End with\n # Check exception message\n emsg = \"duplicate variable standard_name, 'latitude' from \"\n emsg += \"'french_fries' in 'physics_types_duplicate_standard_name'\"\n emsg += \", already defined with local_name, 'latitude'\"\n self.assertEqual(emsg, str(verr.exception))\n # Make sure no output files were created\n self.assertFalse(os.path.exists(out_meta))\n self.assertFalse(os.path.exists(out_source))", "def 
check_file_exist(self):\n return False", "def test_read_file():\n z = XPIManager(get_path('xpi/install_rdf_only.xpi'))\n assert z.read('install.rdf') is not None", "def test_predicate4(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar.where(xpb.attr('name').not_equals('abc'))\n xp = xp.where(xpb.attr('x').equals('foo'))\n exp = '/foo/bar[@name != \"abc\"][@x = \"foo\"]'\n self.assertEqual(xp.tostring(), exp)", "def test_verify_unzip(self):\n assert os.path.exists(\n os.path.join(\n settings.MEDIA_ROOT,\n \"indices\",\n \"test-index\",\n \"data\",\n \"sample.txt\"\n )\n )", "def test_exists_true(self):\n self.assertTrue(Sample.exists(self.sample_id, self.sample_template))", "def test_xmloutput_view(self):\n print 'Running %s ...' % getName()\n \n self.sequenceListingFixture.create_sequence_instance(self.sequenceListing)\n \n response = self.client.get(reverse('sequencelistings:xmloutput', args=[self.sequenceListing.pk, ]))\n self.assertEqual(response.status_code, 200)\n# test that the page returns expected html contents\n# self.assertContains(response, '%s.xml' % self.sequenceListing.fileName)\n self.assertContains(response, self.sequenceListing.fileName)", "def _need_generate(paths):\r\n if not os.path.exists(paths.generated_dir):\r\n return True\r\n\r\n if not os.path.exists(paths.index_file):\r\n return True\r\n\r\n # Use the index file to determine if regeneration is necessary\r\n with open(paths.index_file, 'r',newline='\\n') as index_file:\r\n indexed = [item for item in\r\n index_file.read().split('\\n') if len(item) != 0 and\r\n not item.startswith(\"#\")]\r\n return indexed != paths.resource_files" ]
[ "0.5755427", "0.5572019", "0.5549192", "0.5519365", "0.5482828", "0.5464173", "0.5427079", "0.54160964", "0.536601", "0.5338898", "0.52344614", "0.5227911", "0.51953274", "0.5182521", "0.51584786", "0.51524824", "0.51514745", "0.51503146", "0.511027", "0.5074203", "0.5046354", "0.503389", "0.50189185", "0.501048", "0.50082016", "0.50032926", "0.4969945", "0.4964399", "0.49625763", "0.49601397", "0.49531683", "0.4920993", "0.49191195", "0.49034697", "0.4864428", "0.4853922", "0.48519278", "0.48500398", "0.48478904", "0.48434386", "0.48360553", "0.483164", "0.48283836", "0.48276672", "0.4826746", "0.48221388", "0.4820197", "0.4818247", "0.48045155", "0.48015872", "0.4796322", "0.47905782", "0.47900566", "0.47882983", "0.47780943", "0.47775993", "0.4774293", "0.47632882", "0.47608277", "0.47599152", "0.47550705", "0.47497493", "0.4741375", "0.4734563", "0.47324103", "0.47287878", "0.47215125", "0.47187984", "0.4716064", "0.47110233", "0.4711012", "0.4706559", "0.47056934", "0.47030586", "0.47023386", "0.4697539", "0.46970925", "0.46966684", "0.46914452", "0.46881047", "0.46803975", "0.46758932", "0.46708822", "0.46706775", "0.4667462", "0.46618056", "0.46566275", "0.4652985", "0.46504042", "0.4645476", "0.46431816", "0.463976", "0.46339816", "0.46335384", "0.4632274", "0.46320617", "0.4631103", "0.46305624", "0.46282983", "0.46265066" ]
0.46658367
85
Any format, prefix, suffix, html info in attrs dict
def __init__(self, name, attrs={}): TextFormat.__init__(self, name, attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = attr['value']\r\n\r\n return format", "def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):\n\n # Merge the attr dict and kw dict into a single attributes\n # dictionary (rewriting any attribute names, extracting\n # namespaces, and merging some values like css classes).\n attributes = {} # dict of key=(namespace,name): value=attribute_value\n if attr:\n for a, v in attr.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n if kw:\n for a, v in kw.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n\n # Add title attribute if missing, but it has an alt.\n if ('html', 'alt') in attributes and ('html', 'title') not in attributes:\n attributes[('html', 'title')] = attributes[('html', 'alt')]\n\n # Force both lang and xml:lang to be present and identical if\n # either exists. The lang takes precedence over xml:lang if\n # both exist.\n #if ('html', 'lang') in attributes:\n # attributes[('xml', 'lang')] = attributes[('html', 'lang')]\n #elif ('xml', 'lang') in attributes:\n # attributes[('html', 'lang')] = attributes[('xml', 'lang')]\n\n # Check all the HTML attributes to see if they are known and\n # allowed. Ignore attributes if in non-HTML namespaces.\n if allowed_attrs:\n for name in [key[1] for key in attributes if key[0] == 'html']:\n if name in _common_attributes or name in allowed_attrs:\n pass\n elif name.startswith('on'):\n pass # Too many event handlers to enumerate, just let them all pass.\n else:\n # Unknown or unallowed attribute.\n err = 'Illegal HTML attribute \"%s\" passed to formatter' % name\n raise ValueError(err)\n\n # Finally, format them all as a single string.\n if attributes:\n # Construct a formatted string containing all attributes\n # with their values escaped. Any html:* namespace\n # attributes drop the namespace prefix. 
We build this by\n # separating the attributes into three categories:\n #\n # * Those without any namespace (should only be xmlns attributes)\n # * Those in the HTML namespace (we drop the html: prefix for these)\n # * Those in any other non-HTML namespace, including xml:\n\n xmlnslist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if not k[0]]\n htmllist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] == 'html']\n otherlist = ['%s:%s=\"%s\"' % (k[0], k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] and k[0] != 'html']\n\n # Join all these lists together in a space-separated string. Also\n # prefix the whole thing with a space too.\n htmllist.sort()\n otherlist.sort()\n all = [''] + xmlnslist + htmllist + otherlist\n return ' '.join(all)\n return ''", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def back_to_tag(tag, attrs):\n sol = '<' + tag\n for (prop, val) in attrs:\n sol += ' ' + prop + '=\"' + val + '\"'\n sol += '>'\n return sol", "def add_attrs(value, arg):\n try:\n # Split list on comma\n kv_pairs = arg.split(\",\")\n except ValueError:\n raise template.TemplateSyntaxError(\n \"add_attrs requires as an argument a string in the format 'key:value, key1:value1, key2:value2...'\"\n )\n\n\n # Create dictionary\n html_attrs = dict()\n\n # Clean items and add attribute pairs to dictionary\n for item in kv_pairs:\n item = item.strip()\n k, v = item.split(\":\")\n html_attrs.update({k.strip():v.strip()})\n\n return value.as_widget(attrs=html_attrs)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def 
string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def attrs(**kwds):\n\n def decorate(f):\n for k in kwds:\n setattr(f, k, kwds[k])\n return f\n\n return decorate", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc = '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def _attrs(self, element, attrs):\n for attr, val in list(attrs.items()):\n element.setAttribute(attr, val)\n return element", "def date_attrs(name):\n attrs = battrs(name)\n attrs.update({'class': 'form-control datepicker'})\n return attrs", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def extend_attribute_dictionary(attributedict, ns, name, value):\n\n key = ns, name\n if 
value is None:\n if key in attributedict:\n del attributedict[key]\n else:\n if ns == 'html' and key in attributedict:\n if name == 'class':\n # CSS classes are appended by space-separated list\n value = attributedict[key] + ' ' + value\n elif name == 'style':\n # CSS styles are appended by semicolon-separated rules list\n value = attributedict[key] + '; ' + value\n elif name in _html_attribute_boolflags:\n # All attributes must have a value. According to XHTML those\n # traditionally used as flags should have their value set to\n # the same as the attribute name.\n value = name\n attributedict[key] = value", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def parse_tag_attrs(tag_str, options_d=None, font_d=None, case=\"\", **kwargs):\n attr_b = kwargs.pop(\"attr\", \"\")\n auto_b = kwargs.pop(\"auto\", False)\n font_d = kwargs.pop(\"font_d\", font_d or {})\n options_d = kwargs.pop(\"options_d\", options_d or {})\n case = kwargs.pop(\"case\", case)\n widget = kwargs.pop(\"widget\", None)\n text_w = kwargs.pop(text_s, None)\n bad_opts = []\n # INTs: height repeatdelay repeatinterval underline width; size fun fov\n for keyval in split_attrs(tag_str):\n if \"=\" in keyval:\n key, val = keyval.split(\"=\")\n val = unquote(val)\n elif keyval:\n key, val = keyval, None\n else:\n continue\n key = key.lower()\n key2, key3, key4 = key[:2], key[:3], key[:4]\n lowval = val.lower() if val else val\n key = unalias(key)\n kalias = alias(key)\n if val == \"None\": # in ('False', 'None') #\n pass\n elif key3 in (\n bg_s,\n background_s[:3],\n fg_s,\n foreground_s[:3],\n ) or kalias in (bg_s, fg_s):\n options_d.update(**{key: val})\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n options_d.update(**{key: val})\n if auto_b and compound_s not in options_d:\n options_d.update(compound=tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],) or kalias == bd_s:\n options_d.update(borderwidth=val)\n elif key4 in (command_s[:4], compound_s[:4],) or kalias in (\n command_as,\n compound_as,\n ):\n options_d.update(**{key: val})\n elif (\n key2 in (height_s[:2], width_s[:2])\n or key3 in (repeatdelay_s[:3], repeatinterval_s[:3])\n or kalias\n in (height_as, width_as, repeatdelay_as, repeatinterval_as)\n ):\n options_d.update(**{key: int(val)})\n elif (\n key2 in (cursor_s[:2],)\n or key3 == font_s[:3]\n or kalias in (cursor_as, font_as)\n ):\n options_d.update(**{key: val})\n elif key2 in (\"r\", relief_s[:2],) or kalias == relief_as:\n options_d.update(relief=val)\n if auto_b and borderwidth_s not in options_d and val != tk.FLAT:\n options_d.update(borderwidth=str(1))\n elif key2 == underline_s[:2] or kalias == underline_as:\n options_d.update(underline=-1 if val is None else int(val))\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ) or kalias in (selectbackground_as, selectforeground_as):\n options_d.update(**{key: val})\n # special for fonts\n elif key2 in (family_s[:2],) or kalias == family_as:\n font_d[family_s] = val\n elif key2 in (size_s[:2],) 
or kalias == size_as:\n try:\n font_d[size_s] = int(val)\n except ValueError:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: ERROR Setting Font Size to %r\" % val,\n Raise=True,\n )\n elif key3 in (bold_as, tk_font.BOLD[:3]) or kalias == bold_as:\n font_d[weight_s] = (\n tk_font.BOLD\n if str(val) not in (\"0\", \"False\",)\n else tk_font.NORMAL\n )\n elif key2 in (weight_s[:2],) or kalias == weight_as:\n font_d[weight_s] = val\n elif key2 in (italic_as, tk_font.ITALIC[:2]) or kalias == italic_as:\n font_d[slant_s] = (\n tk_font.ITALIC\n if str(val) not in (\"0\", \"False\",)\n else tk_font.ROMAN\n )\n elif key2 in (slant_s[:2],) or kalias == slant_as:\n font_d[slant_s] = val\n elif (\n key3 in (funderline_as, funderline_s[:3])\n or kalias == funderline_as\n ):\n font_d[underline_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n elif (\n key3 in (foverstrike_as, foverstrike_s[:3])\n or kalias == foverstrike_as\n ):\n font_d[overstrike_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n # special \"case\" implementation\n elif key3 in (case_s[:3],) or kalias == case_as:\n for s in (upper_s, capitalize_s, lower_s, title_s, swapcase_s):\n if s.startswith(lowval):\n case = s if s != capitalize_s else upper_s\n break\n elif (\n key2 == upper_s[:2]\n or key3 in (capitalize_s[:3],)\n or kalias in (upper_as, capitalize_as)\n ):\n if str(val) not in (\"0\", \"False\",):\n case = upper_s\n elif key2 in (lower_s[:2],) or kalias == lower_as:\n if str(val) not in (\"0\", \"False\",):\n case = lower_s\n elif key2 == title_s[:2] or kalias == title_as:\n if str(val) not in (\"0\", \"False\",):\n case = title_s\n elif key2 == swapcase_s[:2] or kalias == swapcase_as:\n if str(val) not in (\"0\", \"False\",):\n case = swapcase_s\n elif key in ():\n bad_opts.append((key, val))\n else:\n options_d.update(**{key: val})\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n if attr_b:\n return (\n case\n if attr_b == case_s\n else options_d.get(attr_b, font_d.get(attr_b))\n )\n return options_d, font_d, case", "def gen_tag_attrs(widget=None, options_d=None, font=None, case=None, **kwargs):\n auto_b = kwargs.get(\"auto\", False)\n case = kwargs.get(case_s, case)\n extend_b = kwargs.get(\"extend\", False)\n font = kwargs.pop(\"font\", font or {})\n index_i = kwargs.pop(\"index\", None)\n kmode_s = kwargs.get(\"kmode\", \"\") # a=alias, o=options, ''=unchanged\n options_d = kwargs.pop(\"options\", options_d or {})\n pare_b = kwargs.get(\"pare\", True)\n widget = kwargs.pop(\"widget\", widget)\n text_w = kwargs.get(text_s, None)\n recurse_b = kwargs.pop(\"recurse\", widget and isinstance(widget, TTWidget))\n fmt_s = \"\"\n font_d = {}\n w_font_d, w_options_d = {}, {}\n if index_i is not None and widget is None:\n raise Exception(\"Cannot set 'index' when 'widget' is None\")\n if widget: # and isinstance(widget, TTWidget): #\n excludes_t = () if widget.emulation_b else ()\n w_options_d = {\n k: v[-1]\n for k, v in widget.config().items()\n if len(v) == 5 and str(v[-1]) != str(v[-2]) and k not in excludes_t\n }\n try:\n w_options_d[case_s] = widget.case\n except AttributeError:\n pass\n w_font = widget.cget(font_s) # w_options_d.pop(font_s, None)\n w_font_d = get_font_dict(w_font) if w_font else {}\n if pare_b and w_font_d:\n def_w_font = widget.config(font_s)[-2]\n def_w_font_d = get_font_dict(def_w_font)\n w_font_d = pare_dict(w_font_d, def_w_font_d)\n if font:\n if isinstance(font, str):\n try:\n font = tk_font.nametofont(font)\n except 
tk.TclError:\n pass\n elif type(font) in (list, tuple):\n font = tk_font.Font(font=font)\n if isinstance(font, tk_font.Font):\n font = font.actual()\n if isinstance(font, dict):\n font_d = font\n if case: # is not None:\n options_d = _merge_dicts(options_d, dict(case=case))\n d = _merge_dicts(\n w_options_d,\n convert_font_dict_to_ttoptions_dict(w_font_d),\n options_d,\n convert_font_dict_to_ttoptions_dict(font_d),\n kwargs,\n )\n bad_opts = []\n for key, val in d.items():\n key = key.lower()\n if key in (\"auto\", \"extend\", \"kmode\", \"pare\",): # text_s, ): #\n continue\n key2, key3, key4 = key[:2], key[:3], key[:4]\n kalias = alias(key)\n koption = unalias(key)\n if kmode_s:\n if kmode_s[0] == \"a\": # alias\n keyout = kalias\n kfunc = alias\n auto_cpd, auto_bd = compound_as, bd_s\n elif kmode_s[0] == \"o\": # option\n keyout = koption\n kfunc = unalias\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n else:\n keyout = key\n kfunc = str\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n if val:\n val = quote(val)\n if (\n key3 in (bg_s, background_s[:3], fg_s, foreground_s[:3])\n or key2 == underline_s[:2]\n or kalias in (bg_s, fg_s, underline_as)\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_cpd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_cpd, tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],):\n if \"%s=%s \" % (auto_bd, 1) in fmt_s:\n if val != 1:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_bd, 1), \"%s=%s \" % (keyout, val)\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key4 in (compound_s[:4],) or kalias == compound_as:\n if \"%s=%s \" % (auto_cpd, tk.CENTER) in fmt_s:\n if val != tk.CENTER:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_cpd, tk.CENTER),\n \"%s=%s \" % (keyout, val),\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == cursor_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == font_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, get_named_font(val))\n elif key2 in (relief_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_bd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_bd, 1)\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sbd_s,\n selectborderwidth_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n # special for fonts\n elif key2 in (family_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (size_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (weight_s[:2],):\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.BOLD),\n 1\n if isinstance(val, str) and val.lower() == tk_font.BOLD\n else 0,\n )\n elif key2 == slant_s[:2]:\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.ITALIC),\n 1\n if isinstance(val, str) and val.lower() == tk_font.ITALIC\n else 0,\n )\n elif key3 in (funderline_as, funderline_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(funderline_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n elif key3 in (foverstrike_as, foverstrike_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(foverstrike_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n # special \"case\" implementation\n elif key3 == case_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(case_s), val)\n elif key2 == upper_s[:2] or key3 == capitalize_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(upper_s), val)\n elif key2 in (lower_s[:2], title_s[:2], swapcase_s[:2]):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key in 
():\n bad_opts.append((key, val))\n elif key in (text_s, text_as):\n if extend_b or widget:\n fmt_s += \"%s=%s \" % (keyout, val)\n else:\n # bad_opts.append((key, val))\n fmt_s += \"%s=%s \" % (keyout, val)\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n fmt = fmt_s.strip()\n if widget and isinstance(widget, TTWidget) and recurse_b:\n fmt = [\n fmt,\n ]\n for _, gathering in widget._get_kids(items=True):\n child = gathering[\"label\"]\n case = gathering.get(case_s, \"\")\n kid_options = {\n k: v[-1]\n for k, v in child.config().items()\n if len(v) == 5\n and str(v[-1]) != str(v[-2])\n and (k, v[-1]) not in w_options_d.items()\n and not (k in label_override_d and str(v[-1]) == \"0\")\n } #\n cf = kid_options.pop(font_s, None)\n cdf = child.config(font_s)[-2]\n if cf != cdf:\n c_font_d = pare_dict(get_font_dict(cf), get_font_dict(cdf))\n else:\n c_font_d = {}\n if case:\n kid_options.update(case=case)\n fmt.append(\n gen_tag_attrs(options=kid_options, font=c_font_d, **kwargs)\n )\n return fmt if index_i is None else fmt[index_i]", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def prepare_node_attrs(self):", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs", "def attkey_to_SVG_attribs(self,k):\n atts= k.split('@')\n o= ''\n acodes= {'C':'stroke','W':'stroke-width','S':'stroke-dasharray','O':'stroke-opacity'}\n for a in atts:\n if a[0] in acodes:\n o+= '%s=\"%s\" ' % (acodes[a[0]],a[1:])\n# elif a[0] == 'S': # Maybe do something special like this.\n# o+= 'stroke-dasharray=\"%\" ' % a[1:]\n return o", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def fix_attributes(string):\n defs = re.compile('<dl class=\"attribute\">(?P<descrip>.*?)</dl>',flags=re.DOTALL)\n name = re.compile('<code class=\"descclassname\">(?P<name>[^<]*)</code>')\n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefsub = ''\n remnsub = remain[match.start(1):match.end(1)]\n descrip = name.search(remnsub)\n if descrip:\n prefix += remnsub[:descrip.start()]\n prefix += remnsub[descrip.end():]\n prefix += remain[match.end(1):match.end(0)]\n else:\n prefix += remain[match.start(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def read_attribs(self):\n\n attribs = {}\n while 
self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def extensible_attributes():\n return 'extensibleattributedef?'", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def 
parse_tag_attrs(\n self, tags_str, options_d=None, font_d=None, case=\"\", **kwargs\n ):\n return parse_tag_attrs(\n tags_str,\n options_d,\n font_d,\n case,\n widget=self,\n text=getattr(self, \"debug_text\", None),\n **kwargs\n )", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def get_attributes(self) -> Dict[str, str]:\n pass", "def transform(attrs: dict) -> dict:\n\n pass", "def get_html_element_attributes(self):\n html_element_attributes = {\n 'class': self.css_classes or False, # Fall back to false to avoid class=\"\"\n }\n if self.should_render_as_link():\n html_element_attributes['href'] = self.url\n return html_element_attributes", "def create_descr(self, attr_name):", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def set_attrs(self, username, attrs):\n pass", "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,\r\n prettyPrint=False, indentLevel=0):\r\n\r\n encodedName = self.toEncoding(self.name, encoding)\r\n\r\n attrs = []\r\n if self.attrs:\r\n for key, val in self.attrs:\r\n fmt = '%s=\"%s\"'\r\n if isString(val):\r\n if self.containsSubstitutions and '%SOUP-ENCODING%' in val:\r\n val = self.substituteEncoding(val, encoding)\r\n\r\n # The attribute value either:\r\n #\r\n # * Contains no embedded double quotes or single quotes.\r\n # No problem: we enclose it in double quotes.\r\n # * Contains embedded single quotes. No problem:\r\n # double quotes work here too.\r\n # * Contains embedded double quotes. No problem:\r\n # we enclose it in single quotes.\r\n # * Embeds both single _and_ double quotes. This\r\n # can't happen naturally, but it can happen if\r\n # you modify an attribute value after parsing\r\n # the document. Now we have a bit of a\r\n # problem. We solve it by enclosing the\r\n # attribute in single quotes, and escaping any\r\n # embedded single quotes to XML entities.\r\n if '\"' in val:\r\n fmt = \"%s='%s'\"\r\n if \"'\" in val:\r\n # TODO: replace with apos when\r\n # appropriate.\r\n val = val.replace(\"'\", \"&squot;\")\r\n\r\n # Now we're okay w/r/t quotes. 
But the attribute\r\n # value might also contain angle brackets, or\r\n # ampersands that aren't part of entities. We need\r\n # to escape those to XML entities too.\r\n val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)\r\n\r\n attrs.append(fmt % (self.toEncoding(key, encoding),\r\n self.toEncoding(val, encoding)))\r\n close = ''\r\n closeTag = ''\r\n if self.isSelfClosing:\r\n close = ' /'\r\n else:\r\n closeTag = '</%s>' % encodedName\r\n\r\n indentTag, indentContents = 0, 0\r\n if prettyPrint:\r\n indentTag = indentLevel\r\n space = (' ' * (indentTag-1))\r\n indentContents = indentTag + 1\r\n contents = self.renderContents(encoding, prettyPrint, indentContents)\r\n if self.hidden:\r\n s = contents\r\n else:\r\n s = []\r\n attributeString = ''\r\n if attrs:\r\n attributeString = ' ' + ' '.join(attrs)\r\n if prettyPrint:\r\n s.append(space)\r\n s.append('<%s%s%s>' % (encodedName, attributeString, close))\r\n if prettyPrint:\r\n s.append(\"\\n\")\r\n s.append(contents)\r\n if prettyPrint and contents and contents[-1] != \"\\n\":\r\n s.append(\"\\n\")\r\n if prettyPrint and closeTag:\r\n s.append(space)\r\n s.append(closeTag)\r\n if prettyPrint and closeTag and self.nextSibling:\r\n s.append(\"\\n\")\r\n s = ''.join(s)\r\n return s", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def convert_attributes(cls, attrs):\n return {}", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def widget_attrs(self, widget):\n\n attrs = super(RelateField, self).widget_attrs(widget)\n\n attrs.update({'content_type': self.content_types})\n\n return attrs", "def attributes(doc, header, renderer=Attribute, item_class=DefinitionItem):\n items = doc.extract_items(item_class)\n lines = []\n renderer = renderer()\n for item in items:\n renderer.item = item\n lines += renderer.to_rst()\n lines.append('')\n return lines", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n info[ATTR_NAME] = info[ATTR_PROPERTIES]['Name'].replace('\\xa0', ' ')\n return info", "def img(self, **kwargs):\n attrs = ''\n for item in 
kwargs.items():\n if not item[0] in IMGATTRS:\n raise AttributeError, 'Invalid img tag attribute: %s'%item[0]\n attrs += '%s=\"%s\" '%item\n return '<img src=\"%s\" %s>'%(str(self),attrs)", "def gen_tag_attrs(self, *a, **kw):\n if kw.get(\"widget\", sentinel) is not None:\n raise Exception(\n \"TTToolTip.gen_tag_attrs(): 'widget' keyword must be set\"\n \" to None\"\n )\n return gen_tag_attrs(None, *a, **kw)", "def init_attrs(self):\n raise NotImplementedError", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def _style_to_basic_html_attributes(self, element, style_content,\n force=False):\n if style_content.count('}') and \\\n style_content.count('{') == style_content.count('{'):\n style_content = style_content.split('}')[0][1:]\n\n attributes = {}\n for rule in style_content.split(';'):\n split = rule.split(':')\n if len(split) != 2:\n continue\n key = split[0].strip()\n value = split[1]\n\n if key == 'text-align':\n attributes['align'] = value.strip()\n elif key == 'background-color':\n attributes['bgcolor'] = value.strip()\n elif key == 'width' or key == 'height':\n value = value.strip()\n if value.endswith('px'):\n value = value[:-2]\n attributes[key] = value\n\n for key, value in list(attributes.items()):\n if key in element.attrib and not force or key in self.disable_basic_attributes:\n # already set, don't dare to overwrite\n continue\n element.attrib[key] = value", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def domAttributesToString( node ):\n strOut = \"node has %d attribute(s):\\n\" % node.attributes.length;\n for i in range(node.attributes.length):\n attr = node.attributes.item(i);\n strOut += \"- %s:'%s'\\n\" % (attr.name, attr.value );\n return strOut;", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_", "def replace_tag_attributes(code_attrs, tag, tag_attrs):\n\n new_attrs = code_attrs.copy()\n for key, value in tag_attrs.items():\n if key in new_attrs:\n new_attrs[key] = new_attrs[key].replace(tag, value)\n\n return new_attrs", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def as_html(self):\n return mark_safe(\" 
\".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def format_link(attrs: Dict[tuple, str], new: bool = False):\n try:\n p = urlparse(attrs[(None, 'href')])\n except KeyError:\n # no href, probably an anchor\n return attrs\n\n if not any([p.scheme, p.netloc, p.path]) and p.fragment:\n # the link isn't going anywhere, probably a fragment link\n return attrs\n\n c = urlparse(settings.SITE_URL)\n if p.netloc != c.netloc:\n # link is external - secure and mark\n attrs[(None, 'target')] = '_blank'\n attrs[(None, 'class')] = attrs.get((None, 'class'), '') + ' external'\n attrs[(None, 'rel')] = 'nofollow noopener noreferrer'\n\n return attrs", "def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)", "def parseAttrs(self,attrs,date_type):\n\tattrs=copy.copy(attrs) #make sure we don't change user/group attributes\n \tattr_holders=self.getAttrHolders(attrs)\n\tmap(lambda x:x.setDateType(date_type),attr_holders)\n\tmap(lambda x:attrs.update(x.getParsedDic()),attr_holders)\n\treturn attrs", "def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs" ]
[ "0.735201", "0.6754294", "0.67166066", "0.67071074", "0.66780305", "0.65807486", "0.6522693", "0.6522693", "0.65187657", "0.6471306", "0.6269984", "0.62653935", "0.6153201", "0.6090701", "0.60323846", "0.60278016", "0.6011661", "0.60042846", "0.59841794", "0.5941162", "0.59205276", "0.5918955", "0.59121054", "0.5903962", "0.5884743", "0.5876164", "0.5857109", "0.5851559", "0.583173", "0.58274394", "0.5816038", "0.58061635", "0.5784312", "0.5755998", "0.5755998", "0.57360405", "0.57051307", "0.5701552", "0.5687975", "0.5650812", "0.5618766", "0.561154", "0.5605911", "0.56030387", "0.5602799", "0.55926436", "0.5587559", "0.5571399", "0.5567558", "0.55631375", "0.555545", "0.5550559", "0.55490625", "0.55470836", "0.55410224", "0.5519966", "0.55098814", "0.5492064", "0.547102", "0.5470936", "0.54692423", "0.5467515", "0.54661024", "0.54518676", "0.54405665", "0.5438651", "0.54003173", "0.5388153", "0.5382598", "0.5375904", "0.5375076", "0.53706104", "0.5359634", "0.5354708", "0.5354708", "0.5331472", "0.5324531", "0.53227526", "0.5316361", "0.5309617", "0.5308968", "0.53067", "0.5306182", "0.5299369", "0.52990687", "0.5287107", "0.52791494", "0.5277907", "0.5276578", "0.52742803", "0.5270845", "0.52608305", "0.52524847", "0.5244876", "0.5239417", "0.5234171", "0.5224983", "0.5215326", "0.521457", "0.5212088", "0.5203955" ]
0.0
-1
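Several of the hard negatives in the rows around this point circle one recurring pattern: flattening a dict of HTML attributes into a single escaped attribute string (html_attrs, flatatt, attrs_to_string and similar). As a reading aid, here is a minimal self-contained Python sketch of that pattern; the function name, the underscore-to-hyphen convention, and the empty-value filter are illustrative assumptions, not taken from any dataset row.

from html import escape

def render_attrs(attrs):
    # Flatten a dict into an HTML attribute string, escaping each value.
    # Underscores in keys become hyphens (e.g. data_id -> data-id).
    return "".join(
        ' %s="%s"' % (key.replace("_", "-"), escape(str(value), quote=True))
        for key, value in attrs.items()
        if value not in (None, "")
    )

# render_attrs({"src": "a.png", "data_id": 42}) -> ' src="a.png" data-id="42"'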
Any format, prefix, suffix, html info in attrs dict
def __init__(self, name, attrs={}):
    TextFormat.__init__(self, name, attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = attr['value']\r\n\r\n return format", "def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):\n\n # Merge the attr dict and kw dict into a single attributes\n # dictionary (rewriting any attribute names, extracting\n # namespaces, and merging some values like css classes).\n attributes = {} # dict of key=(namespace,name): value=attribute_value\n if attr:\n for a, v in attr.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n if kw:\n for a, v in kw.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n\n # Add title attribute if missing, but it has an alt.\n if ('html', 'alt') in attributes and ('html', 'title') not in attributes:\n attributes[('html', 'title')] = attributes[('html', 'alt')]\n\n # Force both lang and xml:lang to be present and identical if\n # either exists. The lang takes precedence over xml:lang if\n # both exist.\n #if ('html', 'lang') in attributes:\n # attributes[('xml', 'lang')] = attributes[('html', 'lang')]\n #elif ('xml', 'lang') in attributes:\n # attributes[('html', 'lang')] = attributes[('xml', 'lang')]\n\n # Check all the HTML attributes to see if they are known and\n # allowed. Ignore attributes if in non-HTML namespaces.\n if allowed_attrs:\n for name in [key[1] for key in attributes if key[0] == 'html']:\n if name in _common_attributes or name in allowed_attrs:\n pass\n elif name.startswith('on'):\n pass # Too many event handlers to enumerate, just let them all pass.\n else:\n # Unknown or unallowed attribute.\n err = 'Illegal HTML attribute \"%s\" passed to formatter' % name\n raise ValueError(err)\n\n # Finally, format them all as a single string.\n if attributes:\n # Construct a formatted string containing all attributes\n # with their values escaped. Any html:* namespace\n # attributes drop the namespace prefix. 
We build this by\n # separating the attributes into three categories:\n #\n # * Those without any namespace (should only be xmlns attributes)\n # * Those in the HTML namespace (we drop the html: prefix for these)\n # * Those in any other non-HTML namespace, including xml:\n\n xmlnslist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if not k[0]]\n htmllist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] == 'html']\n otherlist = ['%s:%s=\"%s\"' % (k[0], k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] and k[0] != 'html']\n\n # Join all these lists together in a space-separated string. Also\n # prefix the whole thing with a space too.\n htmllist.sort()\n otherlist.sort()\n all = [''] + xmlnslist + htmllist + otherlist\n return ' '.join(all)\n return ''", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def back_to_tag(tag, attrs):\n sol = '<' + tag\n for (prop, val) in attrs:\n sol += ' ' + prop + '=\"' + val + '\"'\n sol += '>'\n return sol", "def add_attrs(value, arg):\n try:\n # Split list on comma\n kv_pairs = arg.split(\",\")\n except ValueError:\n raise template.TemplateSyntaxError(\n \"add_attrs requires as an argument a string in the format 'key:value, key1:value1, key2:value2...'\"\n )\n\n\n # Create dictionary\n html_attrs = dict()\n\n # Clean items and add attribute pairs to dictionary\n for item in kv_pairs:\n item = item.strip()\n k, v = item.split(\":\")\n html_attrs.update({k.strip():v.strip()})\n\n return value.as_widget(attrs=html_attrs)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def 
string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def attrs(**kwds):\n\n def decorate(f):\n for k in kwds:\n setattr(f, k, kwds[k])\n return f\n\n return decorate", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc = '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def _attrs(self, element, attrs):\n for attr, val in list(attrs.items()):\n element.setAttribute(attr, val)\n return element", "def date_attrs(name):\n attrs = battrs(name)\n attrs.update({'class': 'form-control datepicker'})\n return attrs", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def extend_attribute_dictionary(attributedict, ns, name, value):\n\n key = ns, name\n if 
value is None:\n if key in attributedict:\n del attributedict[key]\n else:\n if ns == 'html' and key in attributedict:\n if name == 'class':\n # CSS classes are appended by space-separated list\n value = attributedict[key] + ' ' + value\n elif name == 'style':\n # CSS styles are appended by semicolon-separated rules list\n value = attributedict[key] + '; ' + value\n elif name in _html_attribute_boolflags:\n # All attributes must have a value. According to XHTML those\n # traditionally used as flags should have their value set to\n # the same as the attribute name.\n value = name\n attributedict[key] = value", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def parse_tag_attrs(tag_str, options_d=None, font_d=None, case=\"\", **kwargs):\n attr_b = kwargs.pop(\"attr\", \"\")\n auto_b = kwargs.pop(\"auto\", False)\n font_d = kwargs.pop(\"font_d\", font_d or {})\n options_d = kwargs.pop(\"options_d\", options_d or {})\n case = kwargs.pop(\"case\", case)\n widget = kwargs.pop(\"widget\", None)\n text_w = kwargs.pop(text_s, None)\n bad_opts = []\n # INTs: height repeatdelay repeatinterval underline width; size fun fov\n for keyval in split_attrs(tag_str):\n if \"=\" in keyval:\n key, val = keyval.split(\"=\")\n val = unquote(val)\n elif keyval:\n key, val = keyval, None\n else:\n continue\n key = key.lower()\n key2, key3, key4 = key[:2], key[:3], key[:4]\n lowval = val.lower() if val else val\n key = unalias(key)\n kalias = alias(key)\n if val == \"None\": # in ('False', 'None') #\n pass\n elif key3 in (\n bg_s,\n background_s[:3],\n fg_s,\n foreground_s[:3],\n ) or kalias in (bg_s, fg_s):\n options_d.update(**{key: val})\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n options_d.update(**{key: val})\n if auto_b and compound_s not in options_d:\n options_d.update(compound=tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],) or kalias == bd_s:\n options_d.update(borderwidth=val)\n elif key4 in (command_s[:4], compound_s[:4],) or kalias in (\n command_as,\n compound_as,\n ):\n options_d.update(**{key: val})\n elif (\n key2 in (height_s[:2], width_s[:2])\n or key3 in (repeatdelay_s[:3], repeatinterval_s[:3])\n or kalias\n in (height_as, width_as, repeatdelay_as, repeatinterval_as)\n ):\n options_d.update(**{key: int(val)})\n elif (\n key2 in (cursor_s[:2],)\n or key3 == font_s[:3]\n or kalias in (cursor_as, font_as)\n ):\n options_d.update(**{key: val})\n elif key2 in (\"r\", relief_s[:2],) or kalias == relief_as:\n options_d.update(relief=val)\n if auto_b and borderwidth_s not in options_d and val != tk.FLAT:\n options_d.update(borderwidth=str(1))\n elif key2 == underline_s[:2] or kalias == underline_as:\n options_d.update(underline=-1 if val is None else int(val))\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ) or kalias in (selectbackground_as, selectforeground_as):\n options_d.update(**{key: val})\n # special for fonts\n elif key2 in (family_s[:2],) or kalias == family_as:\n font_d[family_s] = val\n elif key2 in (size_s[:2],) 
or kalias == size_as:\n try:\n font_d[size_s] = int(val)\n except ValueError:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: ERROR Setting Font Size to %r\" % val,\n Raise=True,\n )\n elif key3 in (bold_as, tk_font.BOLD[:3]) or kalias == bold_as:\n font_d[weight_s] = (\n tk_font.BOLD\n if str(val) not in (\"0\", \"False\",)\n else tk_font.NORMAL\n )\n elif key2 in (weight_s[:2],) or kalias == weight_as:\n font_d[weight_s] = val\n elif key2 in (italic_as, tk_font.ITALIC[:2]) or kalias == italic_as:\n font_d[slant_s] = (\n tk_font.ITALIC\n if str(val) not in (\"0\", \"False\",)\n else tk_font.ROMAN\n )\n elif key2 in (slant_s[:2],) or kalias == slant_as:\n font_d[slant_s] = val\n elif (\n key3 in (funderline_as, funderline_s[:3])\n or kalias == funderline_as\n ):\n font_d[underline_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n elif (\n key3 in (foverstrike_as, foverstrike_s[:3])\n or kalias == foverstrike_as\n ):\n font_d[overstrike_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n # special \"case\" implementation\n elif key3 in (case_s[:3],) or kalias == case_as:\n for s in (upper_s, capitalize_s, lower_s, title_s, swapcase_s):\n if s.startswith(lowval):\n case = s if s != capitalize_s else upper_s\n break\n elif (\n key2 == upper_s[:2]\n or key3 in (capitalize_s[:3],)\n or kalias in (upper_as, capitalize_as)\n ):\n if str(val) not in (\"0\", \"False\",):\n case = upper_s\n elif key2 in (lower_s[:2],) or kalias == lower_as:\n if str(val) not in (\"0\", \"False\",):\n case = lower_s\n elif key2 == title_s[:2] or kalias == title_as:\n if str(val) not in (\"0\", \"False\",):\n case = title_s\n elif key2 == swapcase_s[:2] or kalias == swapcase_as:\n if str(val) not in (\"0\", \"False\",):\n case = swapcase_s\n elif key in ():\n bad_opts.append((key, val))\n else:\n options_d.update(**{key: val})\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n if attr_b:\n return (\n case\n if attr_b == case_s\n else options_d.get(attr_b, font_d.get(attr_b))\n )\n return options_d, font_d, case", "def gen_tag_attrs(widget=None, options_d=None, font=None, case=None, **kwargs):\n auto_b = kwargs.get(\"auto\", False)\n case = kwargs.get(case_s, case)\n extend_b = kwargs.get(\"extend\", False)\n font = kwargs.pop(\"font\", font or {})\n index_i = kwargs.pop(\"index\", None)\n kmode_s = kwargs.get(\"kmode\", \"\") # a=alias, o=options, ''=unchanged\n options_d = kwargs.pop(\"options\", options_d or {})\n pare_b = kwargs.get(\"pare\", True)\n widget = kwargs.pop(\"widget\", widget)\n text_w = kwargs.get(text_s, None)\n recurse_b = kwargs.pop(\"recurse\", widget and isinstance(widget, TTWidget))\n fmt_s = \"\"\n font_d = {}\n w_font_d, w_options_d = {}, {}\n if index_i is not None and widget is None:\n raise Exception(\"Cannot set 'index' when 'widget' is None\")\n if widget: # and isinstance(widget, TTWidget): #\n excludes_t = () if widget.emulation_b else ()\n w_options_d = {\n k: v[-1]\n for k, v in widget.config().items()\n if len(v) == 5 and str(v[-1]) != str(v[-2]) and k not in excludes_t\n }\n try:\n w_options_d[case_s] = widget.case\n except AttributeError:\n pass\n w_font = widget.cget(font_s) # w_options_d.pop(font_s, None)\n w_font_d = get_font_dict(w_font) if w_font else {}\n if pare_b and w_font_d:\n def_w_font = widget.config(font_s)[-2]\n def_w_font_d = get_font_dict(def_w_font)\n w_font_d = pare_dict(w_font_d, def_w_font_d)\n if font:\n if isinstance(font, str):\n try:\n font = tk_font.nametofont(font)\n except 
tk.TclError:\n pass\n elif type(font) in (list, tuple):\n font = tk_font.Font(font=font)\n if isinstance(font, tk_font.Font):\n font = font.actual()\n if isinstance(font, dict):\n font_d = font\n if case: # is not None:\n options_d = _merge_dicts(options_d, dict(case=case))\n d = _merge_dicts(\n w_options_d,\n convert_font_dict_to_ttoptions_dict(w_font_d),\n options_d,\n convert_font_dict_to_ttoptions_dict(font_d),\n kwargs,\n )\n bad_opts = []\n for key, val in d.items():\n key = key.lower()\n if key in (\"auto\", \"extend\", \"kmode\", \"pare\",): # text_s, ): #\n continue\n key2, key3, key4 = key[:2], key[:3], key[:4]\n kalias = alias(key)\n koption = unalias(key)\n if kmode_s:\n if kmode_s[0] == \"a\": # alias\n keyout = kalias\n kfunc = alias\n auto_cpd, auto_bd = compound_as, bd_s\n elif kmode_s[0] == \"o\": # option\n keyout = koption\n kfunc = unalias\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n else:\n keyout = key\n kfunc = str\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n if val:\n val = quote(val)\n if (\n key3 in (bg_s, background_s[:3], fg_s, foreground_s[:3])\n or key2 == underline_s[:2]\n or kalias in (bg_s, fg_s, underline_as)\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_cpd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_cpd, tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],):\n if \"%s=%s \" % (auto_bd, 1) in fmt_s:\n if val != 1:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_bd, 1), \"%s=%s \" % (keyout, val)\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key4 in (compound_s[:4],) or kalias == compound_as:\n if \"%s=%s \" % (auto_cpd, tk.CENTER) in fmt_s:\n if val != tk.CENTER:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_cpd, tk.CENTER),\n \"%s=%s \" % (keyout, val),\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == cursor_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == font_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, get_named_font(val))\n elif key2 in (relief_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_bd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_bd, 1)\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sbd_s,\n selectborderwidth_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n # special for fonts\n elif key2 in (family_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (size_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (weight_s[:2],):\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.BOLD),\n 1\n if isinstance(val, str) and val.lower() == tk_font.BOLD\n else 0,\n )\n elif key2 == slant_s[:2]:\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.ITALIC),\n 1\n if isinstance(val, str) and val.lower() == tk_font.ITALIC\n else 0,\n )\n elif key3 in (funderline_as, funderline_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(funderline_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n elif key3 in (foverstrike_as, foverstrike_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(foverstrike_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n # special \"case\" implementation\n elif key3 == case_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(case_s), val)\n elif key2 == upper_s[:2] or key3 == capitalize_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(upper_s), val)\n elif key2 in (lower_s[:2], title_s[:2], swapcase_s[:2]):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key in 
():\n bad_opts.append((key, val))\n elif key in (text_s, text_as):\n if extend_b or widget:\n fmt_s += \"%s=%s \" % (keyout, val)\n else:\n # bad_opts.append((key, val))\n fmt_s += \"%s=%s \" % (keyout, val)\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n fmt = fmt_s.strip()\n if widget and isinstance(widget, TTWidget) and recurse_b:\n fmt = [\n fmt,\n ]\n for _, gathering in widget._get_kids(items=True):\n child = gathering[\"label\"]\n case = gathering.get(case_s, \"\")\n kid_options = {\n k: v[-1]\n for k, v in child.config().items()\n if len(v) == 5\n and str(v[-1]) != str(v[-2])\n and (k, v[-1]) not in w_options_d.items()\n and not (k in label_override_d and str(v[-1]) == \"0\")\n } #\n cf = kid_options.pop(font_s, None)\n cdf = child.config(font_s)[-2]\n if cf != cdf:\n c_font_d = pare_dict(get_font_dict(cf), get_font_dict(cdf))\n else:\n c_font_d = {}\n if case:\n kid_options.update(case=case)\n fmt.append(\n gen_tag_attrs(options=kid_options, font=c_font_d, **kwargs)\n )\n return fmt if index_i is None else fmt[index_i]", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def prepare_node_attrs(self):", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs", "def attkey_to_SVG_attribs(self,k):\n atts= k.split('@')\n o= ''\n acodes= {'C':'stroke','W':'stroke-width','S':'stroke-dasharray','O':'stroke-opacity'}\n for a in atts:\n if a[0] in acodes:\n o+= '%s=\"%s\" ' % (acodes[a[0]],a[1:])\n# elif a[0] == 'S': # Maybe do something special like this.\n# o+= 'stroke-dasharray=\"%\" ' % a[1:]\n return o", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def fix_attributes(string):\n defs = re.compile('<dl class=\"attribute\">(?P<descrip>.*?)</dl>',flags=re.DOTALL)\n name = re.compile('<code class=\"descclassname\">(?P<name>[^<]*)</code>')\n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefsub = ''\n remnsub = remain[match.start(1):match.end(1)]\n descrip = name.search(remnsub)\n if descrip:\n prefix += remnsub[:descrip.start()]\n prefix += remnsub[descrip.end():]\n prefix += remain[match.end(1):match.end(0)]\n else:\n prefix += remain[match.start(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def read_attribs(self):\n\n attribs = {}\n while 
self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def extensible_attributes():\n return 'extensibleattributedef?'", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def 
parse_tag_attrs(\n self, tags_str, options_d=None, font_d=None, case=\"\", **kwargs\n ):\n return parse_tag_attrs(\n tags_str,\n options_d,\n font_d,\n case,\n widget=self,\n text=getattr(self, \"debug_text\", None),\n **kwargs\n )", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def get_attributes(self) -> Dict[str, str]:\n pass", "def transform(attrs: dict) -> dict:\n\n pass", "def get_html_element_attributes(self):\n html_element_attributes = {\n 'class': self.css_classes or False, # Fall back to false to avoid class=\"\"\n }\n if self.should_render_as_link():\n html_element_attributes['href'] = self.url\n return html_element_attributes", "def create_descr(self, attr_name):", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def set_attrs(self, username, attrs):\n pass", "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,\r\n prettyPrint=False, indentLevel=0):\r\n\r\n encodedName = self.toEncoding(self.name, encoding)\r\n\r\n attrs = []\r\n if self.attrs:\r\n for key, val in self.attrs:\r\n fmt = '%s=\"%s\"'\r\n if isString(val):\r\n if self.containsSubstitutions and '%SOUP-ENCODING%' in val:\r\n val = self.substituteEncoding(val, encoding)\r\n\r\n # The attribute value either:\r\n #\r\n # * Contains no embedded double quotes or single quotes.\r\n # No problem: we enclose it in double quotes.\r\n # * Contains embedded single quotes. No problem:\r\n # double quotes work here too.\r\n # * Contains embedded double quotes. No problem:\r\n # we enclose it in single quotes.\r\n # * Embeds both single _and_ double quotes. This\r\n # can't happen naturally, but it can happen if\r\n # you modify an attribute value after parsing\r\n # the document. Now we have a bit of a\r\n # problem. We solve it by enclosing the\r\n # attribute in single quotes, and escaping any\r\n # embedded single quotes to XML entities.\r\n if '\"' in val:\r\n fmt = \"%s='%s'\"\r\n if \"'\" in val:\r\n # TODO: replace with apos when\r\n # appropriate.\r\n val = val.replace(\"'\", \"&squot;\")\r\n\r\n # Now we're okay w/r/t quotes. 
But the attribute\r\n # value might also contain angle brackets, or\r\n # ampersands that aren't part of entities. We need\r\n # to escape those to XML entities too.\r\n val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)\r\n\r\n attrs.append(fmt % (self.toEncoding(key, encoding),\r\n self.toEncoding(val, encoding)))\r\n close = ''\r\n closeTag = ''\r\n if self.isSelfClosing:\r\n close = ' /'\r\n else:\r\n closeTag = '</%s>' % encodedName\r\n\r\n indentTag, indentContents = 0, 0\r\n if prettyPrint:\r\n indentTag = indentLevel\r\n space = (' ' * (indentTag-1))\r\n indentContents = indentTag + 1\r\n contents = self.renderContents(encoding, prettyPrint, indentContents)\r\n if self.hidden:\r\n s = contents\r\n else:\r\n s = []\r\n attributeString = ''\r\n if attrs:\r\n attributeString = ' ' + ' '.join(attrs)\r\n if prettyPrint:\r\n s.append(space)\r\n s.append('<%s%s%s>' % (encodedName, attributeString, close))\r\n if prettyPrint:\r\n s.append(\"\\n\")\r\n s.append(contents)\r\n if prettyPrint and contents and contents[-1] != \"\\n\":\r\n s.append(\"\\n\")\r\n if prettyPrint and closeTag:\r\n s.append(space)\r\n s.append(closeTag)\r\n if prettyPrint and closeTag and self.nextSibling:\r\n s.append(\"\\n\")\r\n s = ''.join(s)\r\n return s", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def convert_attributes(cls, attrs):\n return {}", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def widget_attrs(self, widget):\n\n attrs = super(RelateField, self).widget_attrs(widget)\n\n attrs.update({'content_type': self.content_types})\n\n return attrs", "def attributes(doc, header, renderer=Attribute, item_class=DefinitionItem):\n items = doc.extract_items(item_class)\n lines = []\n renderer = renderer()\n for item in items:\n renderer.item = item\n lines += renderer.to_rst()\n lines.append('')\n return lines", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n info[ATTR_NAME] = info[ATTR_PROPERTIES]['Name'].replace('\\xa0', ' ')\n return info", "def img(self, **kwargs):\n attrs = ''\n for item in 
kwargs.items():\n if not item[0] in IMGATTRS:\n raise AttributeError, 'Invalid img tag attribute: %s'%item[0]\n attrs += '%s=\"%s\" '%item\n return '<img src=\"%s\" %s>'%(str(self),attrs)", "def gen_tag_attrs(self, *a, **kw):\n if kw.get(\"widget\", sentinel) is not None:\n raise Exception(\n \"TTToolTip.gen_tag_attrs(): 'widget' keyword must be set\"\n \" to None\"\n )\n return gen_tag_attrs(None, *a, **kw)", "def init_attrs(self):\n raise NotImplementedError", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def _style_to_basic_html_attributes(self, element, style_content,\n force=False):\n if style_content.count('}') and \\\n style_content.count('{') == style_content.count('{'):\n style_content = style_content.split('}')[0][1:]\n\n attributes = {}\n for rule in style_content.split(';'):\n split = rule.split(':')\n if len(split) != 2:\n continue\n key = split[0].strip()\n value = split[1]\n\n if key == 'text-align':\n attributes['align'] = value.strip()\n elif key == 'background-color':\n attributes['bgcolor'] = value.strip()\n elif key == 'width' or key == 'height':\n value = value.strip()\n if value.endswith('px'):\n value = value[:-2]\n attributes[key] = value\n\n for key, value in list(attributes.items()):\n if key in element.attrib and not force or key in self.disable_basic_attributes:\n # already set, don't dare to overwrite\n continue\n element.attrib[key] = value", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def domAttributesToString( node ):\n strOut = \"node has %d attribute(s):\\n\" % node.attributes.length;\n for i in range(node.attributes.length):\n attr = node.attributes.item(i);\n strOut += \"- %s:'%s'\\n\" % (attr.name, attr.value );\n return strOut;", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_", "def replace_tag_attributes(code_attrs, tag, tag_attrs):\n\n new_attrs = code_attrs.copy()\n for key, value in tag_attrs.items():\n if key in new_attrs:\n new_attrs[key] = new_attrs[key].replace(tag, value)\n\n return new_attrs", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def as_html(self):\n return mark_safe(\" 
\".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def format_link(attrs: Dict[tuple, str], new: bool = False):\n try:\n p = urlparse(attrs[(None, 'href')])\n except KeyError:\n # no href, probably an anchor\n return attrs\n\n if not any([p.scheme, p.netloc, p.path]) and p.fragment:\n # the link isn't going anywhere, probably a fragment link\n return attrs\n\n c = urlparse(settings.SITE_URL)\n if p.netloc != c.netloc:\n # link is external - secure and mark\n attrs[(None, 'target')] = '_blank'\n attrs[(None, 'class')] = attrs.get((None, 'class'), '') + ' external'\n attrs[(None, 'rel')] = 'nofollow noopener noreferrer'\n\n return attrs", "def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)", "def parseAttrs(self,attrs,date_type):\n\tattrs=copy.copy(attrs) #make sure we don't change user/group attributes\n \tattr_holders=self.getAttrHolders(attrs)\n\tmap(lambda x:x.setDateType(date_type),attr_holders)\n\tmap(lambda x:attrs.update(x.getParsedDic()),attr_holders)\n\treturn attrs", "def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs" ]
[ "0.735201", "0.6754294", "0.67166066", "0.67071074", "0.66780305", "0.65807486", "0.6522693", "0.6522693", "0.65187657", "0.6471306", "0.6269984", "0.62653935", "0.6153201", "0.6090701", "0.60323846", "0.60278016", "0.6011661", "0.60042846", "0.59841794", "0.5941162", "0.59205276", "0.5918955", "0.59121054", "0.5903962", "0.5884743", "0.5876164", "0.5857109", "0.5851559", "0.583173", "0.58274394", "0.5816038", "0.58061635", "0.5784312", "0.5755998", "0.5755998", "0.57360405", "0.57051307", "0.5701552", "0.5687975", "0.5650812", "0.5618766", "0.561154", "0.5605911", "0.56030387", "0.5602799", "0.55926436", "0.5587559", "0.5571399", "0.5567558", "0.55631375", "0.555545", "0.5550559", "0.55490625", "0.55470836", "0.55410224", "0.5519966", "0.55098814", "0.5492064", "0.547102", "0.5470936", "0.54692423", "0.5467515", "0.54661024", "0.54518676", "0.54405665", "0.5438651", "0.54003173", "0.5388153", "0.5382598", "0.5375904", "0.5375076", "0.53706104", "0.5359634", "0.5354708", "0.5354708", "0.5331472", "0.5324531", "0.53227526", "0.5316361", "0.5309617", "0.5308968", "0.53067", "0.5306182", "0.5299369", "0.52990687", "0.5287107", "0.52791494", "0.5277907", "0.5276578", "0.52742803", "0.5270845", "0.52608305", "0.52524847", "0.5244876", "0.5239417", "0.5234171", "0.5224983", "0.5215326", "0.521457", "0.5212088", "0.5203955" ]
0.0
-1
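The row above pairs the docstring "Any format, prefix, suffix, html info in attrs dict" with a one-line TextFormat.__init__, and the next row pairs a titleMode docstring with a formatOutput override. A minimal sketch of how such a class could wire an attrs dict into escaped output follows; the attribute names and the escaping choice are assumptions for illustration, not the source project's actual implementation.

from xml.sax.saxutils import escape

class TextFormat:
    def __init__(self, name, attrs=None):
        attrs = attrs or {}
        self.name = name
        self.prefix = attrs.get("prefix", "")   # emitted before the value
        self.suffix = attrs.get("suffix", "")   # emitted after the value
        self.html = attrs.get("html", False)    # stored text is already HTML

    def formatOutput(self, storedText, titleMode, internal=False):
        # Per the next row's docstring: escape only when not in titleMode
        # and the text is not trusted HTML.
        if not self.html and not titleMode:
            storedText = escape(storedText)
        return "%s%s%s" % (self.prefix, storedText, self.suffix)

# TextFormat("note", {"prefix": "[", "suffix": "]"}).formatOutput("a<b", False)
# -> '[a&lt;b]'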
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False):
    try:
        text = GenNumber(storedText).numStr(self.format)
    except GenNumberError:
        text = _errorStr
    return TextFormat.formatOutput(self, text, titleMode, internal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def format_title(self, data):\n return data", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_text(downgrade_titles=False):", "def PROPER(text):\n return text.title()", "def title(self, string):\n return self.bold(string)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def print_with_title(title, content, before='', after='', hl='='):\n cont_maxlen = max(len(s) for s in content.split('\\n'))\n hl_len = max(cont_maxlen, len(title))\n print('{}{}\\n{}\\n{}{}'.format(before, title, hl * hl_len, content, after))", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", 
trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def helptext(self):\n return \"\"", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def get_title():", "def __str__(self):\n date_str = self.date.strftime(self.journal.config['timeformat'])\n title = date_str + \" \" + self.title\n body = self.body.strip()\n\n return \"{title}{sep}{body}\\n\".format(\n title=title,\n sep=\"\\n\" if self.body else \"\",\n body=body\n )", "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def output_sep_title(title):\n print(f\"{sep_mark}\\t{title}{sep_mark}\")", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def title(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"=\")))", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', 
'&quot;')\n return title\n else:\n return u''", "def escape_if_needed(text, options):\n if hasattr(text, '__html__'):\n # Text has escape itself:\n return to_string(text.__html__())\n if need_to_escape(options):\n return escape(to_string(text))\n return to_string(text)", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_rst_title_char(level):\n chars = (u'=', u'-', u'`', u\"'\", u'.', u'~', u'*', u'+', u'^')\n if level < len(chars):\n return chars[level]\n return chars[-1]", "def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def escape_single_quotes(custom_data):\n # https://stackoverflow.com/questions/10569438/how-to-print-unicode-character-in-python\n # https://regex101.com/r/nM4bXf/1\n if re.search(\"(?<!u)'(?!:|}|,)\", custom_data.get('title_name', '')):\n z = re.sub(r\"(?<!u)'(?!:|}|,)\", '\\\\\\'', custom_data.get('title_name', None))\n\n custom_data['title_name'] = z\n return custom_data\n return custom_data", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def format_title(self, ticket_id, subject):\n # TODO: strip block tags?\n title = \"#%i %s\" % (ticket_id, subject)\n return title.strip()", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def title(self, value):\n if len(value):\n self._title = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._title.append('')", "def emphasize(text: str, tablefmt: str | TableFormat, strong: bool = False) -> str:\n # formats a title for a table produced using tabulate,\n # in the formats tabulate understands\n if tablefmt in [\"html\", \"unsafehtml\", html_with_borders_tablefmt]: # type: ignore\n if strong:\n emph_text = f\"<strong>{text}</strong>\"\n else:\n emph_text = f\"<em>{text}</em>\"\n elif tablefmt in [\"latex\", \"latex_raw\", \"latex_booktabs\", \"latex_longtable\"]:\n if strong:\n emph_text = r\"\\textbf{\" + text + r\"}\"\n else:\n emph_text = r\"\\emph{\" + 
text + r\"}\"\n else: # use the emphasis for tablefmt == \"pipe\" (Markdown)\n star = \"**\" if strong else \"*\"\n emph_text = f\"{star}{text}{star}\"\n return emph_text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def textual(title, ordering_field=None):\n def decorator(func):\n def wraps(self, obj):\n result = func(self, obj)\n return result if result else u'---'\n\n wraps.short_description = title\n wraps.allow_tags = True\n\n if ordering_field:\n wraps.admin_order_field = ordering_field\n\n return wraps\n return decorator", "def outputText(self, item, titleMode, internal=False):\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def group_title(self, group):\n group_title = group.getProperty('title')\n if self.short:\n splitted = group_title.split('(')\n if len(splitted) > 1:\n group_title = group_title.split('(')[-1][:-1]\n return html.escape(group_title)", "def outputText(self, item, titleMode, internal=False):\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)", "def format_heading(self, level, text):\n underlining = ['=', '-', '~', ][level-1] * len(text)\n return '%s\\n%s\\n\\n' % (text, underlining)", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def formatted(self) -> str:\r\n ...", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def gen_title_rst(txt):\n # Just add a few useful directives\n txt = \".. 
highlight:: cmake\\n\\n\" + txt\n return txt", "def _prettyfilename(self):\n return self.title", "def wrap_title(title, mpl_layout):\n fig = mpl_layout.canvas.figure\n ax = fig.axes[0]\n ext_pixels = ax.get_window_extent()\n ext_inches = ext_pixels.transformed(fig.dpi_scale_trans.inverted())\n magic_number = 10\n letters_per_line = int(ext_inches.width * magic_number)\n title_wrapped = '\\n'.join(textwrap.wrap(title, letters_per_line))\n return title_wrapped", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''", "def transform(text: str) -> str:\n return text.title()", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)", "def complete_alt_title(self, obj):\n return str(obj)", "def clean_title(\r\n title: str,\r\n mode: Literal[\"soft\", \"hard\", \"safe\"],\r\n allow_dot: bool = False,\r\n n: Optional[int] = None,\r\n) -> str:\r\n ...", "def text(self) -> str:", "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and 
':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "def print_title(title):\n\n print(\"\\n\" + title)\n print(\"=\" * len(title))", "def format_rich_quote(rich_text_quote):\n rich_text = format_rich_text(rich_text_quote)\n return \"> \" + \"\\n> \".join(rich_text.split(\"\\n\")) + \"\\n\"", "def SearchableText(self):\n ctool = getToolByName(self, 'portal_cpscalendar')\n if getattr(ctool, 'event_fulltext_index', False):\n return '%s %s' % (self.title, self.description)\n return ''", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def visit_title_reference(self, node):\n self.body.append('\\\\emph{\\\\textbf{')", "def render(resolve_unicode,\n title_force_uppercase,\n msdos_eol_style,\n output_encoding,\n omit_fields=[]):", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def format_screen(self,str):\n # Paragraph continue\n par_re = re.compile(r'\\\\$',re.MULTILINE)\n str = par_re.sub('',str)\n return str", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def title_content(label=\"A title\"):\n return {'label':label}", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title_p(self):\n self.run_command('title_p')", "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def dumps(self):\n\n if not self.numbering:\n num = '*'\n else:\n num = ''\n\n string = Command(self.latex_name + num, self.title).dumps()\n string += '%\\n' + self.dumps_content()\n\n return string", "def processTitle(title):\n\n cleaned = re.sub(r'[@#\\\"]+', '', title.lower().strip())\n cleaned = re.sub(r'\\(\\d{4}.*\\)', '', cleaned)\n cleaned = re.sub(r':.+', '', cleaned).strip()\n return cleaned", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if 
self.font else self.text" ]
[ "0.67517006", "0.6623557", "0.64947814", "0.6347113", "0.6307539", "0.621596", "0.6210496", "0.60684896", "0.60674477", "0.60663515", "0.60421175", "0.6019259", "0.59935653", "0.59802073", "0.59790826", "0.595393", "0.5948588", "0.5939195", "0.590317", "0.5872387", "0.58521676", "0.5838757", "0.5835408", "0.5834278", "0.5832544", "0.58303535", "0.58232164", "0.58196765", "0.5818879", "0.581837", "0.58134586", "0.58123326", "0.57893336", "0.5777435", "0.5773666", "0.5759935", "0.57562524", "0.57514244", "0.5736761", "0.5721786", "0.57156", "0.5693657", "0.56579095", "0.56524575", "0.56516933", "0.56416726", "0.5639766", "0.56235963", "0.5607828", "0.55989367", "0.5597865", "0.5593643", "0.55868447", "0.5576239", "0.55753696", "0.5570099", "0.556155", "0.55568874", "0.55474097", "0.5539662", "0.5532411", "0.5531814", "0.5512975", "0.5479672", "0.54774815", "0.54768354", "0.5473451", "0.54682344", "0.5464578", "0.54521894", "0.5445922", "0.5437787", "0.54369724", "0.5422958", "0.5415149", "0.5415149", "0.5399354", "0.539413", "0.53890395", "0.5382889", "0.5382856", "0.53564143", "0.535306", "0.53529805", "0.5352455", "0.5347083", "0.5333787", "0.5333257", "0.5332394", "0.5331696", "0.53306514", "0.53304696", "0.53293514", "0.5327383", "0.53269297", "0.53269297", "0.53238297", "0.53169096", "0.5314785", "0.5314103" ]
0.5630319
47
Return tuple of text in edit format and bool validity, using self.format
def formatEditText(self, storedText): try: return (GenNumber(storedText).numStr(self.format), True) except GenNumberError: return (storedText, not storedText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def syntax_text():", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def getText():", "def getText():", "def getText():", "def 
getText():", "def getText():", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def reformat(ctx):\n pass", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def text(value):\n return True", "def process_output(self, text): # pylint: disable=no-self-use\n changed = 
False\n return changed, text", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def getText(self):", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = _(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def test_format_permissions_and_docstring(self):\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\"],\n {\"some\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"permission formatted string\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\"\n ),\n )\n\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\", \"another permission\"],\n {\"some\": \"docstring\", \"another\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"- permission formatted string\\n\"\n \"- another permission\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\\n\"\n \"- **another** : docstring\"\n ),\n )", "def formatted(self) -> str:\r\n ...", "def __repr__(self) -> str:\r\n\r\n saida = \"Format: \"\r\n x = self.getformat()\r\n for _ in range(len(x)):\r\n saida = f\"{saida}{x[_]}\"\r\n if _ < len(x)-1:\r\n saida += \", \"\r\n saida += \"\\n\"\r\n return saida", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) 
# convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def get_format(self):\n format = QtGui.QTextCharFormat()\n\n # Set foreground color\n if self.foreground_color is not None:\n color = self.color_map[self.foreground_color][self.intensity]\n format.setForeground(QtGui.QColor(color))\n\n # Set background color\n if self.background_color is not None:\n color = self.color_map[self.background_color][self.intensity]\n format.setBackground(QtGui.QColor(color))\n\n # Set font weight/style options\n if self.bold:\n format.setFontWeight(QtGui.QFont.Bold)\n else:\n format.setFontWeight(QtGui.QFont.Normal)\n format.setFontItalic(self.italic)\n format.setFontUnderline(self.underline)\n\n return format", "def get_data_from_nonformat_text():\n pass", "def validate_format(self):\n raise NotImplementedError()", "def format(self) -> str:", "def individual_info(self, ctx: commands.Context, format: str) -> str:\n\t\tformat = self.__normalize(ctx, format)\n\t\ttip = self.formats[format]\n\t\theader_text = self.__header(format, tip)\n\t\thow_to = blockquote(tip.escaped)\n\t\tfooter_text = self.__footer(format)\n\t\treturn f\"{header_text}\\n\\n{how_to}\\n\\n{footer_text}\"", "def get_text(self):\n inp = \" \"\n if self.link_id:\n inp += \"LINK \" + self.link_id\n inp += self.status + ' '\n if 
self.node_id:\n inp += \"NODE \" + self.node_id + ' '\n if self.value:\n inp += self.control_type.name + ' ' + str(self.value) + ' '\n if self.time:\n inp += self.time + ' '\n if self.clock_time:\n inp += self.clock_time + ' '\n # TODO: research correct formatting of time, clock_time options\n return inp", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def get_help_text(self):\n requirements = \"Your password must contain at least: {min_length} \" \\\n \"character(s), {min_uppercase} uppercase letter(s), \" \\\n \"{min_lowercase} lowercase letter(s) and \" \\\n \"{min_digits} digit(s). \".format(\n min_length=self.min_length,\n min_uppercase=self.min_uppercase,\n min_lowercase=self.min_lowercase,\n min_digits=self.min_digits)\n return requirements", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def verify_diagnostics_and_usage_text():\r\n msg, status = \"\", True\r\n try:\r\n sleep(10)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n 'verify end user license agreement label'\r\n flag2,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n \r\n flag = False if not (flag1 and flag2) else True\r\n else:\r\n\r\n \r\n 'Verify diagnostics usage text'\r\n flag1 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_lbl'))\r\n 'Verify diagnostics usage text'\r\n flag2 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_txt'))\r\n \r\n sleep(4) \r\n \r\n flag = False if not (flag1 and flag2) else True\r\n if not flag:\r\n return False, msg\r\n else:\r\n print \"License agreement screen name is displayed properly\"\r\n \r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def formatted_locator_information(self):\n info = 'May not follow-up.'\n if self.may_follow_up == 'Yes':\n info = (\n '{may_sms_follow_up}\\n'\n 'Cell: {subject_cell} {alt_subject_cell}\\n'\n 'Phone: {subject_phone} {alt_subject_phone}\\n'\n '').format(\n may_sms_follow_up='SMS permitted' if self.may_sms_follow_up == 'Yes' else 'NO SMS!',\n subject_cell='{} (primary)'.format(self.subject_cell) if self.subject_cell else '(none)',\n alt_subject_cell=self.subject_cell_alt,\n subject_phone=self.subject_phone or '(none)', 
alt_subject_phone=self.subject_phone_alt\n )\n if self.may_call_work == 'Yes':\n info = (\n '{info}\\n Work Contacts:\\n'\n '{subject_work_place}\\n'\n 'Work Phone: {subject_work_phone}\\n'\n '').format(\n info=info,\n subject_work_place=self.subject_work_place or '(work place not known)',\n subject_work_phone=self.subject_work_phone)\n if self.may_contact_someone == 'Yes':\n info = (\n '{info}\\n Contacts of someone else:\\n'\n '{contact_name} - {contact_rel}\\n'\n '{contact_cell} (cell), {contact_phone} (phone)\\n'\n '').format(\n info=info,\n contact_name=self.contact_name or '(name?)',\n contact_rel=self.contact_rel or '(relation?)',\n contact_cell=self.contact_cell or '(----)',\n contact_phone=self.contact_phone or '(----)'\n )\n if info:\n info = ('{info}'\n 'Physical Address:\\n{physical_address}').format(\n info=info, physical_address=self.physical_address)\n return info", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def TEXT(number, format_type):\n raise NotImplementedError()", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = 
ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def text_to_display(level):\n if level == \"html\":\n return html_answers, html_text\n elif level == \"css\":\n return css_answers, css_text\n elif level == \"python\":\n return python_answers, python_text", "def __str__(self):\n struct_repr = \", \".join([\n \"type: \" + str(self.type),\n \"text: \" + str(self.text)\n ])\n\n return f\"StatusText: [{struct_repr}]\"", "def htmlFormat(self, text):\n txt_blocks = self._parser_block(lex_block(text))\n\n #XXX: Maybe there is a better solution, but I doubt\n #The problem is nested escapestyles\n escape_d = {}\n escapes = re.compile('\\[escapestyle\\] \\s* (?P<inner>(.|\\s)*?) \\s* \\[/escapestyle\\]', re.VERBOSE)\n def rem(mo):\n h_code = hash(mo.group(0))\n escape_d[h_code] = mo.group('inner')\n return '(<!%s!>)' % h_code\n txt_blocks = escapes.sub(rem, txt_blocks)\n\n txt_style = parser_style(lex_style(txt_blocks))\n\n eess = re.compile('\\(<!(-?\\d+)!>\\)')\n def back(mo):\n val = int(mo.group(1))\n if escape_d.has_key(val):\n return escape_d[val]\n return mo.group(0)\n txt_style = eess.sub(back, txt_style)\n\n return txt_style", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def summary(self):\n if self.intact and self.valid:\n return 'INTACT:' + ','.join(self.summary_fields())\n else:\n return 'INVALID'", "def asformat(self, format):", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def _format_answer(self, text):\n text = str(text).replace('\\n', ' ')\n answer_width = 70\n pretty_text = '\\n\\t'.join(textwrap.wrap(text, answer_width))\n\n return pretty_text", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n 
self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def _check_style(file_path, clang_format_bin):\n with open(file_path, 'r') as f:\n is_valid_header = f.read().startswith(CppFormatter.standard_header)\n\n cmd = [\n clang_format_bin,\n \"-style=file\",\n \"-output-replacements-xml\",\n file_path,\n ]\n result = subprocess.check_output(cmd).decode(\"utf-8\")\n if \"<replacement \" in result:\n is_valid_style = False\n else:\n is_valid_style = True\n return (is_valid_style, is_valid_header)", "def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def __str__(self) -> str:\n if self.write_back is black.WriteBack.CHECK:\n reformatted = \"would be reformatted\"\n unchanged = \"would be left unchanged\"\n failed = \"would fail to reformat\"\n cleared = \"would be cleared\"\n else:\n reformatted = \"reformatted\"\n unchanged = \"left unchanged\"\n failed = \"failed to reformat\"\n cleared = \"cleared\"\n report = []\n if self.change_count:\n s = \"s\" if self.change_count > 1 else \"\"\n report.append(\n click.style(\n f\"{self.change_count} cell{s} {reformatted}\", bold=True\n )\n )\n if self.same_count:\n s = \"s\" if self.same_count > 1 else \"\"\n report.append(f\"{self.same_count} cell{s} {unchanged}\")\n if self.failure_count:\n s = \"s\" if self.failure_count > 1 else \"\"\n report.append(\n click.style(f\"{self.failure_count} cell{s} {failed}\", fg=\"red\")\n )\n if self.output_change_count:\n s = \"s\" if self.change_count > 1 else \"\"\n report.append(\n click.style(\n f\"{self.output_change_count} output{s} {cleared}\",\n bold=True,\n )\n )\n if self.output_same_count:\n s = \"s\" if self.same_count > 1 else \"\"\n report.append(f\"{self.output_same_count} output{s} {unchanged}\")\n return \", \".join(report) + \".\"", "def format(self, message):", "def rich(text):\n return full(text, False)", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def _get_format_from_style(self, token, style):\n result = QtGui.QTextCharFormat()\n for key, value in style.style_for_token(token).items():\n if value:\n if key == 'color':\n result.setForeground(self._get_brush(value))\n elif key == 'bgcolor':\n result.setBackground(self._get_brush(value))\n elif key == 'bold':\n result.setFontWeight(QtGui.QFont.Bold)\n elif key == 'italic':\n result.setFontItalic(True)\n elif key == 'underline':\n result.setUnderlineStyle(\n QtGui.QTextCharFormat.SingleUnderline)\n elif key == 'sans':\n result.setFontStyleHint(QtGui.QFont.SansSerif)\n elif key == 'roman':\n result.setFontStyleHint(QtGui.QFont.Times)\n elif key == 'mono':\n result.setFontStyleHint(QtGui.QFont.TypeWriter)\n return result", "def 
testCheckRequiredFormat(self):\n plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()\n\n file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()\n file_system_builder.AddFile('/file.txt', (\n b'2018-01-24 18:25:08,454 -0800 INFO pid=2376 7780:MainThread '\n b'logging_config.py:295 OS: Windows/6.1-SP1\\n'))\n\n file_entry = file_system_builder.file_system.GetFileEntryByPath('/file.txt')\n\n parser_mediator = self._CreateParserMediator(None, file_entry=file_entry)\n\n file_object = file_entry.GetFileObject()\n text_reader = text_parser.EncodedTextReader(file_object)\n text_reader.ReadLines()\n\n result = plugin.CheckRequiredFormat(parser_mediator, text_reader)\n self.assertTrue(result)", "def re_format(self):\n return self._re.pattern", "def text(self) -> str:", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def is_valid(self):\n for lineedit in self.lineedits:\n if lineedit in self.validate_data and lineedit.isEnabled():\n validator, invalid_msg = self.validate_data[lineedit]\n text = to_text_string(lineedit.text())\n if not validator(text):\n QMessageBox.critical(self, self.get_name(),\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\n QMessageBox.Ok)\n return False\n return True", "def help_for(self, flag: str) -> Tuple[str, str]:\n # Obtain arg obj\n if flag not in self.flags:\n err = \"{!r} is not a valid flag for this context! 
Valid flags are: {!r}\" # noqa\n raise ValueError(err.format(flag, self.flags.keys()))\n arg = self.flags[flag]\n # Determine expected value type, if any\n value = {str: \"STRING\", int: \"INT\"}.get(arg.kind)\n # Format & go\n full_names = []\n for name in self.names_for(flag):\n if value:\n # Short flags are -f VAL, long are --foo=VAL\n # When optional, also, -f [VAL] and --foo[=VAL]\n if len(name.strip(\"-\")) == 1:\n value_ = (\"[{}]\".format(value)) if arg.optional else value\n valuestr = \" {}\".format(value_)\n else:\n valuestr = \"={}\".format(value)\n if arg.optional:\n valuestr = \"[{}]\".format(valuestr)\n else:\n # no value => boolean\n # check for inverse\n if name in self.inverse_flags.values():\n name = \"--[no-]{}\".format(name[2:])\n\n valuestr = \"\"\n # Tack together\n full_names.append(name + valuestr)\n namestr = \", \".join(sorted(full_names, key=len))\n helpstr = arg.help or \"\"\n return namestr, helpstr", "def validate(self, txt, pos):\n state, rpos = qt.QDoubleValidator.validate(self, txt, pos)\n if txt.length() == 0:\n state = qt.QValidator.Acceptable\n return state, rpos", "def test_is(self):\n invalid = self.TDTT()\n self.check_invalid_is(invalid)\n\n valid = self.TDTT(when=self.txt_when)\n self.check_valid_is(valid)", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "async def process_input(cls, content: str, description: str) -> tuple[str, str]:\n return content, description", "def set_text_format(\n self, text_format: constants.TextFormatStr | constants.TextFormat\n ) -> Label:\n self.setTextFormat(constants.TEXT_FORMAT.get_enum_value(text_format))\n return self", "def displayText(self, value, locale=None):\n if value is None:\n return \"\"\n\n value = value.toPyObject()\n\n if isinstance(value, Exception):\n result = \"Error\"\n else:\n results = value.xml, value.profile, value.best_practices\n invalid = any(getattr(x, 'is_valid', None) is False for x in results)\n result = \"Invalid\" if invalid else \"Valid\"\n\n return super(ResultsDelegate, self).displayText(result, locale)", "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")" ]
[ "0.75339824", "0.75120217", "0.7407906", "0.73783916", "0.73783916", "0.7370748", "0.7224785", "0.71053016", "0.6644924", "0.66019595", "0.6499535", "0.6417826", "0.64168525", "0.6153556", "0.6028552", "0.58961284", "0.5666202", "0.5544326", "0.5529664", "0.552194", "0.55137515", "0.55114615", "0.55114615", "0.55114615", "0.55114615", "0.55114615", "0.5465152", "0.5431987", "0.5386246", "0.5374847", "0.53560525", "0.53351474", "0.53164434", "0.51913273", "0.51773", "0.51389235", "0.5138142", "0.51353323", "0.5131033", "0.5113542", "0.50931895", "0.5087743", "0.50827277", "0.508091", "0.5079896", "0.50728756", "0.506635", "0.50646484", "0.50598276", "0.5051095", "0.50442034", "0.5034121", "0.50227815", "0.5008525", "0.5003917", "0.50033945", "0.5002484", "0.4997127", "0.49916375", "0.49895492", "0.4987343", "0.49745822", "0.49549773", "0.49495748", "0.49483207", "0.49443227", "0.49394208", "0.4935806", "0.49357215", "0.49215195", "0.49159306", "0.49091235", "0.4906869", "0.49031273", "0.48934", "0.48872113", "0.48858744", "0.4877737", "0.4874273", "0.48670974", "0.48637822", "0.48595774", "0.48528624", "0.48524582", "0.485019", "0.4845463", "0.48405242", "0.48361325", "0.48343396", "0.4821164", "0.48117098", "0.48113686", "0.48110485", "0.48102424", "0.4802814", "0.4801128", "0.47997198", "0.47964677", "0.4789616", "0.4788183" ]
0.68091875
8
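The two conversion helpers that recur throughout this block of rows — formatEditText (stored text to edit-format text, the positive document of the row above) and storedText (edit text back to a stored representation, which appears among the negatives above and as the next row's document) — form a round trip. The following is a minimal, hypothetical sketch of that round trip; GenNumber and GenNumberError are assumed stand-ins for the source project's real helpers, which are not defined anywhere in this dump, and plain float() parsing stands in for the project's format-aware number parser.

# Hypothetical sketch only: GenNumber/GenNumberError are assumed stand-ins,
# and float() replaces the real format-aware parser from the source project.

class GenNumberError(ValueError):
    """Raised when text cannot be parsed as a number."""

class GenNumber:
    def __init__(self, storedText=None):
        self.num = 0
        if storedText is not None:
            self.setFromStr(storedText, None)

    def setFromStr(self, editText, format):
        try:
            self.num = float(editText)   # assumption: the real class honors `format`
        except (TypeError, ValueError):
            raise GenNumberError(editText)
        return self

    def numStr(self, format):
        return repr(self.num)            # assumption: the real class applies `format`

    def __repr__(self):
        return repr(self.num)

def formatEditText(storedText, format=None):
    # Stored text -> (edit-format text, validity flag), as in the row above.
    try:
        return (GenNumber(storedText).numStr(format), True)
    except GenNumberError:
        return (storedText, not storedText)   # an empty stored value is still valid

def storedText(editText, format=None, isRequired=False):
    # Edit text -> (stored representation, validity flag), the inverse direction.
    try:
        return (repr(GenNumber().setFromStr(editText, format)), True)
    except GenNumberError:
        return (editText, not editText and not isRequired)

Under these assumptions, formatEditText('3.5') yields ('3.5', True) for display in an editor, formatEditText('abc') yields ('abc', False), and storedText('', isRequired=True) yields ('', False) — the invalid input is flagged but never discarded, matching the validity semantics of the documents in these rows.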
Return tuple of stored text from edited text and bool validity, using self.format
def storedText(self, editText): try: return (repr(GenNumber().setFromStr(editText, self.format)), True) except GenNumberError: return (editText, not editText and not self.isRequired)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if check_text:\n check(text)\n \n print(text)\n \n ### managing placeholders\n text = parsers['v'].main(text)\n \n ### saving names\n if 
index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def get_data_from_nonformat_text():\n pass", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def syntax_text():", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: 
en_words[item_no] += ' ' \n \n return de_words, en_words", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def process_text(self, text, language):", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def getText(self):", "def process_raw_text(self, file_name, column_side):\n self.mvc_check()\n\n model_txt = None\n if column_side == LEFT_TEXT:\n model_txt = self.model.txt1\n elif column_side == RIGHT_TEXT:\n model_txt = self.model.txt2\n\n model_txt.open_raw(file_name)\n model_txt.process_raw()\n self.opened_txt[column_side] = True\n self.can_align = self.opened_txt[LEFT_TEXT] and self.opened_txt[RIGHT_TEXT]\n\n # Goldsmith\n model_txt.make_trie(column_side)\n model_txt.apply_goldsmith(1.1, 20, column_side)\n\n # Associate word for alignment if both text were opened\n if self.can_align:\n for view in self.views:\n view.end_task()\n view.change_task(\"Associating words\")\n self.model.associate_words(1.5)\n for view in self.views:\n view.end_task()\n\n # TODO : coherent saving to database using model.save_data\n\n return model_txt.str", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise PlansError(\"Couldn't get edit text, are we logged in?\")\n 
else:\n plan = u'' + plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def post_process_text(self, text):\n\t\treturn text", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def postprocess(self, text):\r\n return text", "def _get_and_build_text_structure(self):\n return Text_structure(self.filename, self)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def get_text(self):", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def get_text(text_input):\r\n return text_input", "def preprocess_text(text: str) -> Tuple[List[str], Dict]:\n raise NotImplementedError", "def _get_delta_text_string(self):\n textstring = \"\"\n if (\n self.is_commit_test is True\n ): # include commits if this is an analysis of commit history\n # Write SHA1 commits under examination\n if len(self.delta_fp_string_dict.delta_dict[\"commits\"]) > 0:\n textstring += (\n os.linesep + \"Commit history SHA1 for this analysis:\" + os.linesep\n )\n for sha1_commit in self.delta_fp_string_dict.delta_dict[\"commits\"]:\n textstring += \" \" + sha1_commit + os.linesep\n textstring += os.linesep\n elif (\n self.is_branch_test is True\n ): # include branches if this is a branch v branch analysis\n if len(self.delta_fp_string_dict.delta_dict[\"branches\"]) > 0:\n textstring += os.linesep + \"Branches under analysis:\" + os.linesep\n for branch in self.delta_fp_string_dict.delta_dict[\"branches\"]:\n textstring += \" \" + branch + os.linesep\n textstring += os.linesep\n\n # include added files\n if len(self.delta_fp_string_dict.delta_dict[\"added\"]) > 0:\n for added_file in self.delta_fp_string_dict.delta_dict[\"added\"]:\n add_append_string = \"[A]:\" + added_file + 
os.linesep\n textstring += add_append_string\n # include deleted files\n if len(self.delta_fp_string_dict.delta_dict[\"deleted\"]) > 0:\n for deleted_file in self.delta_fp_string_dict.delta_dict[\"deleted\"]:\n del_append_string = \"[D]:\" + deleted_file + os.linesep\n textstring += del_append_string\n # include modified files\n if len(self.delta_fp_string_dict.delta_dict[\"modified\"]) > 0:\n for modified_file in self.delta_fp_string_dict.delta_dict[\"modified\"]:\n mod_append_string = \"[M]:\" + modified_file + os.linesep\n textstring += mod_append_string\n\n return textstring", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def text(self) -> str:", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def get_PoemText(self):\n return self.text if self.text else \"No Text Yet\\n\"", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def get_mark(text, short):\n\n line = text.readline()\n\n # check that the line begins with a valid entry type\n if not short and not re.match(r'^\\s*(text|mark) = \"', line):\n raise ValueError('Bad entry: ' + line)\n\n # read until the number of double-quotes is even\n while line.count('\"') % 2:\n next_line = text.readline()\n\n if not next_line:\n raise EOFError('Bad entry: ' + line[:20] + '...')\n\n line += next_line\n if short:\n pattern = r'^\"(.*?)\"\\s*$'\n else:\n pattern = r'^\\s*(text|mark) = \"(.*?)\"\\s*$'\n entry = re.match(pattern, line, re.DOTALL)\n\n return entry.groups()[-1].replace('\"\"', '\"')", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def refang(self, text: str):", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def text(value):\n return True", "def preprocess(self, text):\r\n return text", "async def process_input(cls, content: str, description: str) -> tuple[str, str]:\n return content, description", "def rich(text):\n return full(text, False)", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def formatTexts(owned, shared):\n owned_texts = []\n shared_texts = []\n # Catches error if there is no score from the databse search\n try:\n for text in range(len(owned)):\n owned_texts.append(\n {'title': owned[text][0], 'body': owned[text][1], 'score': 
owned[text][2]})\n for text in range(len(shared)):\n shared_texts.append(\n {'title': shared[text][0], 'body': shared[text][1], 'score': shared[text][2]})\n except:\n for text in range(len(owned)):\n owned_texts.append(\n {'title': owned[text][0], 'body': owned[text][1]})\n for text in range(len(shared)):\n shared_texts.append(\n {'title': shared[text][0], 'body': shared[text][1]})\n # Adds False if the either of the text arrays are empty\n if len(owned_texts) == 0:\n owned_texts.append(False)\n if len(shared_texts) == 0:\n shared_texts.append(False)\n return owned_texts, shared_texts", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def result(target_text):\n\n display_text(target_text)\n readability(target_text)", "def obtain_text():\n pass", "def element_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier))\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def getData(self):\n if self.groupMode:\n return (self.nameEdit.text(),\n self.languages[self.groupCombo.currentIndex()][0]\n )\n else:\n return (self.nameEdit.text(),\n self.descriptionEdit.text(),\n self.groupCombo.currentText(),\n self.templateEdit.toPlainText()\n )", "def define_text_type(self):\n # only one text\n if len(self.text) == 1:\n text = self.text[0]\n\n # DIRECTORY\n if os.path.isdir(text):\n # retrieve files\n file_list = []\n\n # only fetch files in this folder\n for path, _, files in os.walk(text):\n if self.recursive is False:\n if path == text:\n for filename in files:\n filepath = Path(f\"{path}/{filename}\")\n file_list.append((filepath, filename))\n\n # recursively fetch all files\n else:\n for filename in files:\n filepath = Path(f\"{path}/{filename}\")\n file_list.append((filepath, filename))\n\n file_list.sort()\n self.input = file_list\n return \"file\"\n\n # SINGLE FILE\n elif os.path.isfile(text):\n filepath = Path(text)\n self.input.append((filepath, None))\n return \"file\"\n\n # STRING\n else:\n self.input.append(text)\n return \"string\"\n\n else:\n # MORE STRINGS\n self.input = self.text\n return \"string\"", "def apply(self, text):", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def get_valid_value(self):\n\n text = 
self.currentText()\n lookup = set(self.itemText(i) for i in range(self.count()))\n if text not in lookup:\n return None\n\n return text", "def __getitem__(self, index):\n txt_seq = self.txt_seqs[index]\n word_id_seq = self._preprocess(txt_seq)\n return word_id_seq, txt_seq", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def getMarked(self):\n if not self.selection.isSelection():\n return u\"\"\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx1 = sm1[1]\n cx2 = sm2[1]\n if (w1 == w2):\n return w1.string[cx1:cx2]\n # Get the word fragments at the beginning and end of the selection\n snip1 = w1.string[cx1:]\n snip2 = w2.string[:cx2]\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n # Start the text string with the format of the first line\n text = tl1.para.getFormat() + snip1\n # then get all intervening words\n if (tl1 == tl2): # only 1 line is involved\n # get words from wx1+1 to wx2-1 (incl.)\n for w in tl1.twords[wx1+1:wx2]:\n text += u\" \" + w.string\n ch = u\" \"\n\n else: # deletion block covers >1 line\n # get words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n text += u\" \" + w.string\n # get all the intervening lines\n while True:\n para = tl1.para\n tl1 = self.rsubject.nextLine(tl1)\n if (tl1.para == para):\n text += u\" \"\n else:\n text += u\"\\n\" + tl1.para.getFormat()\n if (tl1 == tl2): break\n text += tl1.getText()\n\n ch = u\"\"\n # Add the remaining words in tl2 up to w2-1\n for w in tl2.twords[:wx2]:\n text += ch + w.string\n ch = u\" \"\n\n # Add the fragment of the last marked word\n return text + ch + snip2", "def get_docdata(self, doc):\n try:\n text = doc.data['originalText'] # grab original full text\n except:\n text = 'no text in doc'\n\n try:\n auths = self.authorize(doc) # a list of authors\n except:\n auths = []\n\n return text, auths", "def process_word(self, str_word, column_side):\n self.mvc_check()\n\n if self.can_align:\n processed_txt = None\n translated_txt = None\n if column_side == LEFT_TEXT:\n processed_txt = self.model.txt1\n translated_txt = self.model.txt2\n elif column_side == RIGHT_TEXT:\n processed_txt = self.model.txt2\n translated_txt = self.model.txt1\n\n word_info = processed_txt.gold.build_word_info(str_word)\n if str_word in processed_txt.data:\n # the selected word is a regular word, just display information\n align_dist, align_str = 
self.model.dist_word(str_word)\n word_info_aligned = translated_txt.gold.build_word_info(align_str)\n return processed_txt[str_word], translated_txt[align_str], word_info, word_info_aligned\n else:\n # TODO : a new entry, compute everything, but for now, raise an error\n raise WordNotInDatabase\n raise DataNotProcessed", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def reformat_text(self, text):\n xml = BeautifulSoup(text)\n self.remove_header_and_footer(xml)\n self.process_superscripts(xml)\n self.remove_footnotes(xml)\n text = xml.get_text() # Strip XML tags.\n text = self.join_hyphenated_words(text)\n text = self.remove_linebreaks(text)\n return text", "def __getitem__(self, index):\n txt_seq = self.txt_seqs[index]\n word_id_seq, punc_id_seq = self._preprocess(txt_seq)\n return word_id_seq, punc_id_seq", "def getTextUnits_old(filename):\n doc = Document(filename)\n units = list()\n \n unit_tracker = defaultdict(int)\n \n non_units = [\"name:\", \"date:\", \"date\", \"series\", \"transcriber\", \"thesis:\", \"currently:\", \"note\", \"comment\", \"grandparents:\", \"transcript:\", \"note:\"]\n\n ongoing_answer = \"\"\n\n \n\n # iterate over all paragraphs to get text units\n for para in doc.paragraphs:\n paragraph = para.text\n # ensure it is not an empty line\n if len(paragraph.strip())>0:\n # get first word\n formatted_para = paragraph.lstrip()\n unit_type = formatted_para.partition(' ')[0]\n # in case it is in the format of e.g 'George Salton:'\n \n # e.g AJ:, B.\n m = re.compile('[A-Z][A-Z]?[.|:|-]')\n type1 = m.match(unit_type)\n\n # e.g [WJ]\n n = re.compile('\\[[A-Z][A-Z]\\]')\n typer2= n.match(unit_type)\n\n # timestamp e.g 15:01:27\n o = re.compile('[0-9]?[0-9]:[0-9][0-9]:[0-9][0-9]')\n type3 = o.match(unit_type) \n \n \n \n\n\n # else parse them according to formatting guidelines\n if (\"Question:\" in unit_type or\n type1 or\n \"Answer:\" in unit_type or \n typer2 or\n type3):\n\n # check if there was an ongoing paragraph\n #units.append({'unit': paragraph})\n \n if ongoing_answer: \n units.append({'unit': ongoing_answer})\n\n # reset it\n ongoing_answer = \"\"\n ongoing_answer += paragraph\n \n # update tracker\n unit_tracker[unit_type] += 1\n\n elif (unit_type.endswith(':') and\n unit_type.lower() not in non_units and\n unit_type[:-1].isalpha()):\n \n \n units.append({'unit': paragraph})\n # update tracker\n unit_tracker[unit_type] += 1\n \n \n # backup method,in case it is in 
the format of e.g 'George Salton:'\n elif len(paragraph.split()) > 3:\n backup_type = paragraph.split()[1]\n backup_two = paragraph.split()[2]\n\n if ((':' in backup_type or backup_type.lower() not in non_units) or\n (':' in backup_two or backup_two.lower() not in non_units)): \n \n if ((paragraph.strip()[0].islower() and len(paragraph.strip()) > 5) or (paragraph.strip()[-1] in ['.','!','?'])) and len(units) >0:\n units[-1]['unit']=units[-1]['unit']+ ' '+paragraph\n # update tracker\n unit_tracker[unit_type] += 1\n else:\n units.append({'unit':paragraph})\n unit_tracker[unit_type] += 1\n # if it is none of these cases, maybe there is an ongoing answer\n \n elif (ongoing_answer and ongoing_answer != paragraph):\n \n if not any(non_unit in paragraph.lower() for non_unit in non_units):\n ongoing_answer += paragraph\n else:\n units.append({'unit':paragraph})\n\n if len(unit_tracker) < 2:\n return []\n \n return units", "def text_example():\n \n text_store = \"01000001011000010010000001000010011000100000110100001010001100010011001000110011\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"You should be able to save this file and open it in a text editor like Notepad or Nano to read it. If you edit the values you may find it does not display properly as text. Unchanged, it should be interpreted by a text editor as:\\n\\nAa Bb\\n123\\n\\nAs the file was made on a Windows machines you may find other systems display the line breaks differently.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "def reparseText(parsed):\n out = []\n buffer = ''\n for type, data in parsed:\n if type is RAW:\n buffer += data\n else:\n if buffer:\n b = re.sub(r'\\s+', ' ' , buffer)\n out.append((RAW, b))\n buffer = ''\n out.append((type,data))\n if buffer:\n b = re.sub(r'\\s+', ' ' , buffer)\n out.append((RAW, b))\n return out", "def getParameters(self):\n if self.idButton.isChecked():\n rev = self.idEdit.text()\n elif self.tagButton.isChecked():\n rev = self.tagCombo.currentText()\n elif self.branchButton.isChecked():\n rev = self.branchCombo.currentText()\n elif self.remoteBranchButton.isChecked():\n rev = self.remoteBranchCombo.currentText()\n else:\n rev = \"\"\n \n return (\n rev,\n self.commitGroupBox.isChecked(),\n self.commitMessageEdit.toPlainText(),\n self.addLogCheckBox.isChecked(),\n self.diffstatCheckBox.isChecked(),\n )", "def post_get_convert(self, site, getText):\n return getText", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def get_version_info() -> Tuple[Text, Text]:", "def displayText():\n global entryWidget,entryWidget1,entryWidget2,entryWidget3,entryWidget4 ,entryWidget5,entryWidget6\n global thefilename,itrial,do_stim, delaylen,ntest_arms,stop_if_error,timeout_arm_sec\n thefilename=entryWidget.get().strip()\n itrial=entryWidget1.get().strip()\n do_stim=entryWidget2.get().strip()\n delaylen=entryWidget3.get().strip()\n ntest_arms=entryWidget4.get().strip()\n stop_if_error=int(entryWidget5.get().strip())==1 # convert to logical\n print 'stop_if_error is ', stop_if_error\n\n\n timeout_arm_sec=entryWidget6.get().strip()\n root.destroy()\n return thefilename,itrial,do_stim,delaylen,ntest_arms,stop_if_error,timeout_arm_sec", "def addText(self, text, textxy, textargs=None):\n if textargs is None:\n textargs = {}\n restore = []\n restore.append({'text': self.attr('text'),\n 'textxy': self.attr('textxy'),\n 'textargs': self.attr('textargs')})\n text, textxy, textargs = self._checkValidTextInput(text, 
textxy, textargs)\n attrs = {'text': text, 'textxy': textxy, 'textargs': textargs}\n if self.attr('text', None) is None:\n self.update(attrs)\n else: # need to merge existing with new\n self.checkValidText() # makes sure existing info are as lists\n for key in attrs:\n self.update({key: self.attr(key) + attrs[key]})\n self.checkValidText()\n return restore", "def verify(self):\n\n # tekstlig testing om koden fungerer\n text = self.klar_tekst_start + \" ble sendt til mottaker som krypteringen \" + \\\n self.crypto + \".\\nMottaker dekrypterte dette til \" + self.klar_tekst_slutt\n\n return text", "def reformat(ctx):\n pass", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def text(self, tid, lang):\n csv_paths = [\n os.path.join(self.config.csv.base, self.config.csv.path.texts),\n os.path.join(self.config.csv.base, self.config.csv.path.texts_patch)\n ]\n _text = None\n while _text is None:\n for csv_path in csv_paths:\n with open(csv_path, encoding=\"utf8\") as f:\n texts_reader = csv.DictReader(f)\n for row in texts_reader:\n keys = ['v', ' ']\n for key in keys:\n if key in row.keys():\n if row.get(key) == tid:\n s = row[lang]\n _text = s.replace('\\q', '\\\"')\n if _text is None:\n _text = ''\n\n return _text", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def create_message_text(self):\n\n if self.text_received in configs.word_dict:\n return configs.word_dict[self.text_received]\n else:\n return 'Please send either the word ‘pizza’ or ‘ice cream’ for a different response'", "def get_text(self):\n return self.rule_id + '\\t' + self.rule_text" ]
[ "0.7803675", "0.77552223", "0.7596814", "0.7596814", "0.73449266", "0.7324226", "0.7316339", "0.7187415", "0.71592367", "0.6829843", "0.67425257", "0.6727259", "0.66042066", "0.6263688", "0.6086649", "0.5922825", "0.58909976", "0.58909976", "0.58909976", "0.58909976", "0.58909976", "0.587481", "0.58734727", "0.5806829", "0.5729706", "0.57161516", "0.56222916", "0.55517334", "0.55221933", "0.55142266", "0.5462706", "0.5446586", "0.54081523", "0.53653085", "0.535233", "0.5335211", "0.52871853", "0.5260944", "0.523887", "0.5213082", "0.5212169", "0.5187142", "0.51858795", "0.51732826", "0.5169958", "0.51650834", "0.516486", "0.5158107", "0.5148129", "0.51450187", "0.514131", "0.5139042", "0.51371616", "0.5136852", "0.51105696", "0.5104157", "0.50990427", "0.5076887", "0.5067613", "0.5065606", "0.5051904", "0.5029731", "0.5027122", "0.5023285", "0.5006465", "0.5003772", "0.4982362", "0.4980327", "0.4976877", "0.49736816", "0.49548814", "0.49530527", "0.4949283", "0.49441183", "0.49374625", "0.4936495", "0.49151513", "0.4915037", "0.49054784", "0.49002603", "0.48973164", "0.4896649", "0.48962077", "0.48902816", "0.488993", "0.48881322", "0.48868853", "0.4876415", "0.48740125", "0.48639", "0.48580468", "0.48563927", "0.48558256", "0.48514247", "0.4838778", "0.48338887", "0.4833695", "0.48289192", "0.48288813", "0.482844" ]
0.68767637
9
Return value to be compared for sorting and conditionals
def sortValue(self, data):
    storedText = data.get(self.name, '')
    try:
        return GenNumber(storedText).num
    except GenNumberError:
        return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare(self, value: int, /) -> None:", "def compare(self) -> int:", "def compareFunction( self, first, second ):\n for ascending,column in self.sortOrder:\n aValue,bValue = column.get(first),column.get(second)\n diff = cmp(aValue,bValue)\n if diff:\n if not ascending:\n return - diff \n else:\n return diff \n return 0", "def cmpValue(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n return val1 > val2", "def cmpValue(subInfo1, subInfo2):\n val1 = subInfo1[VALUE]\n val2 = subInfo2[VALUE]\n return val1 > val2", "def compare(a, b):\n if a > b:\n return a\n return b", "def _get_comparison_func(self, adjective):\n return self.SONG_ADJECTIVES.get(adjective, {}).get(\"comparison\")", "def item_comparer(self):\n return self.item_comparer_value", "def compare(a,b):\r\n if a>b:\r\n return 1\r\n elif a==b:\r\n return 0\r\n else:\r\n return -1", "def compare(self, variable):\n if (not self.lower and variable > self.value) or \\\n (self.lower and variable < self.value):\n return pt.common.Status.SUCCESS\n return pt.common.Status.FAILURE", "def operator(self, sort):\r\n return None", "def comparison(self):\n return self._comparison", "def _cmp(a, b): # pylint: disable=invalid-name\n return (a > b) - (a < b)", "def less_than_or_equal(self) -> global___Expression:", "def __cmp__(self,o):\n\t\tif o != None:\n\t\t\treturn cmp(self.value,o.value)\n\t\telse:\n\t\t\treturn cmp(self.value,0)", "def __cmp__(self, x):\n if self.score < x.score: return -1\n elif self.score == x.score: return 0\n else: return 1", "def comparison(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"comparison\")", "def _compare(self, value, target):\n result = getattr(self.reg, target) - value\n self.reg.N = result >> 7\n self.reg.C = getattr(self.reg, target) >= value\n self.reg.Z = result == 0", "def __ge__( self, value ):\r\n\t\treturn ( self > value ) or ( self == value )", "def test_key_predicate(datum):\n return 0 < datum", "def cmp(x, y):\n return (x > y) - (x < y)", "def _less_than_or_equal_to_op(spec):", "def cmp(a, b):\n return (a > b) - (a < b)", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def cmp(x, y):\n return (x > y) - (x < y)", "def less(value, other):\n return value > other", "def _default_eval_func(a, b):\n emphasis = \"r2\"\n a_value = getattr(a, emphasis)\n b_value = getattr(b, emphasis)\n return a_value > b_value", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def comparison(self) -> str:\n return self._values.get('comparison')", "def greater_than_or_equal(self) -> global___Expression:", "def cmp ( self, object1, object2 ):\n return cmp( self.get_raw_value( object1 ),\n self.get_raw_value( object2 ) )", "def _test_method_sorter(_, x, y):\n if x == 'test_gc':\n return 1\n if y == 'test_gc':\n return -1\n if x > y:\n return 1\n if x < y:\n return -1\n return 0", "def greater(value, other):\n return value < other", "def _greater_than_or_equal_to_op(spec):", "def compare(a, b, larger_is_better):\n\n if larger_is_better:\n return a > b\n else:\n return a < b", "def cmp(x, y):\n if x == y:\n return 0\n elif x is None:\n if y is None:\n return 0\n else:\n return -1\n elif y is None:\n return 1\n else:\n # TODO: consider casting the values to string or int or floats?\n # note that this is the minimal replacement function\n return (x > y) - (x < y)", "def compare(x, y):\n if x >= y:\n return 1.0\n 
else:\n return 0.0", "def __gt__(self, value):\n self = self.__ge__(value)\n return self.__invert__()", "def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other", "def _custom_sorter(self, key1, key2):\n\n col = self._col\n ascending = self._colSortFlag[col]\n real = self.get_real_col(col)\n item1 = self.itemDataMap[key1][real]\n item2 = self.itemDataMap[key2][real]\n\n # Internationalization of string sorting with locale module\n if isinstance(item1, str) and isinstance(item2, str):\n cmpVal = locale.strcoll(item1, item2)\n elif isinstance(item1, bytes) or isinstance(item2, bytes):\n cmpVal = locale.strcoll(str(item1), str(item2))\n else:\n cmpVal = cmp(item1, item2)\n\n # If the items are equal, then pick something else to make the sort value unique\n if cmpVal == 0:\n cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))\n\n if ascending:\n return cmpVal\n else:\n return -cmpVal", "def _comparison_function(comp, value=0.0, **kwargs):\n if comp == 'g' or comp == '>':\n func = np.greater\n elif comp == 'ge' or comp == '>=':\n func = np.greater_equal\n elif comp == 'l' or comp == '<':\n func = np.less\n elif comp == 'le' or comp == '<=':\n func = np.less_equal\n elif comp == 'e' or comp == '=' or comp == '==':\n func = np.equal\n elif comp == 'ne' or comp == '!=':\n func = np.not_equal\n else:\n raise ValueError(\"Unrecognized comparison '{}'.\".format(comp))\n\n def comp_func(xx):\n return func(xx, value, **kwargs)\n\n return comp_func", "def adjustedCompareValue(self, value):\n return value", "def foo_2(x, y):\n\tif x > y:\n\t\treturn x\n\treturn y", "def cmp(x, y):\n if x + y > y + x: return 1\n elif x + y == y + x: return 0\n else: return -1", "def test_get_sort_value_with_after_object(self):\n test_object = self.test.datum_type2\n actual = test_object.get_sort_value(after_object=self.test.datum_type1)\n expected = 10101\n self.assertEqual(expected, actual)", "def statusCompare (x, y):\n xs = db.status.get(x, 'order')\n ys = db.status.get(y, 'order')\n c = float(xs) - float(ys)\n if c >= 0.0: \n return int(c)\n else:\n return -int(abs(c))", "def sortValue(self, data):\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''", "def _default_eval_func(a, b):\n emphasis = \"accuracy\"\n a_value = getattr(a, emphasis)\n b_value = getattr(b, emphasis)\n return a_value > b_value", "def __cmp__(self, other) :\n if self.strength > other.strength:\n return 1;\n elif self.strength == other.strength :\n if self.rank > other.rank :\n return 1;\n elif self.rank == other.rank :\n return 1 if self.kickers > other.kickers else -1 if self.kickers < other.kickers else 0;\n return -1;", "def best_value(self):\r\n return self._best_value", "def get_result(mishkaScore: int, chrisScore: int) -> bool:\n if mishkaScore > chrisScore:\n return \"M\"\n if mishkaScore < chrisScore:\n return \"C\"\n return \"D\"", "def getValue(self):\n if self.left.getValue() >= self.right.getValue():\n return self.left.getValue()\n\n return self.right.getValue()", "def compare(num1, num2):\n if num1 > num2:\n return num1, num2\n return num2, num1", "def decide():", "def compare(self, *args):\n return _ida_hexrays.creturn_t_compare(self, *args)", "def lt_success_func(target, result):\n if result is None:\n return False\n return result < target", "def getValue(self):\n r = 1 if self.left.getValue() > self.right.getValue() else 0\n return r", "def test_get_sort_value_without_after_object(self):\n 
test_object = self.test.datum_type2\n actual = test_object.get_sort_value()\n expected = 10101\n self.assertEqual(expected, actual)", "def compare(self, operator, value, **kw):\n\n return operator(self.comparator, value)", "def assembly_compare(x, y) :\n if x.kinf() < y.kinf() :\n return 1\n elif x.kinf() == y.kinf() :\n return 0\n else : #x.resultType < y.resultType\n return -1", "def _cmp(x, y):\n if x[1].count > y[1].count:\n return CmpRelation.GREATER\n if x[1].count < y[1].count:\n return CmpRelation.LESS\n if x[1].ptn_length < y[1].ptn_length:\n return CmpRelation.GREATER\n if x[1].ptn_length > y[1].ptn_length:\n return CmpRelation.LESS\n return CmpRelation.EQUAL", "def comparator(self):\n return self.get_scores()", "def getValue(self):\n if self.left.getValue() <= self.right.getValue():\n return self.left.getValue()\n\n return self.right.getValue()", "def __cmp__(self, other):\n \n result = cmp(self.value, other.value)\n if result == 0:\n \"\"\"Values are identical, suits differ. Doesn't affect ranking in\n any way.\"\"\"\n result = cmp(self.suit, other.suit)\n return result", "def getValue(self):\n r = 1 if self.left.getValue() <= self.right.getValue() else 0\n return r", "def gt_success_func(target, result):\n if result is None:\n return False\n return result > target", "def compare(a, b):\n return a - b", "def getValue(self):\n r = 1 if self.left.getValue() >= self.right.getValue() else 0\n return r", "def getValue(self):\n r = 1 if self.left.getValue() < self.right.getValue() else 0\n return r", "def compare(self, comp_els):\n return min(comp_els, key= lambda x: x[1])[0]", "def compare(self, *args):\n return _ida_hexrays.cwhile_t_compare(self, *args)", "def comparator_converter(self, val):\r\n return val", "def _greater_than_op(spec):", "def cmp(a, b):\n if a is None and b is None:\n return 0\n elif a is None:\n return -1\n elif b is None:\n return 1\n else:\n return (a > b) - (a < b)", "def _slack_get_value(slack_response, search_value, search_field, return_field, classifier):\n if not slack_response['ok']:\n return False\n for item in slack_response[classifier]:\n if search_field in item and search_value == item[search_field] and return_field in item:\n return item[return_field]", "def _less_than_op(spec):", "def compare(self, comp_els):\n return max(comp_els, key=lambda x: x[1])[0]", "def item_comparer(self, value):\n self.item_comparer_value = value", "def compare_to(self, other) -> int:\n if self.id == other.id:\n return 0\n if self.status != other.status:\n return -1 if self.status < other.status else 1\n if self.last_played != other.last_played:\n return -1 if self.last_played < other.last_played else 1\n return -1 if self.id < other.id else 1", "def value(self) -> bool:", "def compare_entities(e1, e2):\n sp1 = e1.sorting_priority\n sp2 = e2.sorting_priority\n if sp1 > sp2:\n return 1\n elif sp1 == sp2:\n return 0\n else:\n return -1", "def try_compare(obj, key, comparison, search_value, override_value=\"\"):\n value = override_value if override_value else obj[key]\n try:\n return getattr(value, comparison)(search_value)\n except KeyError:\n return False\n except Exception as e:\n logging.warning('The following exception was ignored in {0}: {1}'.format(try_compare.__name__, e))", "def compare(first, second):\n for i in data:\n if(i['name'] == first ):\n first_num = i['follower_count']\n if(i['name'] == second):\n second_num = i['follower_count']\n if first_num > second_num:\n return 'a'\n else:\n return 'b'", "def compare(self, *args):\n return 
_ida_hexrays.fnumber_t_compare(self, *args)", "def fn_if(self, value):\n\n condition_name, true_value, false_value = value\n if self.parser.conditions.evaluate(condition_name):\n return true_value\n else:\n return false_value", "def compare(self, *args):\n return _ida_frame.stkpnt_t_compare(self, *args)", "def compare(self, *args):\n return _ida_hexrays.cnumber_t_compare(self, *args)", "def __cmp__(self, other):\n \n result = cmp(self.rank(), other.rank())\n if (result == 0):\n # Compare hand values\n for i in range(len(self.values())):\n result = cmp(self.values()[i], other.values()[i])\n if (result != 0):\n return result\n return result", "def __cmp__(self, other):\n if options.rank_by.lower() != \"money\":\n \"\"\"flags ▲, money ▲, hints ▼, time ▼\"\"\"\n this, that = len(self.flags), len(other.flags)\n if this == that:\n this, that = self.money, other.money\n if this == that:\n this, that = len(other.hints), len(self.hints)\n if this == that:\n this, that = other.last_scored(), self.last_scored()\n else:\n \"\"\"money ▲, hints ▼, time ▼, flags ▲\"\"\"\n this, that = self.money, other.money\n if this == that:\n this, that = len(other.hints), len(self.hints)\n if this == that:\n this, that = other.last_scored(), self.last_scored()\n if this == that:\n this, that = len(self.flags), len(other.flags)\n if this < that:\n return 1\n elif this == that:\n return 0\n else:\n return -1", "def ge_success_func(target, result):\n if result is None:\n return False\n return result >= target", "def le(self, val):\n\t\treturn LessOrEquals(self, val)", "def compareAUTOR(offense1, offense2):\n \n if (offense1 == offense2):\n return 0\n elif (offense1 > offense2):\n return 1\n else:\n return -1", "def ge(self, val):\n\t\treturn GreaterOrEquals(self, val)", "def __lt__(self, rs):\n Number.comparisons += 1\n result = self.data < rs.data\n return result", "def __lt__(self, value):\n return self.name < value.name", "def answer_sorter(thing):\r\n try:\r\n return float(thing[0])\r\n except ValueError:\r\n # Put all non-numerical answers first.\r\n return float('-inf')", "def comparator(self) -> typing.Callable[[Vec, Vec, Term], bool]:\n pass", "def __value_of(sentiment):\n if sentiment == 'positive': return 1\n if sentiment == 'negative': return -1\n return 0", "def comparison(op):\n def comp(*args):\n if args:\n item = args[0]\n for o in args[1:]:\n if op(item, o):\n item = o\n else:\n return Boolean(False)\n return Boolean(True)\n else:\n return Boolean(True)\n return comp", "def GetPriorityValue(self, *args, **kwargs):\n pass" ]
[ "0.68654275", "0.68611443", "0.66724616", "0.6351402", "0.6351402", "0.63162804", "0.6138383", "0.6053028", "0.6039845", "0.5910521", "0.58821", "0.58723485", "0.5849007", "0.5812883", "0.5799768", "0.57915914", "0.5790346", "0.57894623", "0.5771585", "0.5759572", "0.5746019", "0.5740628", "0.5739194", "0.5738822", "0.57271117", "0.57232124", "0.5721245", "0.5719918", "0.5719918", "0.5710413", "0.5666354", "0.56614244", "0.5653205", "0.56457806", "0.56438506", "0.562456", "0.559111", "0.5568418", "0.554077", "0.55370283", "0.55257344", "0.5520409", "0.5503359", "0.550171", "0.5500652", "0.5496061", "0.5495651", "0.5488403", "0.5479606", "0.54749304", "0.5464671", "0.5463505", "0.54627633", "0.5460634", "0.5458287", "0.5454057", "0.54538625", "0.544756", "0.5444319", "0.5443308", "0.5440087", "0.54264873", "0.5423893", "0.5420949", "0.5419035", "0.5415935", "0.54151773", "0.5401301", "0.5401075", "0.54007626", "0.5400393", "0.53982997", "0.5398238", "0.53902316", "0.5387147", "0.53787", "0.53642255", "0.5362832", "0.53625894", "0.53396165", "0.5337684", "0.5337436", "0.5336828", "0.53298044", "0.5327047", "0.5311193", "0.53040814", "0.5295887", "0.5295521", "0.5292093", "0.5285901", "0.5283696", "0.5282874", "0.52811736", "0.52635443", "0.5261918", "0.5257477", "0.52570784", "0.52501166", "0.52414715", "0.52397144" ]
0.0
-1
Any format, prefix, suffix, html info in attrs dict
def __init__(self, name, attrs={}):
    TextFormat.__init__(self, name, attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = attr['value']\r\n\r\n return format", "def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):\n\n # Merge the attr dict and kw dict into a single attributes\n # dictionary (rewriting any attribute names, extracting\n # namespaces, and merging some values like css classes).\n attributes = {} # dict of key=(namespace,name): value=attribute_value\n if attr:\n for a, v in attr.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n if kw:\n for a, v in kw.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n\n # Add title attribute if missing, but it has an alt.\n if ('html', 'alt') in attributes and ('html', 'title') not in attributes:\n attributes[('html', 'title')] = attributes[('html', 'alt')]\n\n # Force both lang and xml:lang to be present and identical if\n # either exists. The lang takes precedence over xml:lang if\n # both exist.\n #if ('html', 'lang') in attributes:\n # attributes[('xml', 'lang')] = attributes[('html', 'lang')]\n #elif ('xml', 'lang') in attributes:\n # attributes[('html', 'lang')] = attributes[('xml', 'lang')]\n\n # Check all the HTML attributes to see if they are known and\n # allowed. Ignore attributes if in non-HTML namespaces.\n if allowed_attrs:\n for name in [key[1] for key in attributes if key[0] == 'html']:\n if name in _common_attributes or name in allowed_attrs:\n pass\n elif name.startswith('on'):\n pass # Too many event handlers to enumerate, just let them all pass.\n else:\n # Unknown or unallowed attribute.\n err = 'Illegal HTML attribute \"%s\" passed to formatter' % name\n raise ValueError(err)\n\n # Finally, format them all as a single string.\n if attributes:\n # Construct a formatted string containing all attributes\n # with their values escaped. Any html:* namespace\n # attributes drop the namespace prefix. 
We build this by\n # separating the attributes into three categories:\n #\n # * Those without any namespace (should only be xmlns attributes)\n # * Those in the HTML namespace (we drop the html: prefix for these)\n # * Those in any other non-HTML namespace, including xml:\n\n xmlnslist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if not k[0]]\n htmllist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] == 'html']\n otherlist = ['%s:%s=\"%s\"' % (k[0], k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] and k[0] != 'html']\n\n # Join all these lists together in a space-separated string. Also\n # prefix the whole thing with a space too.\n htmllist.sort()\n otherlist.sort()\n all = [''] + xmlnslist + htmllist + otherlist\n return ' '.join(all)\n return ''", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def back_to_tag(tag, attrs):\n sol = '<' + tag\n for (prop, val) in attrs:\n sol += ' ' + prop + '=\"' + val + '\"'\n sol += '>'\n return sol", "def add_attrs(value, arg):\n try:\n # Split list on comma\n kv_pairs = arg.split(\",\")\n except ValueError:\n raise template.TemplateSyntaxError(\n \"add_attrs requires as an argument a string in the format 'key:value, key1:value1, key2:value2...'\"\n )\n\n\n # Create dictionary\n html_attrs = dict()\n\n # Clean items and add attribute pairs to dictionary\n for item in kv_pairs:\n item = item.strip()\n k, v = item.split(\":\")\n html_attrs.update({k.strip():v.strip()})\n\n return value.as_widget(attrs=html_attrs)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def 
string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def attrs(**kwds):\n\n def decorate(f):\n for k in kwds:\n setattr(f, k, kwds[k])\n return f\n\n return decorate", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc = '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def _attrs(self, element, attrs):\n for attr, val in list(attrs.items()):\n element.setAttribute(attr, val)\n return element", "def date_attrs(name):\n attrs = battrs(name)\n attrs.update({'class': 'form-control datepicker'})\n return attrs", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def extend_attribute_dictionary(attributedict, ns, name, value):\n\n key = ns, name\n if 
value is None:\n if key in attributedict:\n del attributedict[key]\n else:\n if ns == 'html' and key in attributedict:\n if name == 'class':\n # CSS classes are appended by space-separated list\n value = attributedict[key] + ' ' + value\n elif name == 'style':\n # CSS styles are appended by semicolon-separated rules list\n value = attributedict[key] + '; ' + value\n elif name in _html_attribute_boolflags:\n # All attributes must have a value. According to XHTML those\n # traditionally used as flags should have their value set to\n # the same as the attribute name.\n value = name\n attributedict[key] = value", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def parse_tag_attrs(tag_str, options_d=None, font_d=None, case=\"\", **kwargs):\n attr_b = kwargs.pop(\"attr\", \"\")\n auto_b = kwargs.pop(\"auto\", False)\n font_d = kwargs.pop(\"font_d\", font_d or {})\n options_d = kwargs.pop(\"options_d\", options_d or {})\n case = kwargs.pop(\"case\", case)\n widget = kwargs.pop(\"widget\", None)\n text_w = kwargs.pop(text_s, None)\n bad_opts = []\n # INTs: height repeatdelay repeatinterval underline width; size fun fov\n for keyval in split_attrs(tag_str):\n if \"=\" in keyval:\n key, val = keyval.split(\"=\")\n val = unquote(val)\n elif keyval:\n key, val = keyval, None\n else:\n continue\n key = key.lower()\n key2, key3, key4 = key[:2], key[:3], key[:4]\n lowval = val.lower() if val else val\n key = unalias(key)\n kalias = alias(key)\n if val == \"None\": # in ('False', 'None') #\n pass\n elif key3 in (\n bg_s,\n background_s[:3],\n fg_s,\n foreground_s[:3],\n ) or kalias in (bg_s, fg_s):\n options_d.update(**{key: val})\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n options_d.update(**{key: val})\n if auto_b and compound_s not in options_d:\n options_d.update(compound=tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],) or kalias == bd_s:\n options_d.update(borderwidth=val)\n elif key4 in (command_s[:4], compound_s[:4],) or kalias in (\n command_as,\n compound_as,\n ):\n options_d.update(**{key: val})\n elif (\n key2 in (height_s[:2], width_s[:2])\n or key3 in (repeatdelay_s[:3], repeatinterval_s[:3])\n or kalias\n in (height_as, width_as, repeatdelay_as, repeatinterval_as)\n ):\n options_d.update(**{key: int(val)})\n elif (\n key2 in (cursor_s[:2],)\n or key3 == font_s[:3]\n or kalias in (cursor_as, font_as)\n ):\n options_d.update(**{key: val})\n elif key2 in (\"r\", relief_s[:2],) or kalias == relief_as:\n options_d.update(relief=val)\n if auto_b and borderwidth_s not in options_d and val != tk.FLAT:\n options_d.update(borderwidth=str(1))\n elif key2 == underline_s[:2] or kalias == underline_as:\n options_d.update(underline=-1 if val is None else int(val))\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ) or kalias in (selectbackground_as, selectforeground_as):\n options_d.update(**{key: val})\n # special for fonts\n elif key2 in (family_s[:2],) or kalias == family_as:\n font_d[family_s] = val\n elif key2 in (size_s[:2],) 
or kalias == size_as:\n try:\n font_d[size_s] = int(val)\n except ValueError:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: ERROR Setting Font Size to %r\" % val,\n Raise=True,\n )\n elif key3 in (bold_as, tk_font.BOLD[:3]) or kalias == bold_as:\n font_d[weight_s] = (\n tk_font.BOLD\n if str(val) not in (\"0\", \"False\",)\n else tk_font.NORMAL\n )\n elif key2 in (weight_s[:2],) or kalias == weight_as:\n font_d[weight_s] = val\n elif key2 in (italic_as, tk_font.ITALIC[:2]) or kalias == italic_as:\n font_d[slant_s] = (\n tk_font.ITALIC\n if str(val) not in (\"0\", \"False\",)\n else tk_font.ROMAN\n )\n elif key2 in (slant_s[:2],) or kalias == slant_as:\n font_d[slant_s] = val\n elif (\n key3 in (funderline_as, funderline_s[:3])\n or kalias == funderline_as\n ):\n font_d[underline_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n elif (\n key3 in (foverstrike_as, foverstrike_s[:3])\n or kalias == foverstrike_as\n ):\n font_d[overstrike_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n # special \"case\" implementation\n elif key3 in (case_s[:3],) or kalias == case_as:\n for s in (upper_s, capitalize_s, lower_s, title_s, swapcase_s):\n if s.startswith(lowval):\n case = s if s != capitalize_s else upper_s\n break\n elif (\n key2 == upper_s[:2]\n or key3 in (capitalize_s[:3],)\n or kalias in (upper_as, capitalize_as)\n ):\n if str(val) not in (\"0\", \"False\",):\n case = upper_s\n elif key2 in (lower_s[:2],) or kalias == lower_as:\n if str(val) not in (\"0\", \"False\",):\n case = lower_s\n elif key2 == title_s[:2] or kalias == title_as:\n if str(val) not in (\"0\", \"False\",):\n case = title_s\n elif key2 == swapcase_s[:2] or kalias == swapcase_as:\n if str(val) not in (\"0\", \"False\",):\n case = swapcase_s\n elif key in ():\n bad_opts.append((key, val))\n else:\n options_d.update(**{key: val})\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n if attr_b:\n return (\n case\n if attr_b == case_s\n else options_d.get(attr_b, font_d.get(attr_b))\n )\n return options_d, font_d, case", "def gen_tag_attrs(widget=None, options_d=None, font=None, case=None, **kwargs):\n auto_b = kwargs.get(\"auto\", False)\n case = kwargs.get(case_s, case)\n extend_b = kwargs.get(\"extend\", False)\n font = kwargs.pop(\"font\", font or {})\n index_i = kwargs.pop(\"index\", None)\n kmode_s = kwargs.get(\"kmode\", \"\") # a=alias, o=options, ''=unchanged\n options_d = kwargs.pop(\"options\", options_d or {})\n pare_b = kwargs.get(\"pare\", True)\n widget = kwargs.pop(\"widget\", widget)\n text_w = kwargs.get(text_s, None)\n recurse_b = kwargs.pop(\"recurse\", widget and isinstance(widget, TTWidget))\n fmt_s = \"\"\n font_d = {}\n w_font_d, w_options_d = {}, {}\n if index_i is not None and widget is None:\n raise Exception(\"Cannot set 'index' when 'widget' is None\")\n if widget: # and isinstance(widget, TTWidget): #\n excludes_t = () if widget.emulation_b else ()\n w_options_d = {\n k: v[-1]\n for k, v in widget.config().items()\n if len(v) == 5 and str(v[-1]) != str(v[-2]) and k not in excludes_t\n }\n try:\n w_options_d[case_s] = widget.case\n except AttributeError:\n pass\n w_font = widget.cget(font_s) # w_options_d.pop(font_s, None)\n w_font_d = get_font_dict(w_font) if w_font else {}\n if pare_b and w_font_d:\n def_w_font = widget.config(font_s)[-2]\n def_w_font_d = get_font_dict(def_w_font)\n w_font_d = pare_dict(w_font_d, def_w_font_d)\n if font:\n if isinstance(font, str):\n try:\n font = tk_font.nametofont(font)\n except 
tk.TclError:\n pass\n elif type(font) in (list, tuple):\n font = tk_font.Font(font=font)\n if isinstance(font, tk_font.Font):\n font = font.actual()\n if isinstance(font, dict):\n font_d = font\n if case: # is not None:\n options_d = _merge_dicts(options_d, dict(case=case))\n d = _merge_dicts(\n w_options_d,\n convert_font_dict_to_ttoptions_dict(w_font_d),\n options_d,\n convert_font_dict_to_ttoptions_dict(font_d),\n kwargs,\n )\n bad_opts = []\n for key, val in d.items():\n key = key.lower()\n if key in (\"auto\", \"extend\", \"kmode\", \"pare\",): # text_s, ): #\n continue\n key2, key3, key4 = key[:2], key[:3], key[:4]\n kalias = alias(key)\n koption = unalias(key)\n if kmode_s:\n if kmode_s[0] == \"a\": # alias\n keyout = kalias\n kfunc = alias\n auto_cpd, auto_bd = compound_as, bd_s\n elif kmode_s[0] == \"o\": # option\n keyout = koption\n kfunc = unalias\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n else:\n keyout = key\n kfunc = str\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n if val:\n val = quote(val)\n if (\n key3 in (bg_s, background_s[:3], fg_s, foreground_s[:3])\n or key2 == underline_s[:2]\n or kalias in (bg_s, fg_s, underline_as)\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_cpd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_cpd, tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],):\n if \"%s=%s \" % (auto_bd, 1) in fmt_s:\n if val != 1:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_bd, 1), \"%s=%s \" % (keyout, val)\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key4 in (compound_s[:4],) or kalias == compound_as:\n if \"%s=%s \" % (auto_cpd, tk.CENTER) in fmt_s:\n if val != tk.CENTER:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_cpd, tk.CENTER),\n \"%s=%s \" % (keyout, val),\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == cursor_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == font_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, get_named_font(val))\n elif key2 in (relief_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_bd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_bd, 1)\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sbd_s,\n selectborderwidth_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n # special for fonts\n elif key2 in (family_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (size_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (weight_s[:2],):\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.BOLD),\n 1\n if isinstance(val, str) and val.lower() == tk_font.BOLD\n else 0,\n )\n elif key2 == slant_s[:2]:\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.ITALIC),\n 1\n if isinstance(val, str) and val.lower() == tk_font.ITALIC\n else 0,\n )\n elif key3 in (funderline_as, funderline_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(funderline_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n elif key3 in (foverstrike_as, foverstrike_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(foverstrike_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n # special \"case\" implementation\n elif key3 == case_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(case_s), val)\n elif key2 == upper_s[:2] or key3 == capitalize_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(upper_s), val)\n elif key2 in (lower_s[:2], title_s[:2], swapcase_s[:2]):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key in 
():\n bad_opts.append((key, val))\n elif key in (text_s, text_as):\n if extend_b or widget:\n fmt_s += \"%s=%s \" % (keyout, val)\n else:\n # bad_opts.append((key, val))\n fmt_s += \"%s=%s \" % (keyout, val)\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n fmt = fmt_s.strip()\n if widget and isinstance(widget, TTWidget) and recurse_b:\n fmt = [\n fmt,\n ]\n for _, gathering in widget._get_kids(items=True):\n child = gathering[\"label\"]\n case = gathering.get(case_s, \"\")\n kid_options = {\n k: v[-1]\n for k, v in child.config().items()\n if len(v) == 5\n and str(v[-1]) != str(v[-2])\n and (k, v[-1]) not in w_options_d.items()\n and not (k in label_override_d and str(v[-1]) == \"0\")\n } #\n cf = kid_options.pop(font_s, None)\n cdf = child.config(font_s)[-2]\n if cf != cdf:\n c_font_d = pare_dict(get_font_dict(cf), get_font_dict(cdf))\n else:\n c_font_d = {}\n if case:\n kid_options.update(case=case)\n fmt.append(\n gen_tag_attrs(options=kid_options, font=c_font_d, **kwargs)\n )\n return fmt if index_i is None else fmt[index_i]", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def prepare_node_attrs(self):", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs", "def attkey_to_SVG_attribs(self,k):\n atts= k.split('@')\n o= ''\n acodes= {'C':'stroke','W':'stroke-width','S':'stroke-dasharray','O':'stroke-opacity'}\n for a in atts:\n if a[0] in acodes:\n o+= '%s=\"%s\" ' % (acodes[a[0]],a[1:])\n# elif a[0] == 'S': # Maybe do something special like this.\n# o+= 'stroke-dasharray=\"%\" ' % a[1:]\n return o", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def fix_attributes(string):\n defs = re.compile('<dl class=\"attribute\">(?P<descrip>.*?)</dl>',flags=re.DOTALL)\n name = re.compile('<code class=\"descclassname\">(?P<name>[^<]*)</code>')\n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefsub = ''\n remnsub = remain[match.start(1):match.end(1)]\n descrip = name.search(remnsub)\n if descrip:\n prefix += remnsub[:descrip.start()]\n prefix += remnsub[descrip.end():]\n prefix += remain[match.end(1):match.end(0)]\n else:\n prefix += remain[match.start(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def read_attribs(self):\n\n attribs = {}\n while 
self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def extensible_attributes():\n return 'extensibleattributedef?'", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def 
parse_tag_attrs(\n self, tags_str, options_d=None, font_d=None, case=\"\", **kwargs\n ):\n return parse_tag_attrs(\n tags_str,\n options_d,\n font_d,\n case,\n widget=self,\n text=getattr(self, \"debug_text\", None),\n **kwargs\n )", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def get_attributes(self) -> Dict[str, str]:\n pass", "def transform(attrs: dict) -> dict:\n\n pass", "def get_html_element_attributes(self):\n html_element_attributes = {\n 'class': self.css_classes or False, # Fall back to false to avoid class=\"\"\n }\n if self.should_render_as_link():\n html_element_attributes['href'] = self.url\n return html_element_attributes", "def create_descr(self, attr_name):", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def set_attrs(self, username, attrs):\n pass", "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,\r\n prettyPrint=False, indentLevel=0):\r\n\r\n encodedName = self.toEncoding(self.name, encoding)\r\n\r\n attrs = []\r\n if self.attrs:\r\n for key, val in self.attrs:\r\n fmt = '%s=\"%s\"'\r\n if isString(val):\r\n if self.containsSubstitutions and '%SOUP-ENCODING%' in val:\r\n val = self.substituteEncoding(val, encoding)\r\n\r\n # The attribute value either:\r\n #\r\n # * Contains no embedded double quotes or single quotes.\r\n # No problem: we enclose it in double quotes.\r\n # * Contains embedded single quotes. No problem:\r\n # double quotes work here too.\r\n # * Contains embedded double quotes. No problem:\r\n # we enclose it in single quotes.\r\n # * Embeds both single _and_ double quotes. This\r\n # can't happen naturally, but it can happen if\r\n # you modify an attribute value after parsing\r\n # the document. Now we have a bit of a\r\n # problem. We solve it by enclosing the\r\n # attribute in single quotes, and escaping any\r\n # embedded single quotes to XML entities.\r\n if '\"' in val:\r\n fmt = \"%s='%s'\"\r\n if \"'\" in val:\r\n # TODO: replace with apos when\r\n # appropriate.\r\n val = val.replace(\"'\", \"&squot;\")\r\n\r\n # Now we're okay w/r/t quotes. 
But the attribute\r\n # value might also contain angle brackets, or\r\n # ampersands that aren't part of entities. We need\r\n # to escape those to XML entities too.\r\n val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)\r\n\r\n attrs.append(fmt % (self.toEncoding(key, encoding),\r\n self.toEncoding(val, encoding)))\r\n close = ''\r\n closeTag = ''\r\n if self.isSelfClosing:\r\n close = ' /'\r\n else:\r\n closeTag = '</%s>' % encodedName\r\n\r\n indentTag, indentContents = 0, 0\r\n if prettyPrint:\r\n indentTag = indentLevel\r\n space = (' ' * (indentTag-1))\r\n indentContents = indentTag + 1\r\n contents = self.renderContents(encoding, prettyPrint, indentContents)\r\n if self.hidden:\r\n s = contents\r\n else:\r\n s = []\r\n attributeString = ''\r\n if attrs:\r\n attributeString = ' ' + ' '.join(attrs)\r\n if prettyPrint:\r\n s.append(space)\r\n s.append('<%s%s%s>' % (encodedName, attributeString, close))\r\n if prettyPrint:\r\n s.append(\"\\n\")\r\n s.append(contents)\r\n if prettyPrint and contents and contents[-1] != \"\\n\":\r\n s.append(\"\\n\")\r\n if prettyPrint and closeTag:\r\n s.append(space)\r\n s.append(closeTag)\r\n if prettyPrint and closeTag and self.nextSibling:\r\n s.append(\"\\n\")\r\n s = ''.join(s)\r\n return s", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def convert_attributes(cls, attrs):\n return {}", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def widget_attrs(self, widget):\n\n attrs = super(RelateField, self).widget_attrs(widget)\n\n attrs.update({'content_type': self.content_types})\n\n return attrs", "def attributes(doc, header, renderer=Attribute, item_class=DefinitionItem):\n items = doc.extract_items(item_class)\n lines = []\n renderer = renderer()\n for item in items:\n renderer.item = item\n lines += renderer.to_rst()\n lines.append('')\n return lines", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n info[ATTR_NAME] = info[ATTR_PROPERTIES]['Name'].replace('\\xa0', ' ')\n return info", "def img(self, **kwargs):\n attrs = ''\n for item in 
kwargs.items():\n if not item[0] in IMGATTRS:\n raise AttributeError, 'Invalid img tag attribute: %s'%item[0]\n attrs += '%s=\"%s\" '%item\n return '<img src=\"%s\" %s>'%(str(self),attrs)", "def gen_tag_attrs(self, *a, **kw):\n if kw.get(\"widget\", sentinel) is not None:\n raise Exception(\n \"TTToolTip.gen_tag_attrs(): 'widget' keyword must be set\"\n \" to None\"\n )\n return gen_tag_attrs(None, *a, **kw)", "def init_attrs(self):\n raise NotImplementedError", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def _style_to_basic_html_attributes(self, element, style_content,\n force=False):\n if style_content.count('}') and \\\n style_content.count('{') == style_content.count('{'):\n style_content = style_content.split('}')[0][1:]\n\n attributes = {}\n for rule in style_content.split(';'):\n split = rule.split(':')\n if len(split) != 2:\n continue\n key = split[0].strip()\n value = split[1]\n\n if key == 'text-align':\n attributes['align'] = value.strip()\n elif key == 'background-color':\n attributes['bgcolor'] = value.strip()\n elif key == 'width' or key == 'height':\n value = value.strip()\n if value.endswith('px'):\n value = value[:-2]\n attributes[key] = value\n\n for key, value in list(attributes.items()):\n if key in element.attrib and not force or key in self.disable_basic_attributes:\n # already set, don't dare to overwrite\n continue\n element.attrib[key] = value", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def domAttributesToString( node ):\n strOut = \"node has %d attribute(s):\\n\" % node.attributes.length;\n for i in range(node.attributes.length):\n attr = node.attributes.item(i);\n strOut += \"- %s:'%s'\\n\" % (attr.name, attr.value );\n return strOut;", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_", "def replace_tag_attributes(code_attrs, tag, tag_attrs):\n\n new_attrs = code_attrs.copy()\n for key, value in tag_attrs.items():\n if key in new_attrs:\n new_attrs[key] = new_attrs[key].replace(tag, value)\n\n return new_attrs", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def as_html(self):\n return mark_safe(\" 
\".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def format_link(attrs: Dict[tuple, str], new: bool = False):\n try:\n p = urlparse(attrs[(None, 'href')])\n except KeyError:\n # no href, probably an anchor\n return attrs\n\n if not any([p.scheme, p.netloc, p.path]) and p.fragment:\n # the link isn't going anywhere, probably a fragment link\n return attrs\n\n c = urlparse(settings.SITE_URL)\n if p.netloc != c.netloc:\n # link is external - secure and mark\n attrs[(None, 'target')] = '_blank'\n attrs[(None, 'class')] = attrs.get((None, 'class'), '') + ' external'\n attrs[(None, 'rel')] = 'nofollow noopener noreferrer'\n\n return attrs", "def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)", "def parseAttrs(self,attrs,date_type):\n\tattrs=copy.copy(attrs) #make sure we don't change user/group attributes\n \tattr_holders=self.getAttrHolders(attrs)\n\tmap(lambda x:x.setDateType(date_type),attr_holders)\n\tmap(lambda x:attrs.update(x.getParsedDic()),attr_holders)\n\treturn attrs", "def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs" ]
[ "0.735201", "0.6754294", "0.67166066", "0.67071074", "0.66780305", "0.65807486", "0.6522693", "0.6522693", "0.65187657", "0.6471306", "0.6269984", "0.62653935", "0.6153201", "0.6090701", "0.60323846", "0.60278016", "0.6011661", "0.60042846", "0.59841794", "0.5941162", "0.59205276", "0.5918955", "0.59121054", "0.5903962", "0.5884743", "0.5876164", "0.5857109", "0.5851559", "0.583173", "0.58274394", "0.5816038", "0.58061635", "0.5784312", "0.5755998", "0.5755998", "0.57360405", "0.57051307", "0.5701552", "0.5687975", "0.5650812", "0.5618766", "0.561154", "0.5605911", "0.56030387", "0.5602799", "0.55926436", "0.5587559", "0.5571399", "0.5567558", "0.55631375", "0.555545", "0.5550559", "0.55490625", "0.55470836", "0.55410224", "0.5519966", "0.55098814", "0.5492064", "0.547102", "0.5470936", "0.54692423", "0.5467515", "0.54661024", "0.54518676", "0.54405665", "0.5438651", "0.54003173", "0.5388153", "0.5382598", "0.5375904", "0.5375076", "0.53706104", "0.5359634", "0.5354708", "0.5354708", "0.5331472", "0.5324531", "0.53227526", "0.5316361", "0.5309617", "0.5308968", "0.53067", "0.5306182", "0.5299369", "0.52990687", "0.5287107", "0.52791494", "0.5277907", "0.5276578", "0.52742803", "0.5270845", "0.52608305", "0.52524847", "0.5244876", "0.5239417", "0.5234171", "0.5224983", "0.5215326", "0.521457", "0.5212088", "0.5203955" ]
0.0
-1
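The truncated record above (only its negatives tail is visible here; its query and document precede this excerpt) revolves around parse_tag_attrs / gen_tag_attrs helpers that translate abbreviated "key=value" tag strings into tkinter option and font dictionaries. A minimal, self-contained sketch of that parsing step, assuming a simplified key set and using plain strings in place of the tkinter.font constants (the real TTWidget code additionally handles aliases, widget defaults, and case options):

def parse_tag_attrs_sketch(tags_str):
    # Hypothetical, simplified parser: split "key=value" pairs into a
    # widget-options dict and a font dict, mapping bold/italic flags onto
    # the weight/slant values tkinter.font expects ("bold"/"normal" and
    # "italic"/"roman" stand in for the tk_font constants).
    options, font = {}, {}
    for pair in tags_str.split():
        key, _, val = pair.partition("=")
        key = key.lower()
        if key in ("bold", "weight"):
            font["weight"] = "bold" if val not in ("0", "False") else "normal"
        elif key in ("italic", "slant"):
            font["slant"] = "italic" if val not in ("0", "False") else "roman"
        elif key in ("underline", "overstrike"):
            font[key] = 1 if val not in ("0", "False") else 0
        elif key == "size":
            font["size"] = int(val)  # raises ValueError on bad input, as the original does
        else:
            options[key] = val
    return options, font

print(parse_tag_attrs_sketch("bg=red bold=1 size=12"))
# ({'bg': 'red'}, {'weight': 'bold', 'size': 12})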
Called by base init, after class change or format text change
def initFormat(self): self.formatList = self.splitText(self.format)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initFormat(self):\n pass", "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, text):\n\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self):\n self.text = ''", "def set_text(self):\n pass", "def post_init(self):\n\t\tpass", "def init_widget(self):\n super(UiKitTextView, self).init_widget()\n self.init_text()", "def __init__(self,\n text: str) -> None:\n\n super().__init__(text)", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.formatList = []", "def _post_init(self):\n pass", "def __post_init__(self):\n pass", "def initWidgets(self):\n self.lambdtext.setText(str(self.lambd))\n self.ptext.setText(str(self.p))", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def _init_display(self):\n raise NotImplementedError", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_objectapp_signals()", "def __init__(self, font='mediumbold'):\n\tself.set_font(font)", "def __init__(self):\r\n self.label = \"Bulk Layout Text Replace\"\r\n self.alias = \" Jake's Toolbox Alias Property True\"\r\n self.description = \"\"\r\n self.canRunInBackground = False", "def __post_init__(self):\n super().__post_init__()", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_gstudio_signals()", "def after_parsing(self):", "def __init__(self, as_text=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.as_text = as_text", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()", "def after_init(self) -> None:\n if self.options.format.lower() != \"default_notebook\":\n self.error_format = self.options.format\n if not hasattr(self, \"color\"):\n self.color = True", "def afterInit(self):", "def post_init(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self, name, time, text):\n pass", "def init(self):", "def init(self):", "def __init__(self,txt=u'',unicodeEncoding='utf-8',verbose=False,tagID=0):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup,self).__init__(__txt=None,__rawTxt=txt,\n __SCOPEUPDATED=False,__VERBOSE=verbose,\n __tagID=tagID,\n __unicodeEncoding=unicodeEncoding)\n self.__cleanText()", "def __init__(self):\n\t\t# Setup fonts\n\t\tself.large_font = self._get_font(1,Annotator.THICK)\n\t\tself.large_font_outline = self._get_font(1,Annotator.THICK + Annotator.BORDER)\n\t\t\n\t\tself.small_font = self._get_font(0.5,Annotator.THIN)\n\t\tself.small_font_outline = self._get_font(0.5,Annotator.THIN + Annotator.BORDER)\n\t\t\n\t\t# Text colour\n\t\tself.colour = Annotator.COLOUR_BUSY\n\t\t\n\t\tself.forehead = (0,0,1,1)\n\t\tself.face = (0,0,1,1)", "def onInit(self):\n pass", "def _afterInit(self):\n pass", "def __init__(self, **kwargs):\n # We set it to True so that starting empty lines are\n # not counting as separators\n self.last_line_was_empty = True", "def _post_init(self) -> None:\n return", "def _init(self):", "def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )", "def __init__(self, text=\"\", widget=None):\n self._label_text = text\n self._widget = widget\n self._widget.on_change = self._update\n super().__init__(text=f\"{text} {widget.value}\")", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self): \r\n pass", "def init_widget(self):", "def __init__(self):\n ## Global initialization\n self.default_initialization()\n ## Initial function set\n self.selfdriven = False\n self._format_default_functions()\n ## Check descriptormodel\n self._assert_correctness()", "def __init__(self):\n self.content = \"\"", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def initWidgets(self):\n self.loctext.setText(\"{0:g}\".format(self.loc))\n self.scaletext.setText(\"{0:g}\".format(self.scale))", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def initDocTagText(self):\n self.doc, self.tag, self.text = Doc().tagtext()", "def __init__(\n self,\n type,\n text):\n self.type = type\n self.text = text", "def _init(self):\n pass", "def _initialize(self):\n \n self.view.lineEdit_3.setText(\"C,H,N,O,P,S\")\n self.view.spin_hit.setValue(20)\n self.view.lineEdit_2.setValue(10.)\n self.view.checkBox_8.setChecked(True)", "def format(self):\n ...", "def init(self) -> None:", "def update_editor ( self ):\n super( TextFontEditor, self ).update_editor()\n set_font( self )", "def __init__(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self):\n\t\tprint(\"Class initilised\")", "def __init__(self, text='', **kwargs):\n Control.__init__(self, text=text, **kwargs)", "def on_transEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()", "def __init__(self, text, idx):\n self.text = text\n self.idx = idx", "def __init__(self):\n self.update_state()", "def set_initial_values(self):\n #Stores each line of the text file in a list\n self.text = []\n \n #Scrolling distance\n self.scroll = 0\n\n #Zooming level (font size) \n self.zoom = 12\n\n #Factor by which is decrement self.zoom\n self.factor = 0\n\n #Number of tabs spaces before a line\n self.indent = 0\n\n #Flag to only set up pango descriptions only once \n self.set_pc = 1\n\n #list of indetation level of all lines\n self.tab_index = []\n\n #Total line count\n self.line_count = 0\n\n #line number of line rendered off top of window \n self.min_text = 0\n #line number of line rendered off bottom of window \n self.max_text = 50\n\n #y position for cairo for the text at the top\n self.min_cairo = 20\n\n #y position for text at bottom\n self.max_cairo = 20\n\n #x positiong for indented text\n self.tab_cairo = 20", "def __init__(self):\n fmt = \"%(message)s\"\n super().__init__(fmt=fmt)\n\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def __init__(self, text):\n self.text = text\n self.letters = [letters[c] for c in self.text]\n self.width = sum(let.width + 1 for let in self.letters)\n self._offset = width\n self.is_done = False", "def __init__(self, text=None, settings=None, style='General', language='en'):\n\n self._text = None\n self._settings = None\n self._style = None\n self._language = None\n\n self.text = text\n self.settings = settings\n self.style = style\n self.language = language", "def init(self) -> None:\n self.started = False\n self.lines = []\n self.text = ''\n self.graphics = ''\n self.ids = {}\n self.first_line_added = False\n\n self.used_fonts = set()\n self.current_line_used_fonts = set()\n self.current_height = 0\n self.lines = []\n\n line_width = self.width - (self.indent if self.is_first_line else 0)\n self.current_line = PDFTextLine(\n self.fonts, line_width, self.text_align, self.line_height\n )\n\n self.last_indent = 0\n self.last_state = self.last_factor = self.last_fill = None\n self.last_color = self.last_stroke_width = None\n\n self.y_ = 0", "def _settext(self, textEntered):\n if textEntered.strip() == '':\n textEntered=self.data['initialtext']\n self.entry.enterText(textEntered)\n else:\n if callable(self.data['callback']): self.data['callback'](textEntered)\n if self.data['autoexit'] and callable(self.data['exit']):\n # NOTE not safe to call here user callback...\n taskMgr.doMethodLater(.5, self.data['exit'], '_ntryxt')", "def __init__(self, edit: QtWidgets.QTextEdit, out=None, color=None):\n self.edit = edit\n self.out = out\n self.color = color", "def on_load(self):\n self.__init__()", "def __init__():", "def __init__(self) -> None:\n str.__init__(self)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._ansi_escape_codes = True", "def do_init(self):\n\n pass", "def initialize(self):\n\t\tpass", "def run_init(self):\n InitEditor(self.root, self)", "def __init(self):\n 
print(\"Welkam tu mobail lejen\")", "def __init__(self, text, tag, start ,end):\n\n self.text = six.text_type(text)\n self.tag = copy.copy(tag)\n self.end = end\n self.start = start" ]
[ "0.7095915", "0.70883477", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6801035", "0.67764556", "0.67764556", "0.6772573", "0.67218834", "0.6665987", "0.6530844", "0.6495981", "0.6494592", "0.6494592", "0.6490198", "0.6401653", "0.6355695", "0.63224435", "0.627716", "0.627716", "0.62600374", "0.6241324", "0.6241043", "0.6223984", "0.6216441", "0.6214059", "0.62072545", "0.6179023", "0.61773074", "0.6165903", "0.6150355", "0.61494476", "0.6145963", "0.6123563", "0.6106276", "0.6106276", "0.61052555", "0.6075407", "0.606871", "0.60595924", "0.6050179", "0.6039118", "0.6025508", "0.60182106", "0.60180503", "0.5996569", "0.5996569", "0.5996569", "0.5996569", "0.5993615", "0.5956698", "0.59549457", "0.59410423", "0.5936671", "0.5926797", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.5922803", "0.59159535", "0.59074825", "0.59036523", "0.59019417", "0.5898051", "0.58926487", "0.5887501", "0.5887218", "0.58803314", "0.5877826", "0.5868464", "0.58638364", "0.5862526", "0.58605254", "0.5853759", "0.5833662", "0.58296865", "0.5820315", "0.5815491", "0.58068454", "0.579537", "0.57909584", "0.57830495", "0.5776756", "0.5769101", "0.5765869", "0.5761965", "0.5755533", "0.57552737" ]
0.6957401
2
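This record pairs the docstring "Called by base init, after class change or format text change" with an initFormat that rebuilds formatList from the field's format string. A runnable sketch of that pattern, assuming a stand-in class and separator (the sep value is an assumption; splitText and initFormat names follow the record, while the real TreeLine field class is far larger):

class ChoiceFieldSketch:
    sep = "/"  # assumed separator; the real field configures this

    def __init__(self, format_text):
        self.format = format_text
        self.initFormat()  # base init triggers the first rebuild

    def splitText(self, text):
        return [part.strip() for part in text.split(self.sep) if part.strip()]

    def initFormat(self):
        # rerun after a class change or a format text change
        self.formatList = self.splitText(self.format)

field = ChoiceFieldSketch("red/green/blue")
print(field.formatList)  # ['red', 'green', 'blue']
field.format = "low/high"
field.initFormat()       # format text changed, so rebuild the choices
print(field.formatList)  # ['low', 'high']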
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False): if storedText not in self.formatList: storedText = _errorStr return TextFormat.formatOutput(self, storedText, titleMode, internal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def format_title(self, data):\n return data", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_text(downgrade_titles=False):", "def PROPER(text):\n return text.title()", "def title(self, string):\n return self.bold(string)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def print_with_title(title, content, before='', after='', hl='='):\n cont_maxlen = max(len(s) for s in content.split('\\n'))\n hl_len = max(cont_maxlen, len(title))\n print('{}{}\\n{}\\n{}{}'.format(before, title, hl * hl_len, content, after))", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", 
trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def helptext(self):\n return \"\"", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def get_title():", "def __str__(self):\n date_str = self.date.strftime(self.journal.config['timeformat'])\n title = date_str + \" \" + self.title\n body = self.body.strip()\n\n return \"{title}{sep}{body}\\n\".format(\n title=title,\n sep=\"\\n\" if self.body else \"\",\n body=body\n )", "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def output_sep_title(title):\n print(f\"{sep_mark}\\t{title}{sep_mark}\")", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def title(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"=\")))", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', '&quot;')\n return title\n else:\n return u''", "def escape_if_needed(text, options):\n if hasattr(text, '__html__'):\n # Text has escape itself:\n return to_string(text.__html__())\n if 
need_to_escape(options):\n return escape(to_string(text))\n return to_string(text)", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_rst_title_char(level):\n chars = (u'=', u'-', u'`', u\"'\", u'.', u'~', u'*', u'+', u'^')\n if level < len(chars):\n return chars[level]\n return chars[-1]", "def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def escape_single_quotes(custom_data):\n # https://stackoverflow.com/questions/10569438/how-to-print-unicode-character-in-python\n # https://regex101.com/r/nM4bXf/1\n if re.search(\"(?<!u)'(?!:|}|,)\", custom_data.get('title_name', '')):\n z = re.sub(r\"(?<!u)'(?!:|}|,)\", '\\\\\\'', custom_data.get('title_name', None))\n\n custom_data['title_name'] = z\n return custom_data\n return custom_data", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def format_title(self, ticket_id, subject):\n # TODO: strip block tags?\n title = \"#%i %s\" % (ticket_id, subject)\n return title.strip()", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def title(self, value):\n if len(value):\n self._title = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._title.append('')", "def emphasize(text: str, tablefmt: str | TableFormat, strong: bool = False) -> str:\n # formats a title for a table produced using tabulate,\n # in the formats tabulate understands\n if tablefmt in [\"html\", \"unsafehtml\", html_with_borders_tablefmt]: # type: ignore\n if strong:\n emph_text = f\"<strong>{text}</strong>\"\n else:\n emph_text = f\"<em>{text}</em>\"\n elif tablefmt in [\"latex\", \"latex_raw\", \"latex_booktabs\", \"latex_longtable\"]:\n if strong:\n emph_text = r\"\\textbf{\" + text + 
r\"}\"\n else:\n emph_text = r\"\\emph{\" + text + r\"}\"\n else: # use the emphasis for tablefmt == \"pipe\" (Markdown)\n star = \"**\" if strong else \"*\"\n emph_text = f\"{star}{text}{star}\"\n return emph_text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def textual(title, ordering_field=None):\n def decorator(func):\n def wraps(self, obj):\n result = func(self, obj)\n return result if result else u'---'\n\n wraps.short_description = title\n wraps.allow_tags = True\n\n if ordering_field:\n wraps.admin_order_field = ordering_field\n\n return wraps\n return decorator", "def outputText(self, item, titleMode, internal=False):\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def group_title(self, group):\n group_title = group.getProperty('title')\n if self.short:\n splitted = group_title.split('(')\n if len(splitted) > 1:\n group_title = group_title.split('(')[-1][:-1]\n return html.escape(group_title)", "def outputText(self, item, titleMode, internal=False):\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)", "def format_heading(self, level, text):\n underlining = ['=', '-', '~', ][level-1] * len(text)\n return '%s\\n%s\\n\\n' % (text, underlining)", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def formatted(self) -> str:\r\n ...", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def gen_title_rst(txt):\n # Just add a few useful directives\n txt = \".. 
highlight:: cmake\\n\\n\" + txt\n return txt", "def _prettyfilename(self):\n return self.title", "def wrap_title(title, mpl_layout):\n fig = mpl_layout.canvas.figure\n ax = fig.axes[0]\n ext_pixels = ax.get_window_extent()\n ext_inches = ext_pixels.transformed(fig.dpi_scale_trans.inverted())\n magic_number = 10\n letters_per_line = int(ext_inches.width * magic_number)\n title_wrapped = '\\n'.join(textwrap.wrap(title, letters_per_line))\n return title_wrapped", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''", "def transform(text: str) -> str:\n return text.title()", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)", "def complete_alt_title(self, obj):\n return str(obj)", "def clean_title(\r\n title: str,\r\n mode: Literal[\"soft\", \"hard\", \"safe\"],\r\n allow_dot: bool = False,\r\n n: Optional[int] = None,\r\n) -> str:\r\n ...", "def text(self) -> str:", "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and 
':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "def print_title(title):\n\n print(\"\\n\" + title)\n print(\"=\" * len(title))", "def format_rich_quote(rich_text_quote):\n rich_text = format_rich_text(rich_text_quote)\n return \"> \" + \"\\n> \".join(rich_text.split(\"\\n\")) + \"\\n\"", "def SearchableText(self):\n ctool = getToolByName(self, 'portal_cpscalendar')\n if getattr(ctool, 'event_fulltext_index', False):\n return '%s %s' % (self.title, self.description)\n return ''", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def visit_title_reference(self, node):\n self.body.append('\\\\emph{\\\\textbf{')", "def render(resolve_unicode,\n title_force_uppercase,\n msdos_eol_style,\n output_encoding,\n omit_fields=[]):", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def format_screen(self,str):\n # Paragraph continue\n par_re = re.compile(r'\\\\$',re.MULTILINE)\n str = par_re.sub('',str)\n return str", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def title_content(label=\"A title\"):\n return {'label':label}", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title_p(self):\n self.run_command('title_p')", "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def dumps(self):\n\n if not self.numbering:\n num = '*'\n else:\n num = ''\n\n string = Command(self.latex_name + num, self.title).dumps()\n string += '%\\n' + self.dumps_content()\n\n return string", "def processTitle(title):\n\n cleaned = re.sub(r'[@#\\\"]+', '', title.lower().strip())\n cleaned = re.sub(r'\\(\\d{4}.*\\)', '', cleaned)\n cleaned = re.sub(r':.+', '', cleaned).strip()\n return cleaned", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if 
self.font else self.text" ]
[ "0.67517006", "0.6623557", "0.64947814", "0.6347113", "0.6307539", "0.621596", "0.6210496", "0.60684896", "0.60674477", "0.60663515", "0.60421175", "0.6019259", "0.59935653", "0.59802073", "0.59790826", "0.595393", "0.5948588", "0.5939195", "0.590317", "0.5872387", "0.58521676", "0.5838757", "0.5835408", "0.5834278", "0.5832544", "0.58303535", "0.58232164", "0.58196765", "0.5818879", "0.581837", "0.58134586", "0.57893336", "0.5777435", "0.5773666", "0.5759935", "0.57562524", "0.57514244", "0.5736761", "0.5721786", "0.57156", "0.5693657", "0.56579095", "0.56524575", "0.56516933", "0.56416726", "0.5639766", "0.5630319", "0.56235963", "0.5607828", "0.55989367", "0.5597865", "0.5593643", "0.55868447", "0.5576239", "0.55753696", "0.5570099", "0.556155", "0.55568874", "0.55474097", "0.5539662", "0.5532411", "0.5531814", "0.5512975", "0.5479672", "0.54774815", "0.54768354", "0.5473451", "0.54682344", "0.5464578", "0.54521894", "0.5445922", "0.5437787", "0.54369724", "0.5422958", "0.5415149", "0.5415149", "0.5399354", "0.539413", "0.53890395", "0.5382889", "0.5382856", "0.53564143", "0.535306", "0.53529805", "0.5352455", "0.5347083", "0.5333787", "0.5333257", "0.5332394", "0.5331696", "0.53306514", "0.53304696", "0.53293514", "0.5327383", "0.53269297", "0.53269297", "0.53238297", "0.53169096", "0.5314785", "0.5314103" ]
0.58123326
31
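Here the document validates storedText against formatList, substitutes an error marker when it is not a known choice, then defers to the base formatOutput, which (per the sibling negatives) returns plain text in titleMode and HTML-escapes with <br /> line breaks otherwise. A condensed sketch of that flow — the value of _errorStr is an assumption, and the base-class call chain is flattened into a single function:

from xml.sax.saxutils import escape

_errorStr = "#####"  # assumed placeholder; the real constant may differ

def format_output_sketch(stored_text, title_mode, format_list):
    # Fall back to the error marker when the stored value is not a valid choice.
    if stored_text not in format_list:
        stored_text = _errorStr
    if title_mode:
        return stored_text  # titles stay unescaped plain text
    return escape(stored_text).replace("\n", "<br />")

print(format_output_sketch("a&b", True, ["a&b"]))   # a&b
print(format_output_sketch("a&b", False, ["a&b"]))  # a&amp;b
print(format_output_sketch("bad", False, ["a&b"]))  # #####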
Return tuple of text in edit format and bool validity, using edit format option
def formatEditText(self, storedText): if storedText in self.formatList: return (storedText, True) return (storedText, not storedText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def reformat(ctx):\n pass", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n 
return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def test_format_permissions_and_docstring(self):\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\"],\n {\"some\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"permission formatted string\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\"\n ),\n )\n\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\", \"another permission\"],\n {\"some\": \"docstring\", \"another\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"- permission formatted string\\n\"\n \"- another permission\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\\n\"\n \"- **another** : docstring\"\n ),\n )", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def text(value):\n return True", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and 
not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def _check_style(file_path, clang_format_bin):\n with open(file_path, 'r') as f:\n is_valid_header = f.read().startswith(CppFormatter.standard_header)\n\n cmd = [\n clang_format_bin,\n \"-style=file\",\n \"-output-replacements-xml\",\n file_path,\n ]\n result = subprocess.check_output(cmd).decode(\"utf-8\")\n if \"<replacement \" in result:\n is_valid_style = False\n else:\n is_valid_style = True\n return (is_valid_style, is_valid_header)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = 
_(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. 
Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def hints(s):\n if s == 'hello':\n # string, color, bold\n return (' World', 35, False)\n return None", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def FormatYesNo(value):\n if value:\n return u'Yes'\n else:\n return u'No'", "def get_data_from_nonformat_text():\n pass", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def rich(text):\n return full(text, False)", "def edit(self, text):\n return self._edit_engine(text, 
break_on_success=False)", "def change_prompt_format(self, arg, **_):\n if not arg:\n message = 'Missing required argument, format.'\n return [(None, None, None, message)]\n\n self.prompt_format = self.get_prompt(arg)\n return [(None, None, None, \"Changed prompt format to %s\" % arg)]", "def test_command_edit_info_boolean_flags():\n def f(inputfile):\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(inputfile, tmp.name)\n\n for flag in (\"write_protected\", \"synchronized\", \"cleaned\"):\n for true_value, false_value in ((\"1\", \"0\"),\n (\"yes\", \"no\"),\n (\"YES\", \"No\"),\n (\"true\", \"false\"),\n (\"tRuE\", \"FaLsE\")):\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, true_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == True\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, false_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == False\n f(kValid1)\n f(kValid2)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def get_help_text(self):\n requirements = \"Your password must contain at least: {min_length} \" \\\n \"character(s), {min_uppercase} uppercase letter(s), \" \\\n \"{min_lowercase} lowercase letter(s) and \" \\\n \"{min_digits} digit(s). \".format(\n min_length=self.min_length,\n min_uppercase=self.min_uppercase,\n min_lowercase=self.min_lowercase,\n min_digits=self.min_digits)\n return requirements", "def help_for(self, flag: str) -> Tuple[str, str]:\n # Obtain arg obj\n if flag not in self.flags:\n err = \"{!r} is not a valid flag for this context! Valid flags are: {!r}\" # noqa\n raise ValueError(err.format(flag, self.flags.keys()))\n arg = self.flags[flag]\n # Determine expected value type, if any\n value = {str: \"STRING\", int: \"INT\"}.get(arg.kind)\n # Format & go\n full_names = []\n for name in self.names_for(flag):\n if value:\n # Short flags are -f VAL, long are --foo=VAL\n # When optional, also, -f [VAL] and --foo[=VAL]\n if len(name.strip(\"-\")) == 1:\n value_ = (\"[{}]\".format(value)) if arg.optional else value\n valuestr = \" {}\".format(value_)\n else:\n valuestr = \"={}\".format(value)\n if arg.optional:\n valuestr = \"[{}]\".format(valuestr)\n else:\n # no value => boolean\n # check for inverse\n if name in self.inverse_flags.values():\n name = \"--[no-]{}\".format(name[2:])\n\n valuestr = \"\"\n # Tack together\n full_names.append(name + valuestr)\n namestr = \", \".join(sorted(full_names, key=len))\n helpstr = arg.help or \"\"\n return namestr, helpstr", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def __check_format(node, lint_ctx, profile: str, allow_ext=False):\n if \"format_source\" in node.attrib and (\"ext\" in node.attrib or \"format\" in node.attrib):\n lint_ctx.warn(\n f\"Tool {node.tag} output '{node.attrib.get('name', 'with missing name')}' should use either format_source or format/ext\",\n node=node,\n )\n if \"format_source\" in node.attrib:\n return True\n if node.find(\".//action[@type='format']\") is not None:\n return True\n # if allowed (e.g. 
for discover_datasets), ext takes precedence over format\n fmt = None\n if allow_ext:\n fmt = node.attrib.get(\"ext\")\n if fmt is None:\n fmt = node.attrib.get(\"format\")\n if fmt == \"input\":\n message = f\"Using format='input' on {node.tag} is deprecated. Use the format_source attribute.\"\n if Version(str(profile)) <= Version(\"16.01\"):\n lint_ctx.warn(message, node=node)\n else:\n lint_ctx.error(message, node=node)\n\n return fmt is not None", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def verify_diagnostics_and_usage_text():\r\n msg, status = \"\", True\r\n try:\r\n sleep(10)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n 'verify end user license agreement label'\r\n flag2,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n \r\n flag = False if not (flag1 and flag2) else True\r\n else:\r\n\r\n \r\n 'Verify diagnostics usage text'\r\n flag1 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_lbl'))\r\n 'Verify diagnostics usage text'\r\n flag2 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_txt'))\r\n \r\n sleep(4) \r\n \r\n flag = False if not (flag1 and flag2) else True\r\n if not flag:\r\n return False, msg\r\n else:\r\n print \"License agreement screen name is displayed properly\"\r\n \r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def _format_action(self, action):\n parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)\n if action.nargs == argparse.PARSER:\n parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n return parts", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def _engine_option_string_and_comment(option: engine.Option, value: engine.ConfigValue) -> Tuple[str, str]:\n if value is None:\n value = ''\n name_equals_val = f'{option.name}={value}'\n if option.type == 'check' or option.type == 'string' or option.type == 'button':\n return (name_equals_val, f'type={option.type}')\n if option.type == 'spin':\n return (name_equals_val, f'type=spin, min={option.min}, max={option.max}')\n if option.type == 'combo':\n return (name_equals_val, f'type=combo, var={option.var}')\n return (name_equals_val, 'type=unknown')", "def TEXT(number, format_type):\n raise NotImplementedError()", "def __verify_plot_options(self, options_str):\n default_line = '-'\n default_marker = ''\n default_colour = 'k'\n\n # Split str into chars list\n options_split = list(options_str)\n\n # If 0, set defaults and 
return early\n if len(options_split) == 0:\n return [default_line, default_marker, default_colour]\n\n # If line_style given, join the first two options if applicable\n # (some types have 2 characters)\n for char in range(0, len(options_split) - 1):\n # If char is '-' (only leading character in double length option)\n if options_split[char] == '-' and len(options_split) > 1:\n # If one of the leading characters is valid\n if options_split[char + 1] == '-' or \\\n options_split[char + 1] == '.':\n # Join the two into the first\n options_split[char] = options_split[char] \\\n + options_split[char + 1]\n # Shuffle down the rest\n for idx in range(char + 2, len(options_split)):\n options_split[idx - 1] = options_split[idx]\n # Remove duplicate extra\n options_split.pop()\n\n # If any unknown, throw error\n for option in options_split:\n if option not in self.__line_styles and \\\n option not in self.__marker_styles and \\\n option not in self.__colour_styles:\n error_string = \"Unknown character entered: '{0}'\"\n raise ValueError(error_string.format(option))\n\n ##############################\n # Verify Line Style\n ##############################\n line_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n line_style_index = 0\n for option in options_split:\n if option in self.__line_styles:\n line_style_count = line_style_count + 1\n line_style_index = self.__line_styles.index(option)\n\n # If more than one, throw error\n if line_style_count > 1:\n raise ValueError(\n \"Too many line style arguments given. Only one allowed\")\n # If none, set as solid\n elif line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = default_line\n # If one, set as given\n else:\n output_line = self.__line_styles[line_style_index]\n ##############################\n\n ##############################\n # Verify Marker Style\n ##############################\n marker_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n marker_style_index = 0\n for option in options_split:\n if option in self.__marker_styles:\n marker_style_count = marker_style_count + 1\n marker_style_index = self.__marker_styles.index(option)\n\n # If more than one, throw error\n if marker_style_count > 1:\n raise ValueError(\n \"Too many marker style arguments given. Only one allowed\")\n # If none, set as no-marker\n elif marker_style_count == 0 or not any(\n item in options_split for item in self.__marker_styles):\n output_marker = default_marker\n # If one, set as given\n else:\n output_marker = self.__marker_styles[marker_style_index]\n # If marker set and no line given, turn line to no-line\n if line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = ''\n ##############################\n\n ##############################\n # Verify Colour Style\n ##############################\n colour_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n colour_style_index = 0\n for option in options_split:\n if option in self.__colour_styles:\n colour_style_count = colour_style_count + 1\n colour_style_index = self.__colour_styles.index(option)\n\n # If more than one, throw error\n if colour_style_count > 1:\n raise ValueError(\n \"Too many colour style arguments given. 
Only one allowed\")\n # If none, set as black\n elif colour_style_count == 0 or not any(\n item in options_split for item in self.__colour_styles):\n output_colour = default_colour\n # If one, set as given\n else:\n output_colour = self.__colour_styles[colour_style_index]\n ##############################\n\n return [output_line, output_marker, output_colour]", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def reformat():\n toolkit.reformat()", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def validate_format(self):\n raise NotImplementedError()", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def _validate_performatives(performative: str) -> Tuple[bool, str]:\n # check performative is not a reserved name\n if _is_reserved_name(performative):\n return (\n False,\n \"Invalid name for performative '{}'. This name is reserved.\".format(\n performative,\n ),\n )\n\n # check performative's format\n if not _is_valid_regex(PERFORMATIVE_REGEX_PATTERN, performative):\n return (\n False,\n \"Invalid name for performative '{}'. 
Performative names must match the following regular expression: {} \".format(\n performative, PERFORMATIVE_REGEX_PATTERN\n ),\n )\n\n return True, \"Performative '{}' is valid.\".format(performative)", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def edit_form_entry_help_text_extra(cls):\n return \"\"\"\n <ul class=\"{container_class}\">\n {edit_option_html}\n <li><a href=\"{delete_url}\">\n <span class=\"{delete_option_class}\"></span> {delete_text}</a>\n </li>\n </ul>\n <input type=\"hidden\" value=\"{form_element_position}\"\n name=\"form-{counter}-position\"\n id=\"id_form-{counter}-position\"\n class=\"form-element-position\">\n <input type=\"hidden\" value=\"{form_element_pk}\"\n name=\"form-{counter}-id\" id=\"id_form-{counter}-id\">\n \"\"\".format(\n container_class=cls.form_list_container_class,\n edit_option_html=\"{edit_option_html}\",\n delete_url=\"{delete_url}\",\n delete_option_class=cls.form_delete_form_entry_option_class,\n delete_text=\"{delete_text}\",\n form_element_position=\"{form_element_position}\",\n counter=\"{counter}\",\n form_element_pk=\"{form_element_pk}\",\n )", "def extension (formatStr):\n assert False, \"TODO:\"", "def _generateReadOnly(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'readonly'\n if self._script.utilities.isReadOnlyTextArea(obj):\n result.append(self._script.formatting.getString(**args))\n return result", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def is_text_editable(path):\n return False", "def _format_answer(self, text):\n text = str(text).replace('\\n', ' ')\n answer_width = 70\n pretty_text = '\\n\\t'.join(textwrap.wrap(text, answer_width))\n\n return pretty_text", "def flags(self, f):\n if f.is_inlined:\n return \" (inlined)\"\n return \"\"", "def text_to_display(level):\n if level == \"html\":\n return html_answers, html_text\n elif level == \"css\":\n return css_answers, css_text\n elif level == \"python\":\n return python_answers, python_text", "def editorForTyp(typ):\n\n if typ == \"quint32\":\n return (\"QSpinBox\", \"setValue\", \"value\")\n elif typ == \"QString\":\n return (\"QLineEdit\", \"setText\", \"text\")\n elif typ == \"bool\":\n return (\"QCheckBox\", \"setChecked\", \"isChecked\")\n return (None, None, None)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def show_fields(*fields):\n\n fields = filter( lambda x: x, fields )\n target_len = max( len(name) for name, value in fields ) + 2\n for name, value in fields:\n line = name + ':' + \" \" * (target_len - len(name))\n if type(value) == bool:\n line += color_text(\"Yes\", 'green') if value else color_text(\"No\", 'red')\n else:\n line += str(value)\n print line", "def testCheckRequiredFormat(self):\n plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()\n\n file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()\n file_system_builder.AddFile('/file.txt', (\n b'2018-01-24 18:25:08,454 -0800 INFO pid=2376 7780:MainThread '\n b'logging_config.py:295 OS: Windows/6.1-SP1\\n'))\n\n file_entry = file_system_builder.file_system.GetFileEntryByPath('/file.txt')\n\n parser_mediator = self._CreateParserMediator(None, file_entry=file_entry)\n\n file_object = file_entry.GetFileObject()\n text_reader = 
text_parser.EncodedTextReader(file_object)\n text_reader.ReadLines()\n\n result = plugin.CheckRequiredFormat(parser_mediator, text_reader)\n self.assertTrue(result)", "def formatted(self) -> str:\r\n ...", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def text_editor():\n return True", "def validate(self, txt, pos):\n state, rpos = qt.QDoubleValidator.validate(self, txt, pos)\n if txt.length() == 0:\n state = qt.QValidator.Acceptable\n return state, rpos", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text" ]
[ "0.74414396", "0.73706305", "0.7158762", "0.7101311", "0.7101311", "0.70359075", "0.7028627", "0.655066", "0.6453073", "0.6392096", "0.63097495", "0.63002694", "0.61881006", "0.60249096", "0.58351296", "0.5770396", "0.5735489", "0.56134075", "0.5609827", "0.5604693", "0.55462486", "0.5309045", "0.52991766", "0.5209923", "0.52029717", "0.52029717", "0.52029717", "0.52029717", "0.52029717", "0.51995766", "0.5173284", "0.5163057", "0.51601666", "0.5118592", "0.51145864", "0.5095493", "0.50919986", "0.507758", "0.5076713", "0.5066263", "0.505785", "0.5048219", "0.5010526", "0.50008905", "0.49893162", "0.497208", "0.49717423", "0.49444363", "0.4933414", "0.49300218", "0.49295422", "0.49209118", "0.4898785", "0.48946583", "0.4889563", "0.48859346", "0.48719627", "0.48597744", "0.48540577", "0.48528767", "0.48496515", "0.48474717", "0.484253", "0.48417863", "0.4838876", "0.48276186", "0.4824091", "0.48239785", "0.4821053", "0.48203927", "0.48191538", "0.48187387", "0.4813598", "0.48087016", "0.48079142", "0.48021486", "0.47995895", "0.47952986", "0.47923297", "0.47902", "0.4777973", "0.4775352", "0.47560427", "0.47556764", "0.47520024", "0.47499168", "0.47496396", "0.4736746", "0.47359556", "0.47356373", "0.4734012", "0.47335908", "0.47326112", "0.47288954", "0.4723627", "0.47181588", "0.4714406", "0.47135657", "0.47128206", "0.47116646" ]
0.68839306
7
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText): if editText in self.formatList: return (editText, True) return (editText, not editText and not self.isRequired)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def get_data_from_nonformat_text():\n pass", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def on_edit(self, event, text):\n return None", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def reformat(ctx):\n pass", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if check_text:\n check(text)\n \n print(text)\n \n ### managing 
placeholders\n text = parsers['v'].main(text)\n \n ### saving names\n if index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise PlansError(\"Couldn't get edit text, 
are we logged in?\")\n else:\n plan = u'' + plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_edits(text):\n edit_p = re.compile(\"(?P<open><edit.*?>)(?P<inner>.*?)(?P<close></edit>)\")\n corr_p = re.compile(\"<corrections>.*?</corrections>\")\n edits = []\n\n offset = 0\n\n for m in re.finditer(edit_p, text):\n # Make an edit object\n edit_text = \"\".join(m.groups())\n edit = ET.XML(m.group(0))\n\n # Set the bounds of the original text and adjust offset\n inner_string = m.group('inner') \n start = m.start() - offset\n corr_m = re.search(corr_p, inner_string)\n \n if corr_m: # Replacement/insertion have a correction\n offset += len(corr_m.group(0)) \n \n if not inner_string.startswith(\"<empty/>\"):\n end = start + 
corr_m.start()\n else:\n offset += len(\"<empty/>\") # It is \"\" in plain text\n end = start\n else:\n # Deletions may not have a correction\n if not inner_string.startswith(\"<empty/>\"):\n end = start + len(inner_string)\n else: # Unspecified error <empty/> is \"\" in plain text\n end = start\n offset += len(inner_string)\n\n\n edit.set(\"start\", \"%d\" % start) \n edit.set(\"end\", \"%d\" % end)\n\n offset += len(m.group('open')) + len(m.group('close'))\n \n\n # Make the original text a subelement of <edit>\n # Original text may be a string or <empty/> element.\n original = ET.SubElement(edit, \"original\")\n \n if edit.text:\n original.text = edit.text\n edit.text = \"\"\n else:\n empty = edit.find('empty')\n \n try:\n edit.remove(empty)\n original.append(empty)\n except Exception as e:\n pass\n \n edits.append(edit)\n\n return edits", "def refang(self, text: str):", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def get_mark(text, short):\n\n line = text.readline()\n\n # check that the line begins with a valid entry type\n if not short and not re.match(r'^\\s*(text|mark) = \"', line):\n raise ValueError('Bad entry: ' + line)\n\n # read until the number of double-quotes is even\n while line.count('\"') % 2:\n next_line = text.readline()\n\n if not next_line:\n raise EOFError('Bad entry: ' + line[:20] + '...')\n\n line += next_line\n if short:\n pattern = r'^\"(.*?)\"\\s*$'\n else:\n pattern = r'^\\s*(text|mark) = \"(.*?)\"\\s*$'\n entry = re.match(pattern, line, re.DOTALL)\n\n return entry.groups()[-1].replace('\"\"', '\"')", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def getText(self):", "def get_text(text_input):\r\n return text_input", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def rich(text):\n return full(text, False)", "def text(value):\n return True", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n 
\"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def get_text_editor_input(initial_msg):\n EDITOR = os.environ.get('EDITOR', 'vi')\n CROP_MARK = ('\\n\\nAnything above this line will be ignored:\\n' +\n ('-' * 34) + '>8' + ('-' * 34) + '\\n')\n\n wrapper = TextWrapper(replace_whitespace=False, drop_whitespace=False)\n initial_msg = '\\n'.join(wrapper.wrap(initial_msg))\n initial_msg += CROP_MARK\n\n with tempfile.NamedTemporaryFile(suffix='.md') as temp:\n temp.write(initial_msg.encode('utf-8'))\n temp.flush() # Write buffer to the file\n subprocess.call([EDITOR, temp.name])\n\n # The pointer was already after the initial message, but we return to\n # the beginning just in case the user added content before the mark\n temp.seek(0)\n return temp.read().decode('utf-8').split(CROP_MARK, 1)[1].strip()", "def edit():", "def text_example():\n \n text_store = \"01000001011000010010000001000010011000100000110100001010001100010011001000110011\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"You should be able to save this file and open it in a text editor like Notepad or Nano to read it. If you edit the values you may find it does not display properly as text. 
Unchanged, it should be interpreted by a text editor as:\\n\\nAa Bb\\n123\\n\\nAs the file was made on a Windows machines you may find other systems display the line breaks differently.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "def is_text_editable(path):\n return False", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def edit_once(self, text):\n return self._edit_engine(text, break_on_success=True)", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def process_text(self, text, language):", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. 
If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def get_text_from_editor():\n with tempfile.NamedTemporaryFile(suffix='.tmp', mode='w+t') as f:\n # Create a temporary file with instructions on describing bug\n f.write(message + '\\n\\n')\n f.flush()\n # Open the editor and allow the user to type\n editor = os.environ.get('EDITOR', 'vim')\n subprocess.call([editor, f.name])\n # Read and clean the file\n f.seek(0)\n text = ''.join([line.lstrip() for line in f.readlines()\n if line and not line.lstrip().startswith('#')])\n return '\\n'.join(textwrap.wrap(text, width=100))", "def storeTextEditValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.sender().toPlainText()\n\t\tself.storeValue(category, attr, value)", "def _editorText(self):\n if self.__lineEditKind:\n return self._editor.text()\n else:\n return self._editor.currentText()", "def _hidden_in_unicode(self, txt):", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def edit_type(self, candidate, word):\n edit = [False] * 4\n correct = \"\"\n error = \"\"\n replaced = ''\n replacer = ''\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]: # inconsistency in the first (i + 1) characters of the two strings\n if candidate[i:] == word[i - 1:]:\n edit[1] = True # deletion\n correct = candidate[i - 1] # candidate[i - 1] is deleted and we get word\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n edit[0] = True # insertion\n correct = ''\n error = word[i] # word[i] is redundant\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = 
word[i - 1] + error\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True # substitution\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True # transposition\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n # string inversion\n candidate = candidate[::-1]\n word = word[::-1]\n\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]:\n if candidate[i:] == word[i - 1:]:\n edit[1] = True\n correct = candidate[i - 1]\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n correct = ''\n error = word[i]\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = word[i - 1] + error\n edit[0] = True\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n if word == candidate:\n return \"None\", '', '', '', ''\n if edit[0]:\n return EDIT_TYPE_INSERTION, correct, error, replaced, replacer\n elif edit[1]:\n return EDIT_TYPE_DELETION, correct, error, replaced, replacer\n elif edit[2]:\n return EDIT_TYPE_SUBSTITUTION, correct, error, replaced, replacer\n elif edit[3]:\n return EDIT_TYPE_TRANSPOSITION, correct, error, replaced, replacer", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def text_editor():\n return True", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def stepText2Changed(build, step, text2):", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def on_idEdit_textChanged(self, txt):\n self.__generateDefaultCommitMessage()\n 
self.__updateOK()", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def element_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier))\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def process_raw_text(self, file_name, column_side):\n self.mvc_check()\n\n model_txt = None\n if column_side == LEFT_TEXT:\n model_txt = self.model.txt1\n elif column_side == RIGHT_TEXT:\n model_txt = self.model.txt2\n\n model_txt.open_raw(file_name)\n model_txt.process_raw()\n self.opened_txt[column_side] = True\n self.can_align = self.opened_txt[LEFT_TEXT] and self.opened_txt[RIGHT_TEXT]\n\n # Goldsmith\n model_txt.make_trie(column_side)\n model_txt.apply_goldsmith(1.1, 20, column_side)\n\n # Associate word for alignment if both text were opened\n if self.can_align:\n for view in self.views:\n view.end_task()\n view.change_task(\"Associating words\")\n self.model.associate_words(1.5)\n for view in self.views:\n view.end_task()\n\n # TODO : coherent saving to database using model.save_data\n\n return model_txt.str", "def reformat():\n toolkit.reformat()", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def read_plain_txt(input_fn: str) -> Tuple[List[str], List[str]]:\n\n with open(input_fn, 'r') as f:\n migrations = []\n queries = []\n mode = 'none'\n for line in f:\n stripped = line.strip()\n if len(stripped) == 0:\n continue\n if stripped.lower() == '== migrations':\n if mode != 'none':\n raise ValueError(f'Invalid {input_fn}: The migrations section 
should appear first.')\n mode = 'migrations'\n elif stripped.lower() == '== queries':\n if mode != 'migrations':\n raise ValueError(f'Invalid {input_fn}: The queries section should appear after the migrations section.')\n mode = 'queries'\n elif stripped[0] == '#':\n pass\n else:\n if mode == 'migrations':\n migrations.append(stripped)\n elif mode == 'queries':\n queries.append(stripped)\n else:\n pass\n return migrations, queries", "def on_lineEdit_textChanged(self, p0):\n # str_me = \"我爱我的祖国\"\n # self.lineEdit.setText(str_me) # 设置单行文本内容\n input_text = self.lineEdit.text()\n self.textEdit.setPlainText(input_text)\n # self.textEdit.setHtml(input_text) # 显示Html,如 <font color='red' size='20'>HELLO!</font>\n a = self.textEdit.toPlainText()\n print(a)", "def post_process_text(self, text):\n\t\treturn text", "def text(self) -> str:", "def editText(self, text, jumpIndex=None, highlight=None):\n try:\n import gui\n except ImportError, e:\n print 'Could not load GUI modules: %s' % e\n return text\n editor = gui.EditBoxWindow()\n return editor.edit(text, jumpIndex=jumpIndex, highlight=highlight)", "def alter_text_format(self):\n service = self.slides_service\n requests = [\n {\n 'updateParagraphStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'alignment': 'CENTER'\n },\n 'fields': 'alignment'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.TITLE_FONT_SIZE, # numbers slightly larger than lyrics\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.left_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.right_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n }\n ]\n body = {\n 'requests': requests\n }\n response = service.presentations().batchUpdate(presentationId=self.presentation_id, body=body).execute()\n print(f'Updated the text style for shape with ID: {self.left_box_id}')\n return response", "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def edit(self,edits):\n\t\tself.alphanumeric=edits['alphanumeric'] if 'alphanumeric' in edits else None\n\t\tself.alphanumeric_color = edits['alphanumeric_color'] if 'alphanumeric_color' in edits else None\n\t\tif self.alphanumeric_color ==\"grey\":\n\t\t\tself.alphanumeric_color = 
\"gray\"\n\t\tself.background_color = edits['background_color'] if 'background_color' in edits else None\n\t\tif self.background_color == \"grey\":\n\t\t\tself.background_color = \"gray\";\n\t\tshapeChoices = dict((x,y) for x,y in Target.SHAPE_CHOICES)\n\t\tself.shape = str(shapeChoices[edits['shape']]) if 'shape' in edits else None\n\t\tself.orientation = edits['orientation'] if 'orientation' in edits else None\n\t\tself.ptype = edits['ptype']\n\t\tself.description = edits['description'] if 'description' in edits else None\n\t\tself.save()", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def displayText():\n global entryWidget,entryWidget1,entryWidget2,entryWidget3,entryWidget4 ,entryWidget5,entryWidget6\n global thefilename,itrial,do_stim, delaylen,ntest_arms,stop_if_error,timeout_arm_sec\n thefilename=entryWidget.get().strip()\n itrial=entryWidget1.get().strip()\n do_stim=entryWidget2.get().strip()\n delaylen=entryWidget3.get().strip()\n ntest_arms=entryWidget4.get().strip()\n stop_if_error=int(entryWidget5.get().strip())==1 # convert to logical\n print 'stop_if_error is ', stop_if_error\n\n\n timeout_arm_sec=entryWidget6.get().strip()\n root.destroy()\n return thefilename,itrial,do_stim,delaylen,ntest_arms,stop_if_error,timeout_arm_sec" ]
[ "0.76840067", "0.7569935", "0.7569935", "0.73801994", "0.7312141", "0.7185404", "0.71525943", "0.70902133", "0.69048536", "0.6863909", "0.6808694", "0.67496157", "0.66044503", "0.62728953", "0.6122511", "0.60096884", "0.5692688", "0.5692688", "0.5692688", "0.5692688", "0.5692688", "0.5534609", "0.55292165", "0.55122524", "0.54905003", "0.5460751", "0.539562", "0.5386719", "0.5354485", "0.53479636", "0.53389454", "0.532986", "0.53046286", "0.53009975", "0.52862155", "0.525454", "0.5231931", "0.52078664", "0.5165661", "0.51404953", "0.51277274", "0.51189363", "0.50971085", "0.5088265", "0.5054947", "0.5048544", "0.5042088", "0.5035617", "0.5031853", "0.5024283", "0.5017046", "0.50136364", "0.50128716", "0.5011537", "0.49993557", "0.49967337", "0.4986811", "0.4980363", "0.49699798", "0.4965772", "0.49580243", "0.4952371", "0.4948915", "0.49471527", "0.49142417", "0.49096134", "0.48924685", "0.48859364", "0.48703972", "0.4866565", "0.4855901", "0.48431557", "0.48414293", "0.4838512", "0.48360094", "0.48314703", "0.48113507", "0.48052862", "0.4801024", "0.4800837", "0.4797172", "0.47929224", "0.47766042", "0.4773588", "0.47715577", "0.47713175", "0.47710884", "0.47694808", "0.47682908", "0.4765882", "0.47634518", "0.47566456", "0.4753546", "0.47510284", "0.47495022", "0.47466233", "0.47453552", "0.47350493", "0.47240865", "0.4720859" ]
0.787282
0
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): return [(text, '') for text in self.formatList]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def choices(self):\n return tuple(self._choices)", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def fill_combobox_attributes(self):\n\n list_char = [\"\"]\n list_num = [\"\"]\n if self.ui.radioButton_file.isChecked():\n for a in self.attributes:\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n else:\n for a in self.attributes:\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.clear()\n self.ui.comboBox_char_attributes.clear()\n self.ui.comboBox_char_attributes.addItems(list_char)\n 
self.ui.comboBox_num_attributes.addItems(list_num)\n self.ui.comboBox_num_attributes.blockSignals(False)\n self.ui.comboBox_char_attributes.blockSignals(False)", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def get_choices(cls):\n return cls.values.items()", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def create_combo(self) -> typing.Iterable[Combo]:\n raise NotImplementedError()", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice>\" +\r\n \"tag; got {0} instead\".format(choice.tag)\r\n )\r\n\r\n components = []\r\n choice_text = ''\r\n if choice.text is not None:\r\n choice_text += choice.text\r\n # Initialize our dict for the next content\r\n adder = {\r\n 'type': 'text',\r\n 'contents': choice_text,\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n components.append(adder)\r\n\r\n for elt in choice:\r\n # for elements in the choice e.g. 
<text> <numtolerance_input>\r\n adder = {\r\n 'type': 'text',\r\n 'contents': '',\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n tag_type = elt.tag\r\n # If the current `elt` is a <numtolerance_input> set the\r\n # `adder`type to 'numtolerance_input', and 'contents' to\r\n # the `elt`'s name.\r\n # Treat decoy_inputs and numtolerance_inputs the same in order\r\n # to prevent students from reading the Html and figuring out\r\n # which inputs are valid\r\n if tag_type in ('numtolerance_input', 'decoy_input'):\r\n # We set this to textinput, so that we get a textinput html\r\n # element.\r\n adder['type'] = 'textinput'\r\n adder['contents'] = elt.get('name')\r\n else:\r\n adder['contents'] = elt.text\r\n\r\n # Add any tail text(\"is the mean\" in the example)\r\n adder['tail_text'] = elt.tail if elt.tail else ''\r\n components.append(adder)\r\n\r\n # Add the tuple for the current choice to the list of choices\r\n choices.append((choice.get(\"name\"), components))\r\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def combo_callback(self, eventobj):\n print(self.combo_input.get()) # print name\n print(self.combo_input.current()) # print index", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def set_choices(self, index, choices):\n if len(choices) == 1:\n self._label(index)\n self._widgets[index][\"text\"] = str(choices[0])\n else:\n self._combo(index)\n self._widgets[index][\"values\"] = [str(t) for t in choices]\n width = max(len(str(t)) for t in choices)\n width = max(5, width)\n self._widgets[index][\"width\"] = width", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice> tag; got %s instead\"\r\n % choice.tag)\r\n choices.append((choice.get(\"name\"), stringify_children(choice)))\r\n return choices", "def __init__(self, \n num_fld=1, \n lab_txt=[\"1\"], \n txt_fld=[\"1\"], \n title_txt=\"test\", \n comb_txt=[],\n comb_lab_txt=[], \n comb_num=0, \n root_x=50, \n root_y=50):\n super().__init__()\n self.geometry(f'+{root_x}+{root_y}') #head=y+20px\n self.str_in=[]\n self.title(title_txt)\n if comb_txt:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n self.comb=[]\n self.act=[]\n lab=[0]*num_fld\n lab_comb=[0]*comb_num\n else:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n lab=[0]*num_fld\n self.comb=[]\n self.act=[]\n for i in range(num_fld):\n self.name[i]=tk.StringVar()\n ent[i]=tk.Entry(self,textvariable=self.name[i])\n ent[i].insert(0, txt_fld[i])\n lab[i] = tk.Label(self,width=15, text=lab_txt[i])\n lab[i].pack()\n ent[i].pack()\n for i in range(comb_num):\n lab_comb[i]=tk.Label(self,width=35, 
text=comb_lab_txt[i])\n self.comb.append(ttk.Combobox(self, values=comb_txt))\n lab_comb[i].pack()\n self.comb[i].pack()\n self.comb[i].current(1)\n\n but_ac=tk.Button(self, text=\"Accept\", command=self.ins)\n but_ac.pack()\n self.mainloop", "def input_choices_from_list(choices, text):\n no_courses_text = \"\"\"\n init will only list the courses you are enrolled in\n and there seem to be none.\n Either enrol in a course or add the course id as command line argument.\n \"\"\"\n if choices is None or len(choices) == 0:\n print(no_courses_text)\n raise SystemExit(1)\n\n digits = str(math.ceil(math.log10(len(choices))))\n format_str = '{:' + digits + 'd} {}'\n for n, c in enumerate(choices):\n print(format_str.format(n, c))\n try:\n return [int(c) for c in input(text).split()]\n except EOFError:\n return []", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def combobox(self):\n return self._combo", "def widgets_from_abbreviations(self, seq):\n result = []\n for name, abbrev, default in seq:\n widget = self.widget_from_abbrev(abbrev, default)\n if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):\n if widget is None:\n raise ValueError(\"{!r} cannot be transformed to a widget\".format(abbrev))\n else:\n raise TypeError(\"{!r} is not a ValueWidget\".format(widget))\n if not widget.description:\n widget.description = name\n widget._kwarg = name\n result.append(widget)\n return result", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def build_comboboxes(activities, events):\n global comboboxes\n # For each activity set up a selector for an event\n\n for activity in activities:\n\n # Setup frame for better display in gui\n frame = Frame(main_window)\n frame.configure(background=\"gray30\")\n\n # Label the left column as activity in a model + \"beautify gui\"\n text = \"Activity name (model):\"\n Label(frame, text=text, bg=\"gray30\", fg=\"white\", padx=5).grid(column=0, row=0)\n Label(frame, text=activity, bg=\"gray30\", fg=\"white\").grid(column=0, row=1)\n\n # Set up the combobox for an event\n combo = Combobox(frame)\n combo['values'] = events\n\n # If activity is in events preselect the current one\n if activity in events:\n combo.current(events.index(activity))\n\n # Label the combobox and place label and box in frame\n Label(frame, text=\"Event name (log):\", bg=\"gray30\", fg=\"white\", padx=5).grid(column=1, row=0)\n combo.grid(column=1, row=1)\n\n # If the last activity in the graph is handled then do not write a separator\n if activity != activities[-1]:\n Separator(frame, orient=\"horizontal\").grid(row=2, columnspan=2, sticky=\"ew\", pady=10)\n\n comboboxes[activity] = combo\n # place the frame in the main_window\n frame.grid(column=0)", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = 
int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def widgets(parameter: Parameter):\n widgets = []\n for key in parameter.keys():\n textEdit = QTextEdit()\n textEdit.setText(key)\n widgets.append(textEdit)\n if isinstance(parameter[key], Enum):\n comboBox = MyQtEnumComboBox()\n comboBox.fillValues(type(parameter[key]))\n widgets.append(comboBox)\n elif isinstance(parameter[key], bool):\n comboBox = QComboBox()\n comboBox.addItems((\"False\", \"True\"))\n widgets.append(comboBox)\n else:\n textEdit = QTextEdit()\n textEdit.setText(str(parameter[key]))\n widgets.append(textEdit)\n for widget in widgets:\n widget.setFixedHeight(30)\n return widgets", "def _getBrailleRegionsForComboBox(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForComboBox\", obj)\n\n regions = []\n\n focusedRegionIndex = 0\n label = self._script.getDisplayedLabel(obj)\n if label and (len(label) > 0):\n regions.append(braille.Region(label + \" \"))\n focusedRegionIndex = 1\n\n # Check to see if the text is editable. If so, then we want\n # to show the text attributes (such as selection -- see bug\n # 496846 for more details).\n #\n textObj = None\n for child in obj:\n if child and child.getRole() == pyatspi.ROLE_TEXT:\n textObj = child\n if textObj and textObj.getState().contains(pyatspi.STATE_EDITABLE):\n textRegion = braille.Text(textObj)\n regions.append(textRegion)\n else:\n displayedText = self._script.getDisplayedText(obj)\n if displayedText:\n regions.append(braille.Region(displayedText))\n\n regions.append(braille.Region(\n \" \" + rolenames.getBrailleForRoleName(obj)))\n\n # Things may not have gone as expected above, so we'll do some\n # defensive programming to make sure we don't get an index out\n # of bounds.\n #\n if focusedRegionIndex >= len(regions):\n focusedRegionIndex = 0\n if len(regions) == 0:\n focusedRegion = None\n else:\n focusedRegion = regions[focusedRegionIndex]\n\n # [[[TODO: WDW - perhaps if a text area was created, we should\n # give focus to it.]]]\n #\n return [regions, focusedRegion]", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n 
h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def test_rendering_combobox(qtbot):\n layer = Image(np.random.rand(8, 8))\n qtctrl = QtImageControls(layer)\n qtbot.addWidget(qtctrl)\n combo = qtctrl.renderComboBox\n opts = {combo.itemText(i) for i in range(combo.count())}\n rendering_options = {\n 'translucent',\n 'additive',\n 'iso',\n 'mip',\n 'minip',\n 'attenuated_mip',\n 'average',\n }\n assert opts == rendering_options\n # programmatically updating rendering mode updates the combobox\n layer.rendering = 'iso'\n assert combo.findText('iso') == combo.currentIndex()", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def get_poll_choices(self, games: [Game]) -> [dict]:\n answer_texts = []\n for g in games:\n answer_texts.append(g.name + \" - \" + g.genre)\n answer_texts = sorted(answer_texts, key=str.lower)\n poll_choices = []\n for at in answer_texts:\n poll_choices.append({\"text\": at})\n return poll_choices", "def __str__(self):\n return \"choice_text: \" + self.choice_text", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def comboBox(args: list, slot) -> QComboBox:\n comboBox = QComboBox()\n comboBox.addItems(args[0])\n comboBox.currentTextChanged.connect(slot)\n return comboBox", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def make_ddls( self, parent ):\n # ---- 0\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n 
textvariable = \"self.arg_2_var\" )\n\n a_widget.bind( \"<<ComboboxSelected>>\", self.sync_ddl_0 )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_0_widget = a_widget\n\n # ---- 1\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n self.ddl_1_widget = a_widget\n\n # ---- 2\n a_widget = ttk.Combobox( parent,\n width = self.arg_width + 5,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_2_widget = a_widget\n\n # self.ddl_widgets = [ self.ddl_0_widget, self.ddl_1_widget, self.ddl_2_widget, ]\n # print( self.ddl_widgets[ 0 ] == self.ddl_widgets[ 1 ])\n\n #self.load_ddl_0( )", "def __str__(self):\n return self.choice_text", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def comboBoxes(self):\r\n # Cities Combo Button\r\n self.comboCities = QComboBox()\r\n self.comboCities.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboCities.addItems(\r\n ['Girón', 'Piedecuesta', 'Floridablanca', 'Bucaramanga'])\r\n self.grid.addWidget(self.comboCities, 6, 1, 1, 2)\r\n self.comboCities.setCurrentText(\"Bucaramanga\")\r\n # Payment Combo Button\r\n self.comboPayment = QComboBox()\r\n self.comboPayment.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboPayment.addItems(['Efectivo', 'Nequi'])\r\n self.grid.addWidget(self.comboPayment, 7, 1, 1, 2)", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def initDefaultChoices(self):\n return []", "def list_selector_widget(members=None,\n preselect=None,\n entry=False,\n callback=None):\n store, i=generate_list_model(members,\n active_element=preselect)\n\n if entry:\n combobox=gtk.ComboBoxEntry(store, column=0)\n else:\n combobox=gtk.ComboBox(store)\n cell = gtk.CellRendererText()\n combobox.pack_start(cell, expand=True)\n combobox.add_attribute(cell, 'text', 0)\n combobox.add_attribute(cell, 'background', 2)\n\n combobox.set_active(-1)\n if i is None:\n i = store.get_iter_first()\n if i is not None:\n combobox.set_active_iter(i)\n\n if entry:\n def get_current_element(combo):\n try:\n return 
combo.get_model().get_value(combo.get_active_iter(), 1)\n except (TypeError, AttributeError):\n return unicode(combo.child.get_text())\n def set_current_element(combo, t):\n combo.child.set_text(t)\n else:\n def get_current_element(combo):\n if combo.get_active_iter() is not None:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n else:\n return None\n def set_current_element(combo, el):\n # Find the index of the element\n l=[ t[0] for t in enumerate(combo.get_model()) if t[1][1] == el ]\n if l:\n # The element is present.\n combo.set_active(l[0])\n else:\n combo.set_active_iter(combo.get_model().append( (unicode(el), el, None) ))\n\n # Bind the method to the combobox object\n combobox.get_current_element = get_current_element.__get__(combobox)\n combobox.set_current_element = set_current_element.__get__(combobox)\n\n if callback is not None:\n combobox.connect('changed', callback)\n\n return combobox", "def occurrence_choices():\n OCCURRENCE_CHOICES = [('one_off', 'One Off'), ('weekly', 'Weekly'), ('fortnightly', 'Fortnightly')]\n occurrence = forms.ChoiceField(choices=OCCURRENCE_CHOICES, widget=forms.RadioSelect())\n return occurrence", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def DrawComboBox(*args, **kwargs):\n return _gdi_.RendererNative_DrawComboBox(*args, **kwargs)", "def fill_combobox(self):\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 1 ORDER BY last_name ASC\"\n self.CB_employee.addItem(\"\")\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 0 ORDER BY last_name ASC\"\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def get_context(self, name, value, attrs=None, choices=()):\n context = super(TriStateCheckboxSelectMultiple, self).get_context(\n name, value, attrs\n )\n\n choices = dict(it.chain(self.choices, choices))\n if value is None:\n value = dict.fromkeys(choices, False)\n else:\n value = dict(dict.fromkeys(choices, False).items() +\n value.items())\n\n context['values'] = [\n (choice, label, value[choice])\n for choice, label in choices.iteritems()\n ]\n\n return context", "def get_text_type():\n opt = ['text', 'email', 'password']\n inp = option_menu(opt, 'Select text type:')\n\n # mark text type with option\n OPTIONS['text-type'] = opt[inp]\n\n # add option to collected list\n add_to_collected('text type', opt[inp])\n\n return", "def set_dropdown_b_options(value):\n options_c = []\n if value=='C':\n options_c = [{'label': '1', 'value': '1'},\n {'label': '2', 'value': '2'}]\n if value == 'D':\n options_c = [{'label': '3', 'value': '3'},\n {'label': '4', 'value': '4'}]\n if value=='E':\n options_c = [{'label': '5', 'value': '5'},\n {'label': '6', 'value': '6'}]\n if value == 
'F':\n options_c = [{'label': '7', 'value': '7'},\n {'label': '8', 'value': '8'}]\n return options_c", "def objects_to_choices(queryset):\n res = []\n for elm in queryset:\n res.append((elm.pk, unicode(elm)))\n return res", "def choice(text, choices, **kwargs):\n return click.prompt(click.style('> {}'.format(text), fg='blue', bold=True),\n type=click.Choice(choices),\n **kwargs)", "def on_comboBox_enceinte_activated(self, index):\n nom_enceinte = self.comboBox_enceinte.currentText()\n marque = [x[2] for x in self.enceintes if x[1] == nom_enceinte][0]\n n_serie = [x[4] for x in self.enceintes if x[1] == nom_enceinte][0]\n model =[x[3] for x in self.enceintes if x[1] == nom_enceinte][0]\n \n \n self.lineEdit_marque.setText(marque)\n self.lineEdit_n_serie.setText(n_serie)\n self.lineEdit_model.setText(model)", "def display_choose(self, text, choices):\n cur_index = 0\n key = None\n while key != 'KEY_NEWLINE':\n if key == 'KEY_UP':\n cur_index = max(cur_index - 1, 0)\n elif key == 'KEY_DOWN':\n cur_index = min(cur_index + 1, len(choices) - 1)\n self.stdscr.erase()\n for line in text:\n self.stdscr.addstr(f'{PADCHAR}{line}\\n')\n for index, value in enumerate(choices):\n self.stdscr.addstr('\\n')\n self.stdscr.addstr(PADCHAR)\n self.stdscr.addstr(value, color_pair(7 if index == cur_index else 1))\n self.stdscr.addstr(f'\\n\\n{PADCHAR}') \n key = self.get_key() \n return cur_index", "def list_selector(title=None,\n text=None,\n members=None,\n controller=None,\n preselect=None,\n entry=False):\n combobox = list_selector_widget(members=members,\n preselect=preselect,\n entry=entry)\n\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n d.vbox.add(combobox)\n combobox.show_all()\n\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n res=d.run()\n retval=None\n if res == gtk.RESPONSE_OK:\n retval=combobox.get_current_element()\n d.destroy()\n return retval", "def get_classes(self, code):\n \n select = v.Combobox(\n _metadata={'name':code}, \n items=self.items, \n v_model=None, \n dense=True,\n hide_details=True\n )\n \n select.observe(partial(self.store, code), 'v_model')\n \n return select", "def getOptionsNames(self) -> List[unicode]:\n ...", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def the_option_named(text: str) -> \"SelectByText\":\n return SelectByText(text)", "def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given 
answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def choice(choices=[], message=\"Pick something.\", title=None):\n return dialog(\n \"choice\",\n choices=choices,\n message=message,\n title=title,\n )", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def build_options(slot, snacks):\n \n if slot == 'Fast':\n return [\n {'text': 'Pizza', 'value': 'Pizza'},\n {'text': 'Fries', 'value': 'Fries'},\n {'text': 'Franky', 'value': 'Franky'},\n {'text': 'Burger', 'value': 'Burger'},\n {'text': 'Sandwich', 'value': 'Sandwich'}\n \n \n ]\n elif slot == 'drink':\n return [\n {'text': 'Coca-Cola', 'value': 'Coca-cola'},\n {'text': 'Appy', 'value': 'Appy'},\n \n {'text': 'Beer', 'value': 'Beer'},\n {'text': 'Frooti', 'value': 'Frooti'},\n {'text': 'Pepsi', 'value': 'Pepsi'}\n \n ]", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def _getChoices(self, acronym):\n # get matches from acronymDB\n matches = []\n if(acronym in self.acronymDB):\n matches += self.acronymDB[acronym]\n if(acronym[-1] == \"s\" and acronym[:-1] in self.acronymDB):\n matches += self.acronymDB[acronym]\n\n # create training data\n X_train, y_train = [], []\n for definition, articleID, ignored_var in matches:\n text = self.articleDB[articleID]\n X_train.append(\n ExpansionChoice(article_id=articleID, article_text=text))\n y_train.append(definition)\n\n # create y labels to group similar acronyms\n y_labels, labelToExpansion = self._processChoices(y_train)\n\n return X_train, y_labels, labelToExpansion", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def set_dropdown_b_options(value):\n options_b = []\n if value=='A':\n options_b = 
[{'label': 'C', 'value': 'C'},\n {'label': 'D', 'value': 'D'}]\n if value == 'B':\n options_b = [{'label': 'E', 'value': 'E'},\n {'label': 'F', 'value': 'F'}]\n return options_b", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def before_choose_candidate_listener(self, session, task):\n choices = [PromptChoice('d', 'eDit', self.importer_edit)]\n if task.candidates:\n choices.append(PromptChoice('c', 'edit Candidates',\n self.importer_edit_candidate))\n\n return choices", "def _get_completion(self, original_text, remaining_text, options):\n if self.should_hide_completions(original_text=original_text,\n remaining_text=remaining_text,\n allowed_suffixes=(\" \", \".\")):\n return []\n\n return [(option, len(remaining_text)) for option in options\n if option.startswith(remaining_text) and not option.startswith(\"_\")]" ]
[ "0.64435107", "0.64435107", "0.6406375", "0.6369448", "0.6282898", "0.61602914", "0.61550856", "0.6096663", "0.6079173", "0.6074226", "0.599768", "0.5979722", "0.5946701", "0.59085536", "0.58665997", "0.5852769", "0.5851758", "0.5840527", "0.58387506", "0.5816007", "0.5803215", "0.5797734", "0.57951343", "0.57936877", "0.57807535", "0.5749749", "0.57457596", "0.5732043", "0.57093215", "0.57050306", "0.5695638", "0.5680274", "0.5638338", "0.5617769", "0.56013155", "0.5580123", "0.5573768", "0.55674773", "0.55656695", "0.5561425", "0.5539777", "0.5525614", "0.55243665", "0.55211055", "0.5516054", "0.55135965", "0.5486497", "0.54864573", "0.5484098", "0.5467698", "0.5450487", "0.5444694", "0.5435837", "0.5432833", "0.542561", "0.54099566", "0.5406829", "0.5394251", "0.53907686", "0.5388395", "0.53733003", "0.5353227", "0.5352402", "0.53441244", "0.5335833", "0.5330664", "0.5320152", "0.5317789", "0.53159815", "0.5291184", "0.52660507", "0.5261751", "0.52587485", "0.5247112", "0.52468276", "0.5246636", "0.52386904", "0.523807", "0.52264065", "0.52225775", "0.521434", "0.52137464", "0.5197637", "0.5192666", "0.51882684", "0.5188186", "0.5170487", "0.516354", "0.5163382", "0.5161854", "0.5159188", "0.5158065", "0.51575136", "0.51539713", "0.5146208", "0.5145707", "0.5143474", "0.5142414", "0.51338685", "0.51279086" ]
0.6981332
0
Return a list of choices for setting the init default
def initDefaultChoices(self): return [text for text in self.formatList]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initDefaultChoices(self):\n return []", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def get_choices(cls):\n return cls.values.items()", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def choices(self):\n return tuple(self._choices)", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def as_choices(cls, key_type=None):\n if key_type is None:\n key_type = cls.get_default_choice_type()\n return cls.enum_class.as_choices(key_type)", "def _set_default_suits(self):\n # set up suits\n suit_types = [('Spades', 1), ('Hearts', 2), ('Diamonds', 3), ('Clubs', 4)]\n # populate the list of suits\n suit_list = list()\n for s in suit_types:\n suit_list.append(Suit(s[0], s[1]))\n\n return suit_list", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", 
"def get_setting_choices(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n choices = setting.get('choices', None)\n\n if callable(choices):\n # Evaluate the function (we expect it will return a list of tuples...)\n return choices()\n\n return choices", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def create_default_repo_choice(self, default_repo):\n return (default_repo, default_repo)", "def get_template_base_dir_choices() -> list[tuple[str, str]]:\n # handle predefined choices\n choices, seen = [], set()\n for template_name in TemplateName:\n choices.append((template_name.value, template_name.label))\n seen.add(template_name.value)\n\n # handle custom choices via settings\n for template_name, display_name in getattr(settings, \"CAST_CUSTOM_THEMES\", []):\n if template_name not in seen:\n choices.append((template_name, display_name))\n seen.add(template_name)\n\n # search for template base directories\n template_directories = get_template_directories()\n template_base_dir_candidates = get_template_base_dir_candidates(template_directories)\n for candidate in template_base_dir_candidates:\n if candidate not in seen:\n choices.append((candidate, candidate))\n\n return choices", "def initialise_options():\r\n default_options = list(range(NUMBER_OF_TILES))\r\n default_weights = [1/NUMBER_OF_TILES]*NUMBER_OF_TILES\r\n return default_options, default_weights", "def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list", "def default_variation(random, candidates, args):\r\n return candidates", "def default_variation(random, candidates, args):\r\n return candidates", "def get_default_options():\n return GROUPS_.values()", "def __init__(self, *initial):\n self.prompt_list = list(initial)", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def choices(self, var):\r\n return (self.curr_domains or self.domains)[var]", "def choices(self, choices):\n\n self._choices = choices", "def get_choices_for_var(self, var):\n return self.choices[var]", "def get_options(self):\n return []", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)", "def default_value_list(sources: List[str] = None):\n if not default:\n return list()\n if not sources:\n return [default]\n else:\n return sources", "def _get_target_choices():\n apps = [('public', _(\"Public website\"))]\n for model, entity in registry.registry.items():\n if entity.menu:\n appname = model._meta.app_label.lower()\n apps.append((appname, unicode(entity.label)))\n return tuple(apps)", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def get_default_is_selected_index(self, choicesdata):\n\n return 0", "def 
_create_defaults(self):\n return DefaultCommandOptionValues(\n min_confidence=3, output_format='vs7')", "def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))", "def create_options(self):\n return []", "def Choices(cls):\n attr = '_choice_attr_' + cls.__name__\n if hasattr(cls, attr):\n return getattr(cls, attr)\n\n choices = set()\n for (k, v) in cls.__dict__.items():\n if not k.startswith('_') and issubclass(type(v), (str, unicode)):\n choices.add(v)\n for base in cls.__bases__:\n if issubclass(base, ChoiceBase) and base is not ChoiceBase:\n choices = set.union(choices, base.Choices())\n setattr(cls, attr, choices)\n\n return choices", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def form_SelectChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options)\n form['mySelect'].default = 2\n return form", "def season_choices():\n return [(s, s) for s in range(0, 3)]", "def is_a_list_of_choices(self):\n pass", "def setChoices(self, choices):\n self.getGtkObject('property_liststore').clear()\n for choice in choices:\n self.getGtkObject('property_liststore').append([str(choice)])", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def episode_choices():\n return [(e, e) for e in range(0, 2)]", "def setAll(self):\n self.setValue(self._choices_)", "def configure_list_of_choices_type_question(self, question_data):\n self.driver.find_radio_button(LIST_OF_CHOICE_RB).click()\n index = 1\n for choice in fetch_(CHOICE, from_(question_data)):\n if index > 1:\n self.driver.find(ADD_CHOICE_LINK).click()\n self.driver.find_text_box(by_xpath(CHOICE_XPATH_LOCATOR + \"[\" + str(index) + \"]\" + CHOICE_TB_XPATH_LOCATOR)).enter_text(choice)\n index += 1\n choice_type = fetch_(ALLOWED_CHOICE, from_(question_data))\n if ONLY_ONE_ANSWER == choice_type:\n self.driver.find_radio_button(ONLY_ONE_ANSWER_RB).click()\n elif MULTIPLE_ANSWERS == choice_type:\n self.driver.find_radio_button(MULTIPLE_ANSWER_RB).click()\n return self", "def get_init_list(self):\n\n return self.convert_compartments_to_list(self.init_compartments)", "def __init__(self, choiceList=None, prompt=DEFAULT_PROMPT, title=DEFAULT_TITLE):\n self.choice = None\n \n wpf.LoadComponent(self, GUI_XAML_FILE)\n \n self.Title = title\n self.lblPrompt.Content = prompt\n \n self.choicesBox.ItemsSource = choiceList", "def initDefaults(self):\n return _libsbml.Species_initDefaults(self)", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, 
str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def get_defaults(self):\n\t\treturn self.__defaults", "def choices(self):\n self._choices = self.getChoices()\n return len(self._choices)", "def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)", "def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def default_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"default_values\")", "def test_get_prior_string_list(self):\n categories = list(range(10))\n categories[0] = \"asdfa\"\n categories[2] = \"lalala\"\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices(['asdfa', 1, 'lalala', 3, 4, 5, 6, 7, 8, 9], \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def choose_option(self, state):\n options = [o for o in self.options if o.initiation_set[state] == 1]\n return random.choice(options)", "def setUp(self):\n current_date = date.today()\n name = 'name'\n possible_meals = [Meal(date=current_date, name=name)]\n self.possible_meals_choices = [(possible_meal.id, possible_meal.name)\n for possible_meal in possible_meals]", "def all_options():\n return _OptionRegistry.values()", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "async def choices(self, ctx, *, options):\n choices = options.split('-')\n choice = random.choice(choices)\n await ctx.send(f'My choice is\\\"{choice}\\\"')", "def 
get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def initialize_options(self):", "def initDefaults(self):\n return _libsbml.Reaction_initDefaults(self)", "def calendar_choices(self):\n if not self._calendars:\n if self.authenticated:\n default = self.account.schedule().get_default_calendar()\n # {\n # \"default\" : <DEFAULT_CALENDAR>,\n # \"<CALENDAR_NAME>: <CALENDAR>,\n # ...\n # }\n self._calendars = {\n DEFAULT_CALENDAR: default,\n **{\n c.name: c\n for c in self.account.schedule().list_calendars() if c.name != default.name\n }\n }\n\n return self._calendars", "def get_options(self):\r\n return self._option_values", "def getOptionsNames(self) -> List[unicode]:\n ...", "def default_args(self) -> Optional[list[str]]:\n _args: list[Arg] = []\n _ctx = self._select(\"defaultArgs\", _args)\n return _ctx.execute_sync(Optional[list[str]])", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def available_binary_choices() -> Iterable[str]:\n for name, _ in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n if name.startswith('Binary'):\n yield name", "def initDefaults(self):\n return _libsbml.Event_initDefaults(self)", "def form_CheckboxMultiChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('multiChoice', schemaish.Sequence(schemaish.Integer()))\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['multiChoice'].widget = formish.CheckboxMultiChoice(options)\n form['multiChoice'].default = [2]\n return form", "def default_selection(random, population, args):\r\n return population", "def form_SequenceOfStringsWithDefault(request):\n schema = schemaish.Structure()\n schema.add( 'myList', schemaish.Sequence( schemaish.String() ))\n\n form = formish.Form(schema, 'form')\n form.defaults = {'myList': ['a','b']}\n return form", "def test_model_choices_all_models(self):\n unique_action_admin = UniqueActionAdmin(UniqueAction, self.site)\n\n self.assertFalse(getattr(unique_action_admin, '_model_choices', False))\n\n model_choices = unique_action_admin.model_choices()\n\n self.assertTrue(getattr(unique_action_admin, '_model_choices'))\n self.assertTrue(isinstance(model_choices, list))", "def sel_prep(self):\n sel_blob = []\n for sel in self.blob['options']:\n if self.blob['defaultValue'] == sel['name']:\n sel_blob.append({'value': sel['name'], 'selected': 'true'})\n else:\n sel_blob.append({'value': sel['name'], 'selected': 'false'})\n\n return sel_blob", "def test_default(self):\n for n in range(1, 5):\n for prefix in ['', 'git-', 'gbp-']:\n parser = GbpOptionParser('%scmd%d' % 
(prefix, n))\n self.assertEqual(parser.config['default_option'], 'default_default1')", "def setChoices(self,report):\n\t\tif report is not None:\n\t\t\tbrowser = report[1]['objects']\n\n\t\t\tif browser is not None:\n\t\t\t\tbrowserChoices = list()\n\t\n\t\t\t\t#compute select list\n\t\t\t\tfor b in browser:\n\t\t\t\t\tif \"chrome\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_CHROME\n\t\t\t\t\telif \"firefox\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_FF\n\t\t\t\t\telif \"thunderbird\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_TH\n\n\t\t\t\t\tfor p in b['profiles']:\n\t\t\t\t\t\tformValue = str(formString)+\"_\"+p['profileName']\t\n\t\t\t\t\t\tbrowserChoices.append((formValue,b['name']+\" - \"+p['profileName']))\n\t\t\t\n\t\t\t\tch = forms.ChoiceField(label=\"Profile\",widget=forms.Select(attrs={'class':'form-control'}),choices=browserChoices)\n\t\t\t\tself.fields['choices'] = ch", "def form_SelectWithOtherChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n form['mySelect'].default = 2\n return form" ]
[ "0.8791058", "0.83096915", "0.7565213", "0.7451019", "0.699929", "0.699929", "0.680488", "0.67091656", "0.66209406", "0.65692645", "0.6532258", "0.6486172", "0.64289325", "0.6406578", "0.63146526", "0.62376446", "0.62375015", "0.62119025", "0.61605716", "0.6160515", "0.6089932", "0.6064072", "0.60535115", "0.60409874", "0.6025764", "0.6001356", "0.5992603", "0.5973309", "0.59606636", "0.5928593", "0.59253234", "0.59120667", "0.59013265", "0.5882774", "0.5882774", "0.58603424", "0.5836189", "0.58113027", "0.57965106", "0.5786334", "0.57581234", "0.5740283", "0.573565", "0.57340217", "0.57094455", "0.5690138", "0.56835073", "0.56539315", "0.5648747", "0.5648359", "0.5643329", "0.56336606", "0.5628389", "0.5607492", "0.5601162", "0.55952716", "0.5583834", "0.5582097", "0.55678433", "0.5567291", "0.5554405", "0.55435175", "0.5521484", "0.5509103", "0.549984", "0.5486964", "0.54801327", "0.5473168", "0.54703456", "0.5448587", "0.5415777", "0.5399514", "0.5390045", "0.5388922", "0.5384503", "0.5379113", "0.53730917", "0.53626585", "0.5349482", "0.53474087", "0.53474087", "0.534685", "0.5342996", "0.5342234", "0.5339548", "0.533718", "0.53333235", "0.5328711", "0.5322346", "0.53161764", "0.53090143", "0.5302724", "0.52999085", "0.52886415", "0.52831566", "0.5275521", "0.5271917", "0.52677983", "0.52644336", "0.525422" ]
0.8089902
2
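The snippets at the head of this record's negatives list all orbit one pattern: initDefaultChoices() delegating to getEditChoices() and keeping only the first element of each pair. A minimal runnable sketch of that pattern, assuming a hypothetical ChoiceField whose getEditChoices() returns (value, annotation) pairs (both names and the fixed choices are assumptions for illustration):

class ChoiceField:
    def getEditChoices(self):
        # hypothetical static choices; real fields build these dynamically
        return [('red', ''), ('green', ''), ('blue', '')]

    def initDefaultChoices(self):
        # defaults are the bare values, dropping each annotation
        return [entry[0] for entry in self.getEditChoices()]

print(ChoiceField().initDefaultChoices())  # ['red', 'green', 'blue']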
Split textStr using editSep; doubled separators become a literal separator character
def splitText(self, textStr): return [text.strip().replace('\0', self.editSep) for text in textStr.replace(self.editSep * 2, '\0').split(self.editSep)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(string, sep='\\t'):\n return text_type.split(string, sep)", "def extended(self, new_char, new_char_index, sep=' '):\n if new_char == sep:\n return TextState(self.text + new_char, '', new_char_index), self.last_word\n if sep == '':\n return TextState(self.text + new_char, new_char, new_char_index), self.last_word\n return TextState(self.text + new_char, self.last_word + new_char, new_char_index), None", "def mysplit(s,delims):\r\n for c in delims:\r\n s = s.replace(c,' ')\r\n return s.split()", "def tsplit(s, sep):\n stack = [s]\n for char in sep:\n pieces = []\n for substr in stack:\n pieces.extend(substr.split(char))\n stack = pieces\n return stack", "def parse_text(text, delimiter, position):\n new_text = text.split(delimiter)[position]\n\n return new_text", "def multi_split(text, seps):\n if not seps: # split by whitespaces\n return text.split()\n else: # split by separators in `seps`\n\n ##### Topics on Stack Overflow\n # http://stackoverflow.com/questions/1059559/python-strings-split-with-multiple-separators\n\n ## Method 1: use `re.split()` (from gimel)\n return re.split(r'[%s]' % seps, text)\n\n ## Method 2: DIY (from pprzemek)\n '''\n res = [text]\n for sep in seps:\n text, res = res, []\n for s in text:\n res += s.split(sep)\n return res\n '''", "def two_split_delimiters(text: str, delimiters: list) -> list:\n split_text = []\n prev_split = -1\n\n for text_index in range(len(text)):\n for delimiter in delimiters:\n if(text[text_index] == delimiter):\n split_text.append(text[prev_split+1:text_index])\n prev_split = text_index\n\n split_text.append(text[prev_split+1:text_index+1])\n\n return split_text", "def separate(delim):\n # Return a function that takes an argument s, which when called will split\n # s over the delimiter specified (i.e. 
the delim parameter).\n return lambda s: s.split(delim)", "def __split_for_delimiter__(self, string):\n if not self.__delimiter__ == '':\n return string.split(self.__delimiter__)\n return string.split()", "def split(inp_str, sep_char, maxsplit=-1, escape_char='\\\\'):\n\n word_chars = []\n word_chars_append = word_chars.append\n\n inp_str_iter = iter(inp_str)\n\n for c in inp_str_iter:\n word_chars_append(c)\n if c == escape_char:\n try:\n next_char = next(inp_str_iter)\n except StopIteration:\n continue\n if next_char == sep_char:\n word_chars[-1] = next_char\n else:\n word_chars.append(next_char)\n elif c == sep_char:\n word_chars.pop()\n yield ''.join(word_chars)\n maxsplit -= 1\n if maxsplit == 0:\n yield ''.join(inp_str_iter)\n return\n del word_chars[:]\n\n yield ''.join(word_chars)", "def test_splitDelimiters(self):\n r = irc.split(\"xx yyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)\n r = irc.split(\"xx\\nyyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)", "def test_two_chars_and_separator():\n assert my_splitter(\",J\", \",\") == [\"\", \"J\"]", "def split(text, delim=','):\n return [x.strip() for x in text.split(delim)]", "def sep(self):\n return self.sep_index", "def splitAtSeparators(expressions):\n splitExpressions = []\n wordStart = 0\n for index, expression in enumerate(expressions):\n if expression.variant == TestExpression.Variant.Separator:\n splitExpressions.append(expressions[wordStart:index])\n wordStart = index + 1\n splitExpressions.append(expressions[wordStart:])\n return splitExpressions", "def split(self, sep=None, maxsplit=None):\n return split(self, sep, maxsplit)", "def split_strings_to_two_char(*, text: str) -> list:\n lenght_text_is_not_even = len(text) % 2 == 1\n\n if lenght_text_is_not_even:\n text += '_'\n\n result = []\n for iterable in range(len(text)//2):\n result.append(text[iterable * 2: iterable * 2 + 2])\n return result", "def test_words_with_sep():\n assert my_splitter(\"bla,bla\", \",\") == [\"bla\", \"bla\"]", "def splitLine(text):\r\n sp = text.split(\" \")\r\n try:\r\n a = sp[0]\r\n b = \" \".join(sp[1:])\r\n except:\r\n a = text\r\n b = \"\"\r\n return a, b", "def separate_pipe(s):\n return s.split('|')", "def extract_text(inp, sep=('(', ')')):\n if sep[0] in inp:\n lsep = inp.find(sep[0])\n rsep = inp.find(sep[1])\n content = inp[lsep+1:rsep]\n ret = \"\".join((inp[:lsep], inp[rsep+1:])).strip()\n return content, ret\n return '', inp", "def split_artist_title(text):\n for separator in SEPARATORS:\n try:\n idx = text.index(separator)\n except ValueError:\n continue\n if idx > -1 and not in_quotes(text, idx):\n return [text[:idx], text[idx + len(separator) :]]", "def test_without_separator():\n assert my_splitter(\"string with !@#$double spaces\") == \\\n [\"string\", \"with\", \"!@#$double\", \"spaces\"]", "def space_injector(space_sep_str: str, desired_str_len: int, sep=' ') -> str:\n sep_str = space_sep_str.split(sep)\n sum_len = 0\n for sstr in sep_str:\n sum_len += len(sstr)\n\n diff = desired_str_len - sum_len\n diff_adj = int(float(diff) / float((len(sep_str) - 1)))\n new_sep = ' ' * diff_adj\n new_str = new_sep.join(sep_str)\n\n return new_str", "def tokenize(text):\n return text.split(' ')", "def tokenize_pt(text):\n #primeiros padrões, separação de palavra de [. , ? ! 
( ) [ ] : ; ' ' \" \" ]\n return split_level_two(split_level_one(text))", "def test_separators_only():\n assert my_splitter(\",ad,\", \"ad\") == [\",\", \",\"]", "def splitInPhrase(self,text):\n return self._support.splitInPhrase(text)", "def text_indentation(text):\n if not isinstance(text, str):\n raise TypeError(\"text must be a string\")\n\n new = text.split(\".\")\n new = [x.strip(\" \") for x in new]\n new = '.\\n\\n'.join(new)\n\n new = new.split(\"?\")\n new = [x.strip(\" \") for x in new]\n new = '?\\n\\n'.join(new)\n\n new = new.split(\":\")\n new = [x.strip(\" \") for x in new]\n new = ':\\n\\n'.join(new)\n\n print(new, end=\"\")", "def split_txt(data: str) -> str:\n items = split_txt_multiline(data)\n ret = ' '.join(items)\n\n return ret", "def shell_split(text):\n assert is_text_string(text) # in case a QString is passed...\n pattern = r'(\\s+|(?<!\\\\)\".*?(?<!\\\\)\"|(?<!\\\\)\\'.*?(?<!\\\\)\\')'\n out = []\n for token in re.split(pattern, text):\n if token.strip():\n out.append(token.strip('\"').strip(\"'\"))\n return out", "def split_preserve_tokens(s):\n return re.split(r'(\\W)', s)", "def split_escaped(string, separator):\n\n result = []\n current = ''\n escaped = False\n for char in string:\n if not escaped:\n if char == '\\\\':\n escaped = True\n continue\n elif char == separator:\n result.append(current)\n current = ''\n continue\n escaped = False\n current += char\n result.append(current)\n return result", "def split_lines_monospaced(string, line_width):\n \n raise NotImplementedException()", "def safe_split(string, sep=','):\n regex = re.escape(sep) + r'\\s*(?![^\\[\\]]*\\])(?![^()]*\\))'\n return re.split(regex, string)", "def split_escaped_delim (delimiter, string, count=0):\n assert len(delimiter) == 1\n\n split_expression = re.compile(r\"\"\"(?<!\\\\)%s\"\"\" % (delimiter))\n\n result = split_expression.split(string, count)\n\n return result", "def shlex_split(self, text):\n if six.PY2:\n text = text.encode('utf-8')\n return shlex.split(text)", "def split_param(text: str, prefixes: Sequence[str], sep: str) -> tuple[str, str, str]:\n stripped = text.strip()\n if not prefixes:\n prefix = ''\n rest = stripped\n else:\n try:\n prefix = next(filter(stripped.startswith, prefixes))\n except StopIteration:\n prefix = ''\n rest = stripped\n else:\n rest = stripped.split(prefix, maxsplit=1)[1].strip()\n assert len(prefix) >= 1\n assert rest\n arg, part_sep, descr = rest.partition(sep.join((' ', ' ')))\n if not part_sep:\n if rest.endswith(sep):\n arg = rest[:-1]\n elif sep + ' ' in rest:\n arg, _, descr = rest.partition(sep + ' ')\n # if we hit neither then there is no '-' in text, possible case of '[prefix] foo'?\n return prefix, arg.strip(), descr.lstrip()", "def splitInSentence(self,text):\n return self._support.splitInPhrase(text)", "def tokenize(text,split_str='\\s',chars=False):\n if not chars:\n text=re.split(split_str,text)\n return [token for token in text if token not in [\"\"]]", "def try_split(text, chars=(u'—', '-')):\n for c in chars:\n segments = text.split(c)\n if len(segments) > 1:\n return [s.strip() for s in segments]", "def split_quote(s, split_char='/', quote='\\\\'):\n\n buf = \"\"\n parse_str = iter(s)\n for char in parse_str:\n if char == split_char:\n yield buf\n buf = \"\"\n continue\n if char == quote:\n char = next(parse_str)\n if char != split_char:\n buf += quote\n buf += char\n yield buf", "def addSplit(self):\n pass", "def split(self, string):\n return (re.split('; |, |: |\"(\"|\"(\"|;|,|:| |', string))", "def sep_token(self):\r\n if 
self._sep_token is None:\r\n logger.error(\"Using sep_token, but it is not set yet.\")\r\n return self._sep_token", "def test_split_string(self):\n mytext = '2011 Senior PGA Championship presented by'\n string1, string2 = split_string(mytext, 25, 25)\n self.assertEqual(string1, '2011 Senior PGA')\n self.assertEqual(string2, 'Championship presented')", "def add_separator(self, structure, word, debug=False):\n word_segmented = list(word)\n char = 0\n forward = 0\n while char < len(structure) - 1:\n if structure[char] == '-':\n word_segmented.insert(char + forward, '-')\n elif structure[char] == 'T':\n forward += 1 # moving one forward because T is mapped to two characters\n char += 1\n\n if debug:\n print(structure, word, word_segmented, ''.join(word_segmented))\n\n return ''.join(word_segmented)", "def _splitbycharset(txt, charset):\n for firstpos, char in enumerate(txt):\n if char in charset:\n return txt[firstpos], txt[:firstpos], txt[firstpos + 1:]\n return '', txt, ''", "def split(string, separator, keep_separator):\n\t\t\tparts = string.split(separator)\n\t\t\tif keep_separator:\n\t\t\t\t*parts, last_part = parts\n\t\t\t\tparts = [part + separator for part in parts]\n\t\t\t\tif last_part:\n\t\t\t\t\treturn parts + [last_part]\n\t\t\treturn parts", "def split_string(text, chars_per_string):\n return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]", "def split_string(text, chars_per_string):\n return [text[i:i + chars_per_string] for i in range(0, len(text), chars_per_string)]", "def separate_semicolon(s):\n return s.split(';')", "def line_split(self, line):\n\t\tline = re.sub(r\"`(.*?)'\", quote_replace, line)\n\t\tline = line.translate(None, '.:,()+*')\n\t\treturn line.split()", "def sentencesplit(doc):\n out = doc\n out = out.replace(\"? \", \"?.\")\n out = out.replace(\"! 
\", \"!.\")\n out = out.split(\".\")\n i = 0\n while \"\" in out or \" \" in out:\n if out[i] == \"\" or out[i] == \" \":\n out.pop(i)\n continue\n i += 1\n return out", "def text_prep(text):\n text1 = text.lower()\n text2 = re.sub('[.,!?\"\\'-\\\\\\/:;1-9+]', ' ', text1)\n text3 = text2.replace('\\n', ' ')\n text4 = re.sub(' +', ' ', text3)\n text_obrab = text4.split()\n return text_obrab", "def split_stem(sentence):\n sentence = re.sub('([a-z])([A-Z])', u'\\\\1 \\\\2', sentence)\n return sentence.split()", "def set_separator(self, separator):\n self._separator = separator", "def clean_and_split(text: str, compiled_pattern=TOKENIZER):\n\n text = text.lower().strip()\n if not hasattr(compiled_pattern, 'findall'):\n return text.split()\n return compiled_pattern.findall(text)", "def recordDelimiterChoice(self):\n# Thanks to https://stackoverflow.com/questions/610883\n grid = self.ids.delimiterGrid\n for x in grid.children:\n try:\n if x.active:\n self.delim = x.name\n except AttributeError:\n pass\n # This function cleans the data and puts it back in the same file\n# self.plotter.normalizeCSV(self.filename, self.delim)\n self.headers = self.plotter.get_headers(self.filename, self.delim)\n # Dynamically construct the screen for axis selection\n self.header_choices('x')", "def test_double_spaces():\n assert my_splitter(\"string with !@#$double spaces\", \" \") == \\\n [\"string\", \"\", \"with\", \"\", \"!@#$double\", \"\", \"spaces\"]", "def test_string_ends_with_sep():\n assert my_splitter(\"aaa,bbb,\", \",\") == [\"aaa\", \"bbb\", \"\"]", "def parse(text):\n # Make sure that there's text to be split\n if text == None:\n return text\n return text.split(',')", "def __init__(self, sep_text: Optional[str] = None):\n super().__init__()\n self.special_end_text = sep_text", "def __init__(self, sep_text: Optional[str] = None):\n super().__init__()\n self.special_end_text = sep_text", "def _tokenize_line(self, line: str, pattern='\\W'):\n # TODO check nltk tokenize\n # TODO check string not to lower\n line = re.sub(\"[.,;:]\", \" \", line)\n return re.split(pattern, line.lower())", "def bert_segment(tokenized_text):\n segment1 = [1 if token==\"[SEP]\" else 0 for token in tokenized_text]\n \n try:\n index_sep = segment1.index(1)\n except:\n index_sep = len(segment1)-1\n \n segment2 = index_sep*[0] + len(segment1[index_sep:])*[1]\n assert len(tokenized_text) == len(segment2)\n return segment2", "def tokenize_by_space(text: str) -> List[str]:\n return text.split(\" \")", "def test_split_string(self):\n self.assertEqual(('1-4', 14), split_string('1-4/14'))", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def split_sents(sents):\n space = '$'\n\n if sents[-1] in PUNC:\n sents = sents[:-1]\n\n return sents.translate(str.maketrans({',': space, '.': space, ' ': ''})).split(space)", "def setSplitChars(self, value):\n return self._set(splitChars=value)", "def setSplitChars(self, value):\n return self._set(splitChars=value)", "def __init__(self, sep):\n self.sep = sep", "def inner_split(s):\n\n return s.split(split_string)", "def splitWordList(self, text):\n result = list()\n if text is None:\n return result\n\n t = text + \"⁋\"\n t = t.replace('\\n', '⁋')\n t = re.sub(WordListProcessor.REFERENCE_PATTERN, \"\", t)\n t = re.sub(WordListProcessor.SUPERSCRIPT_PATTERN, \"\", t) # TODO: Extract sense!\n t = re.sub(WordListProcessor.HTML_REMOVER, \"\", t)\n t 
= t.replace(\"&quot\", \"\\\"\")\n t = t.replace(\",\", \"⁋,\")\n t = t.replace(\";\", \"⁋\")\n # print(t)\n # t = re.sub(WordListProcessor.BRACKETED_DELIMITER, \"$1$2$3$4$5$6\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER1, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER2, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER3, \"$1$2\", t)\n t = self.escapeDelimiters(t)\n # print(t)\n t = t.replace(\"⁋;\", \"⁋\")\n t = t.replace(\"⁋,\", \"⁋\")\n t = t.replace(\"]] or [[\", \"]]⁋[[\")\n t = t.replace(\"]] and [[\", \"]]⁋[[\")\n t = t.replace(\" - \", \"⁋\")\n # t = t.replace(\" / \", \"⁋\")\n j = t.find(\" / \") # Use ' / ' only as a delimiter if there are at least two of them!\n if j >= 0:\n j = t.find(\" / \", j)\n if j >= 0:\n t = t.replace(\" / \", \"⁋\")\n # print(t)\n\n # print(t)\n while True:\n delim = t.find('⁋')\n if delim >= 0:\n word = t[0:delim]\n if word:\n # Normalize the word.\n word = word.strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n word = self.deWikify(word).strip()\n word = self.removeBrackets(word).strip()\n word = self.removeTemplates(word).strip()\n word = self.removeComments(word).strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n if word.endswith(\".\"):\n word = word[:-1].strip()\n if word.endswith(\",\"):\n word = word[:-1].strip()\n\n # Check for slashes.\n word = word.replace(\" / \", \"/\")\n word = word.replace(\"/ \", \"/\")\n i = word.find('/')\n if word:\n if i >= 0 and word.find(' ') < 0:\n while True:\n result.append(word[0:i])\n word = word[i + 1:]\n i = word.find('/')\n if i < 0:\n break\n result.append(word)\n else:\n result.append(word)\n\n t = t[delim + 1:]\n\n else:\n break\n\n return result", "def _detab(self, text):\r\n if '\\t' not in text:\r\n return text\r\n return self._detab_re.subn(self._detab_sub, text)[0]", "def test_get_separator_space():\n # GIVEN a line with spaces\n line = \"one two three\"\n # WHEN getting the separator\n sep = samplesheet.get_separator(line)\n # THEN assert space is returned\n assert sep == \" \"", "def optSplit(opt, delim, empty = ''):\n\tdef getDelimeterPart(oldResult, prefix):\n\t\ttry:\n\t\t\ttmp = oldResult[0].split(prefix)\n\t\t\tnew = tmp.pop(1)\n\t\t\ttry: # Find position of other delimeters in string\n\t\t\t\totherDelim = min(filter(lambda idx: idx >= 0, map(lambda x: new.find(x), delim)))\n\t\t\t\ttmp[0] += new[otherDelim:]\n\t\t\texcept Exception:\n\t\t\t\totherDelim = None\n\t\t\treturn [str.join(prefix, tmp)] + oldResult[1:] + [new[:otherDelim]]\n\t\texcept Exception:\n\t\t\treturn oldResult + ['']\n\tresult = map(str.strip, reduce(getDelimeterPart, delim, [opt]))\n\treturn tuple(map(lambda x: QM(x == '', empty, x), result))", "def split(value: str, sep: str = \":\") -> Tuple:\n left, _, right = value.partition(sep)\n return (left, right) if right else (None, left)", "def separateByTag(soup, parent, splitters, separator=ITEM_SEPARATOR):\n for splittee in parent.find_all(name=splitters):\n insertSeparator(soup, splittee, separator)", "def soar_splitpart(value, index, split_chars=' - '):\n splits = value.split(split_chars)\n if len(splits) > index:\n return splits[index]\n\n return value", "def _handle_separator(sep):\n if sep is None or sep == \"\":\n return \",\"\n 
else:\n return str(sep)", "def split(self, s):\n punctuations = _SPLIT_RE.findall(s)\n texts = _SPLIT_RE.split(s)\n assert len(punctuations) + 1 == len(texts)\n new_texts = [self._split(x) for x in texts]\n for i, punctuation in enumerate(punctuations):\n new_texts.insert(2*i+1, punctuation)\n return [item for sublist in new_texts for item in sublist]", "def my_splitter(to_split, separator=None):\n if separator is None:\n split_list_regex = re.compile(r'[^\\s]+')\n return split_list_regex.findall(to_split)\n\n split_list = []\n\n while separator in to_split:\n separators_location = to_split.find(separator, 0)\n separated_word = to_split[:separators_location]\n split_list.append(separated_word)\n to_split = to_split[separators_location + len(separator):]\n\n split_list.append(to_split)\n\n return split_list", "def safe_split(self, text):\n try:\n words = self.shlex_split(text)\n return words\n except:\n return text", "def testSplit(self):\n\n s = StrObject(u\"first second\")\n result = s.call(u\"split\", [StrObject(u\" \")])\n pieces = [obj._s for obj in unwrapList(result)]\n self.assertEqual(pieces, [u\"first\", u\"second\"])", "def recovTextBetweenTags(texts: str, separator: str): \n text_clean = []\n lisI = []\n lisS = []\n\n for i in range(0, len(texts)):\n if str(texts[i]) == \"<\":\n lisI.append(i)\n if texts[i] == '>':\n lisS.append(i)\n\n len_lis = len(lisI)\n for h in range(0, len_lis):\n if h < (len_lis-1):\n text_clean.append(texts[lisS[h]:lisI[h+1]])\n\n if separator != 'non':\n description = str(text_clean).replace('>', '').replace(\n ',', '').replace('\\'', '').replace(',', '')\n description = description.split(separator)\n else:\n description = text_clean\n\n return description", "def _get_separator(num, sep_title, sep_character, sep_length):\n left_divider_length = right_divider_length = sep_length\n if isinstance(sep_length, tuple):\n left_divider_length, right_divider_length = sep_length\n left_divider = sep_character * left_divider_length\n right_divider = sep_character * right_divider_length\n title = sep_title.format(n=num + 1)\n\n return \"{left_divider}[ {title} ]{right_divider}\\n\".format(\n left_divider=left_divider, right_divider=right_divider, title=title\n )", "def parse_text(self, text):\n self._text_paragraph = text.split(\"\\n\")\n self._render()", "def rsplit(s, sep, maxsplits=0):\n L = s.split(sep)\n if not 0 < maxsplits <= len(L):\n return L\n return [sep.join(L[0:-maxsplits])] + L[-maxsplits:]", "def texto_para_lista(elemento, delimitador='|'):\n return elemento.split(delimitador)", "def replaceMultiStopMark(text):\n text = re.sub(r\"(\\.)\\1+\", ' multiStop ', text)\n return text", "def tokenize(s):\n return split_words(replace_numbers(r' \\1 ', s))", "def keep_tokens(spans: Generator[dict, None, None], text: str, separators: str) -> Generator[dict, None, None]:\n\n n = len(text)\n current_span = next(spans, None)\n while current_span is not None:\n # Check that a preceding symbol is a separator\n if current_span['start'] > 0 and text[current_span['start'] - 1] not in separators:\n current_span = next(spans, None)\n continue\n\n # Check that a succeeding symbol is a separator\n if current_span['end'] < n and text[current_span['end']] not in separators:\n current_span = next(spans, None)\n continue\n\n yield current_span\n current_span = next(spans, None)", "def get_separator(self):\r\n \r\n return self._separator", "def split(value, delimiter):\n return value.split(delimiter)", "def tokenize(self, start_pos=0, text=None):\n pass", "def detab(self, 
text):\r\n newtext = []\r\n lines = text.split('\\n')\r\n for line in lines:\r\n if line.startswith(' '*self.tab_length):\r\n newtext.append(line[self.tab_length:])\r\n elif not line.strip():\r\n newtext.append('')\r\n else:\r\n break\r\n return '\\n'.join(newtext), '\\n'.join(lines[len(newtext):])", "def word_split_by_char(s):\n old_words = []\n old_words.append(s)\n result = []\n while len(old_words) > 0:\n new_words = []\n for s in old_words:\n if '-' in s: # Case: ab-cd-ef\n new_words+=s.split('-')\n elif '.' in s: # Case: ab.cd.ef\n new_words+=s.split('.')\n elif '_' in s: # Case: ab_cd_ef\n new_words+=s.split('_')\n elif '/' in s: # Case: ab/cd/ef\n new_words+=s.split('/')\n elif '\\\\' in s: # Case: ab\\cd\\ef\n new_words+=s.split('\\\\')\n else:\n t = camel_case_split(s)\n if len(t) > 1:\n new_words += t\n result.append(s)\n old_words = new_words\n return result", "def separator(self):\n pass" ]
[ "0.61333054", "0.5867214", "0.5828028", "0.58185434", "0.5792178", "0.5787775", "0.573595", "0.5613339", "0.5568093", "0.5554935", "0.55341613", "0.5515633", "0.5514492", "0.55078864", "0.5440001", "0.5372514", "0.5367604", "0.53267324", "0.5326295", "0.52897936", "0.52483726", "0.5225409", "0.5223403", "0.522054", "0.52090126", "0.5185933", "0.5185923", "0.5171821", "0.5169567", "0.51586425", "0.51582724", "0.51311696", "0.5123349", "0.51232225", "0.5085432", "0.50723165", "0.5066263", "0.50600976", "0.50398487", "0.50358844", "0.50341034", "0.50318676", "0.5011947", "0.5008911", "0.49989188", "0.49926347", "0.49779534", "0.49732575", "0.49697044", "0.49677098", "0.49677098", "0.49555856", "0.4945089", "0.49169046", "0.49126008", "0.49100932", "0.49050984", "0.48973882", "0.48964867", "0.48845935", "0.48735368", "0.48730287", "0.4863024", "0.4863024", "0.48625824", "0.4862063", "0.48524418", "0.48450738", "0.48420686", "0.48371565", "0.48274294", "0.48274294", "0.482368", "0.48195392", "0.48133898", "0.4809447", "0.48048377", "0.4798978", "0.47928238", "0.4789453", "0.47859222", "0.47832847", "0.47806662", "0.47715732", "0.47692087", "0.47676465", "0.4764062", "0.4752586", "0.47522873", "0.47405258", "0.47358915", "0.47353724", "0.47274962", "0.47229505", "0.4718242", "0.47114554", "0.47114053", "0.47099113", "0.47082025", "0.47065467" ]
0.7283009
0
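The document for this record compresses its whole escape scheme into one line; a short runnable sketch makes the behavior concrete. The wrapper class name and the editSep value below are assumptions for illustration only:

class TextEntry:
    editSep = ','

    def splitText(self, textStr):
        # doubled separators are stashed as NUL, the text is split on single
        # separators, then each NUL is restored to a literal separator
        return [text.strip().replace('\0', self.editSep)
                for text in textStr.replace(self.editSep * 2, '\0').split(self.editSep)]

print(TextEntry().splitText('red, green,, blue, blue'))
# -> ['red', 'green, blue', 'blue']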
Any format, prefix, suffix, or HTML info in the attrs dict
def __init__(self, name, attrs={}): ChoiceFormat.__init__(self, name, attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = attr['value']\r\n\r\n return format", "def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):\n\n # Merge the attr dict and kw dict into a single attributes\n # dictionary (rewriting any attribute names, extracting\n # namespaces, and merging some values like css classes).\n attributes = {} # dict of key=(namespace,name): value=attribute_value\n if attr:\n for a, v in attr.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n if kw:\n for a, v in kw.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n\n # Add title attribute if missing, but it has an alt.\n if ('html', 'alt') in attributes and ('html', 'title') not in attributes:\n attributes[('html', 'title')] = attributes[('html', 'alt')]\n\n # Force both lang and xml:lang to be present and identical if\n # either exists. The lang takes precedence over xml:lang if\n # both exist.\n #if ('html', 'lang') in attributes:\n # attributes[('xml', 'lang')] = attributes[('html', 'lang')]\n #elif ('xml', 'lang') in attributes:\n # attributes[('html', 'lang')] = attributes[('xml', 'lang')]\n\n # Check all the HTML attributes to see if they are known and\n # allowed. Ignore attributes if in non-HTML namespaces.\n if allowed_attrs:\n for name in [key[1] for key in attributes if key[0] == 'html']:\n if name in _common_attributes or name in allowed_attrs:\n pass\n elif name.startswith('on'):\n pass # Too many event handlers to enumerate, just let them all pass.\n else:\n # Unknown or unallowed attribute.\n err = 'Illegal HTML attribute \"%s\" passed to formatter' % name\n raise ValueError(err)\n\n # Finally, format them all as a single string.\n if attributes:\n # Construct a formatted string containing all attributes\n # with their values escaped. Any html:* namespace\n # attributes drop the namespace prefix. 
We build this by\n # separating the attributes into three categories:\n #\n # * Those without any namespace (should only be xmlns attributes)\n # * Those in the HTML namespace (we drop the html: prefix for these)\n # * Those in any other non-HTML namespace, including xml:\n\n xmlnslist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if not k[0]]\n htmllist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] == 'html']\n otherlist = ['%s:%s=\"%s\"' % (k[0], k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] and k[0] != 'html']\n\n # Join all these lists together in a space-separated string. Also\n # prefix the whole thing with a space too.\n htmllist.sort()\n otherlist.sort()\n all = [''] + xmlnslist + htmllist + otherlist\n return ' '.join(all)\n return ''", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def back_to_tag(tag, attrs):\n sol = '<' + tag\n for (prop, val) in attrs:\n sol += ' ' + prop + '=\"' + val + '\"'\n sol += '>'\n return sol", "def add_attrs(value, arg):\n try:\n # Split list on comma\n kv_pairs = arg.split(\",\")\n except ValueError:\n raise template.TemplateSyntaxError(\n \"add_attrs requires as an argument a string in the format 'key:value, key1:value1, key2:value2...'\"\n )\n\n\n # Create dictionary\n html_attrs = dict()\n\n # Clean items and add attribute pairs to dictionary\n for item in kv_pairs:\n item = item.strip()\n k, v = item.split(\":\")\n html_attrs.update({k.strip():v.strip()})\n\n return value.as_widget(attrs=html_attrs)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def 
string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def attrs(**kwds):\n\n def decorate(f):\n for k in kwds:\n setattr(f, k, kwds[k])\n return f\n\n return decorate", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc = '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def _attrs(self, element, attrs):\n for attr, val in list(attrs.items()):\n element.setAttribute(attr, val)\n return element", "def date_attrs(name):\n attrs = battrs(name)\n attrs.update({'class': 'form-control datepicker'})\n return attrs", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def extend_attribute_dictionary(attributedict, ns, name, value):\n\n key = ns, name\n if 
value is None:\n if key in attributedict:\n del attributedict[key]\n else:\n if ns == 'html' and key in attributedict:\n if name == 'class':\n # CSS classes are appended by space-separated list\n value = attributedict[key] + ' ' + value\n elif name == 'style':\n # CSS styles are appended by semicolon-separated rules list\n value = attributedict[key] + '; ' + value\n elif name in _html_attribute_boolflags:\n # All attributes must have a value. According to XHTML those\n # traditionally used as flags should have their value set to\n # the same as the attribute name.\n value = name\n attributedict[key] = value", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def parse_tag_attrs(tag_str, options_d=None, font_d=None, case=\"\", **kwargs):\n attr_b = kwargs.pop(\"attr\", \"\")\n auto_b = kwargs.pop(\"auto\", False)\n font_d = kwargs.pop(\"font_d\", font_d or {})\n options_d = kwargs.pop(\"options_d\", options_d or {})\n case = kwargs.pop(\"case\", case)\n widget = kwargs.pop(\"widget\", None)\n text_w = kwargs.pop(text_s, None)\n bad_opts = []\n # INTs: height repeatdelay repeatinterval underline width; size fun fov\n for keyval in split_attrs(tag_str):\n if \"=\" in keyval:\n key, val = keyval.split(\"=\")\n val = unquote(val)\n elif keyval:\n key, val = keyval, None\n else:\n continue\n key = key.lower()\n key2, key3, key4 = key[:2], key[:3], key[:4]\n lowval = val.lower() if val else val\n key = unalias(key)\n kalias = alias(key)\n if val == \"None\": # in ('False', 'None') #\n pass\n elif key3 in (\n bg_s,\n background_s[:3],\n fg_s,\n foreground_s[:3],\n ) or kalias in (bg_s, fg_s):\n options_d.update(**{key: val})\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n options_d.update(**{key: val})\n if auto_b and compound_s not in options_d:\n options_d.update(compound=tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],) or kalias == bd_s:\n options_d.update(borderwidth=val)\n elif key4 in (command_s[:4], compound_s[:4],) or kalias in (\n command_as,\n compound_as,\n ):\n options_d.update(**{key: val})\n elif (\n key2 in (height_s[:2], width_s[:2])\n or key3 in (repeatdelay_s[:3], repeatinterval_s[:3])\n or kalias\n in (height_as, width_as, repeatdelay_as, repeatinterval_as)\n ):\n options_d.update(**{key: int(val)})\n elif (\n key2 in (cursor_s[:2],)\n or key3 == font_s[:3]\n or kalias in (cursor_as, font_as)\n ):\n options_d.update(**{key: val})\n elif key2 in (\"r\", relief_s[:2],) or kalias == relief_as:\n options_d.update(relief=val)\n if auto_b and borderwidth_s not in options_d and val != tk.FLAT:\n options_d.update(borderwidth=str(1))\n elif key2 == underline_s[:2] or kalias == underline_as:\n options_d.update(underline=-1 if val is None else int(val))\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ) or kalias in (selectbackground_as, selectforeground_as):\n options_d.update(**{key: val})\n # special for fonts\n elif key2 in (family_s[:2],) or kalias == family_as:\n font_d[family_s] = val\n elif key2 in (size_s[:2],) 
or kalias == size_as:\n try:\n font_d[size_s] = int(val)\n except ValueError:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: ERROR Setting Font Size to %r\" % val,\n Raise=True,\n )\n elif key3 in (bold_as, tk_font.BOLD[:3]) or kalias == bold_as:\n font_d[weight_s] = (\n tk_font.BOLD\n if str(val) not in (\"0\", \"False\",)\n else tk_font.NORMAL\n )\n elif key2 in (weight_s[:2],) or kalias == weight_as:\n font_d[weight_s] = val\n elif key2 in (italic_as, tk_font.ITALIC[:2]) or kalias == italic_as:\n font_d[slant_s] = (\n tk_font.ITALIC\n if str(val) not in (\"0\", \"False\",)\n else tk_font.ROMAN\n )\n elif key2 in (slant_s[:2],) or kalias == slant_as:\n font_d[slant_s] = val\n elif (\n key3 in (funderline_as, funderline_s[:3])\n or kalias == funderline_as\n ):\n font_d[underline_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n elif (\n key3 in (foverstrike_as, foverstrike_s[:3])\n or kalias == foverstrike_as\n ):\n font_d[overstrike_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n # special \"case\" implementation\n elif key3 in (case_s[:3],) or kalias == case_as:\n for s in (upper_s, capitalize_s, lower_s, title_s, swapcase_s):\n if s.startswith(lowval):\n case = s if s != capitalize_s else upper_s\n break\n elif (\n key2 == upper_s[:2]\n or key3 in (capitalize_s[:3],)\n or kalias in (upper_as, capitalize_as)\n ):\n if str(val) not in (\"0\", \"False\",):\n case = upper_s\n elif key2 in (lower_s[:2],) or kalias == lower_as:\n if str(val) not in (\"0\", \"False\",):\n case = lower_s\n elif key2 == title_s[:2] or kalias == title_as:\n if str(val) not in (\"0\", \"False\",):\n case = title_s\n elif key2 == swapcase_s[:2] or kalias == swapcase_as:\n if str(val) not in (\"0\", \"False\",):\n case = swapcase_s\n elif key in ():\n bad_opts.append((key, val))\n else:\n options_d.update(**{key: val})\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n if attr_b:\n return (\n case\n if attr_b == case_s\n else options_d.get(attr_b, font_d.get(attr_b))\n )\n return options_d, font_d, case", "def gen_tag_attrs(widget=None, options_d=None, font=None, case=None, **kwargs):\n auto_b = kwargs.get(\"auto\", False)\n case = kwargs.get(case_s, case)\n extend_b = kwargs.get(\"extend\", False)\n font = kwargs.pop(\"font\", font or {})\n index_i = kwargs.pop(\"index\", None)\n kmode_s = kwargs.get(\"kmode\", \"\") # a=alias, o=options, ''=unchanged\n options_d = kwargs.pop(\"options\", options_d or {})\n pare_b = kwargs.get(\"pare\", True)\n widget = kwargs.pop(\"widget\", widget)\n text_w = kwargs.get(text_s, None)\n recurse_b = kwargs.pop(\"recurse\", widget and isinstance(widget, TTWidget))\n fmt_s = \"\"\n font_d = {}\n w_font_d, w_options_d = {}, {}\n if index_i is not None and widget is None:\n raise Exception(\"Cannot set 'index' when 'widget' is None\")\n if widget: # and isinstance(widget, TTWidget): #\n excludes_t = () if widget.emulation_b else ()\n w_options_d = {\n k: v[-1]\n for k, v in widget.config().items()\n if len(v) == 5 and str(v[-1]) != str(v[-2]) and k not in excludes_t\n }\n try:\n w_options_d[case_s] = widget.case\n except AttributeError:\n pass\n w_font = widget.cget(font_s) # w_options_d.pop(font_s, None)\n w_font_d = get_font_dict(w_font) if w_font else {}\n if pare_b and w_font_d:\n def_w_font = widget.config(font_s)[-2]\n def_w_font_d = get_font_dict(def_w_font)\n w_font_d = pare_dict(w_font_d, def_w_font_d)\n if font:\n if isinstance(font, str):\n try:\n font = tk_font.nametofont(font)\n except 
tk.TclError:\n pass\n elif type(font) in (list, tuple):\n font = tk_font.Font(font=font)\n if isinstance(font, tk_font.Font):\n font = font.actual()\n if isinstance(font, dict):\n font_d = font\n if case: # is not None:\n options_d = _merge_dicts(options_d, dict(case=case))\n d = _merge_dicts(\n w_options_d,\n convert_font_dict_to_ttoptions_dict(w_font_d),\n options_d,\n convert_font_dict_to_ttoptions_dict(font_d),\n kwargs,\n )\n bad_opts = []\n for key, val in d.items():\n key = key.lower()\n if key in (\"auto\", \"extend\", \"kmode\", \"pare\",): # text_s, ): #\n continue\n key2, key3, key4 = key[:2], key[:3], key[:4]\n kalias = alias(key)\n koption = unalias(key)\n if kmode_s:\n if kmode_s[0] == \"a\": # alias\n keyout = kalias\n kfunc = alias\n auto_cpd, auto_bd = compound_as, bd_s\n elif kmode_s[0] == \"o\": # option\n keyout = koption\n kfunc = unalias\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n else:\n keyout = key\n kfunc = str\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n if val:\n val = quote(val)\n if (\n key3 in (bg_s, background_s[:3], fg_s, foreground_s[:3])\n or key2 == underline_s[:2]\n or kalias in (bg_s, fg_s, underline_as)\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_cpd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_cpd, tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],):\n if \"%s=%s \" % (auto_bd, 1) in fmt_s:\n if val != 1:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_bd, 1), \"%s=%s \" % (keyout, val)\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key4 in (compound_s[:4],) or kalias == compound_as:\n if \"%s=%s \" % (auto_cpd, tk.CENTER) in fmt_s:\n if val != tk.CENTER:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_cpd, tk.CENTER),\n \"%s=%s \" % (keyout, val),\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == cursor_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == font_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, get_named_font(val))\n elif key2 in (relief_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_bd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_bd, 1)\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sbd_s,\n selectborderwidth_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n # special for fonts\n elif key2 in (family_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (size_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (weight_s[:2],):\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.BOLD),\n 1\n if isinstance(val, str) and val.lower() == tk_font.BOLD\n else 0,\n )\n elif key2 == slant_s[:2]:\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.ITALIC),\n 1\n if isinstance(val, str) and val.lower() == tk_font.ITALIC\n else 0,\n )\n elif key3 in (funderline_as, funderline_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(funderline_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n elif key3 in (foverstrike_as, foverstrike_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(foverstrike_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n # special \"case\" implementation\n elif key3 == case_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(case_s), val)\n elif key2 == upper_s[:2] or key3 == capitalize_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(upper_s), val)\n elif key2 in (lower_s[:2], title_s[:2], swapcase_s[:2]):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key in 
():\n bad_opts.append((key, val))\n elif key in (text_s, text_as):\n if extend_b or widget:\n fmt_s += \"%s=%s \" % (keyout, val)\n else:\n # bad_opts.append((key, val))\n fmt_s += \"%s=%s \" % (keyout, val)\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n fmt = fmt_s.strip()\n if widget and isinstance(widget, TTWidget) and recurse_b:\n fmt = [\n fmt,\n ]\n for _, gathering in widget._get_kids(items=True):\n child = gathering[\"label\"]\n case = gathering.get(case_s, \"\")\n kid_options = {\n k: v[-1]\n for k, v in child.config().items()\n if len(v) == 5\n and str(v[-1]) != str(v[-2])\n and (k, v[-1]) not in w_options_d.items()\n and not (k in label_override_d and str(v[-1]) == \"0\")\n } #\n cf = kid_options.pop(font_s, None)\n cdf = child.config(font_s)[-2]\n if cf != cdf:\n c_font_d = pare_dict(get_font_dict(cf), get_font_dict(cdf))\n else:\n c_font_d = {}\n if case:\n kid_options.update(case=case)\n fmt.append(\n gen_tag_attrs(options=kid_options, font=c_font_d, **kwargs)\n )\n return fmt if index_i is None else fmt[index_i]", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def prepare_node_attrs(self):", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs", "def attkey_to_SVG_attribs(self,k):\n atts= k.split('@')\n o= ''\n acodes= {'C':'stroke','W':'stroke-width','S':'stroke-dasharray','O':'stroke-opacity'}\n for a in atts:\n if a[0] in acodes:\n o+= '%s=\"%s\" ' % (acodes[a[0]],a[1:])\n# elif a[0] == 'S': # Maybe do something special like this.\n# o+= 'stroke-dasharray=\"%\" ' % a[1:]\n return o", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def fix_attributes(string):\n defs = re.compile('<dl class=\"attribute\">(?P<descrip>.*?)</dl>',flags=re.DOTALL)\n name = re.compile('<code class=\"descclassname\">(?P<name>[^<]*)</code>')\n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefsub = ''\n remnsub = remain[match.start(1):match.end(1)]\n descrip = name.search(remnsub)\n if descrip:\n prefix += remnsub[:descrip.start()]\n prefix += remnsub[descrip.end():]\n prefix += remain[match.end(1):match.end(0)]\n else:\n prefix += remain[match.start(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def read_attribs(self):\n\n attribs = {}\n while 
self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def extensible_attributes():\n return 'extensibleattributedef?'", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def 
parse_tag_attrs(\n self, tags_str, options_d=None, font_d=None, case=\"\", **kwargs\n ):\n return parse_tag_attrs(\n tags_str,\n options_d,\n font_d,\n case,\n widget=self,\n text=getattr(self, \"debug_text\", None),\n **kwargs\n )", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def get_attributes(self) -> Dict[str, str]:\n pass", "def transform(attrs: dict) -> dict:\n\n pass", "def get_html_element_attributes(self):\n html_element_attributes = {\n 'class': self.css_classes or False, # Fall back to false to avoid class=\"\"\n }\n if self.should_render_as_link():\n html_element_attributes['href'] = self.url\n return html_element_attributes", "def create_descr(self, attr_name):", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def set_attrs(self, username, attrs):\n pass", "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,\r\n prettyPrint=False, indentLevel=0):\r\n\r\n encodedName = self.toEncoding(self.name, encoding)\r\n\r\n attrs = []\r\n if self.attrs:\r\n for key, val in self.attrs:\r\n fmt = '%s=\"%s\"'\r\n if isString(val):\r\n if self.containsSubstitutions and '%SOUP-ENCODING%' in val:\r\n val = self.substituteEncoding(val, encoding)\r\n\r\n # The attribute value either:\r\n #\r\n # * Contains no embedded double quotes or single quotes.\r\n # No problem: we enclose it in double quotes.\r\n # * Contains embedded single quotes. No problem:\r\n # double quotes work here too.\r\n # * Contains embedded double quotes. No problem:\r\n # we enclose it in single quotes.\r\n # * Embeds both single _and_ double quotes. This\r\n # can't happen naturally, but it can happen if\r\n # you modify an attribute value after parsing\r\n # the document. Now we have a bit of a\r\n # problem. We solve it by enclosing the\r\n # attribute in single quotes, and escaping any\r\n # embedded single quotes to XML entities.\r\n if '\"' in val:\r\n fmt = \"%s='%s'\"\r\n if \"'\" in val:\r\n # TODO: replace with apos when\r\n # appropriate.\r\n val = val.replace(\"'\", \"&squot;\")\r\n\r\n # Now we're okay w/r/t quotes. 
But the attribute\r\n # value might also contain angle brackets, or\r\n # ampersands that aren't part of entities. We need\r\n # to escape those to XML entities too.\r\n val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)\r\n\r\n attrs.append(fmt % (self.toEncoding(key, encoding),\r\n self.toEncoding(val, encoding)))\r\n close = ''\r\n closeTag = ''\r\n if self.isSelfClosing:\r\n close = ' /'\r\n else:\r\n closeTag = '</%s>' % encodedName\r\n\r\n indentTag, indentContents = 0, 0\r\n if prettyPrint:\r\n indentTag = indentLevel\r\n space = (' ' * (indentTag-1))\r\n indentContents = indentTag + 1\r\n contents = self.renderContents(encoding, prettyPrint, indentContents)\r\n if self.hidden:\r\n s = contents\r\n else:\r\n s = []\r\n attributeString = ''\r\n if attrs:\r\n attributeString = ' ' + ' '.join(attrs)\r\n if prettyPrint:\r\n s.append(space)\r\n s.append('<%s%s%s>' % (encodedName, attributeString, close))\r\n if prettyPrint:\r\n s.append(\"\\n\")\r\n s.append(contents)\r\n if prettyPrint and contents and contents[-1] != \"\\n\":\r\n s.append(\"\\n\")\r\n if prettyPrint and closeTag:\r\n s.append(space)\r\n s.append(closeTag)\r\n if prettyPrint and closeTag and self.nextSibling:\r\n s.append(\"\\n\")\r\n s = ''.join(s)\r\n return s", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def convert_attributes(cls, attrs):\n return {}", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def widget_attrs(self, widget):\n\n attrs = super(RelateField, self).widget_attrs(widget)\n\n attrs.update({'content_type': self.content_types})\n\n return attrs", "def attributes(doc, header, renderer=Attribute, item_class=DefinitionItem):\n items = doc.extract_items(item_class)\n lines = []\n renderer = renderer()\n for item in items:\n renderer.item = item\n lines += renderer.to_rst()\n lines.append('')\n return lines", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n info[ATTR_NAME] = info[ATTR_PROPERTIES]['Name'].replace('\\xa0', ' ')\n return info", "def img(self, **kwargs):\n attrs = ''\n for item in 
kwargs.items():\n if not item[0] in IMGATTRS:\n raise AttributeError, 'Invalid img tag attribute: %s'%item[0]\n attrs += '%s=\"%s\" '%item\n return '<img src=\"%s\" %s>'%(str(self),attrs)", "def gen_tag_attrs(self, *a, **kw):\n if kw.get(\"widget\", sentinel) is not None:\n raise Exception(\n \"TTToolTip.gen_tag_attrs(): 'widget' keyword must be set\"\n \" to None\"\n )\n return gen_tag_attrs(None, *a, **kw)", "def init_attrs(self):\n raise NotImplementedError", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def _style_to_basic_html_attributes(self, element, style_content,\n force=False):\n if style_content.count('}') and \\\n style_content.count('{') == style_content.count('{'):\n style_content = style_content.split('}')[0][1:]\n\n attributes = {}\n for rule in style_content.split(';'):\n split = rule.split(':')\n if len(split) != 2:\n continue\n key = split[0].strip()\n value = split[1]\n\n if key == 'text-align':\n attributes['align'] = value.strip()\n elif key == 'background-color':\n attributes['bgcolor'] = value.strip()\n elif key == 'width' or key == 'height':\n value = value.strip()\n if value.endswith('px'):\n value = value[:-2]\n attributes[key] = value\n\n for key, value in list(attributes.items()):\n if key in element.attrib and not force or key in self.disable_basic_attributes:\n # already set, don't dare to overwrite\n continue\n element.attrib[key] = value", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def domAttributesToString( node ):\n strOut = \"node has %d attribute(s):\\n\" % node.attributes.length;\n for i in range(node.attributes.length):\n attr = node.attributes.item(i);\n strOut += \"- %s:'%s'\\n\" % (attr.name, attr.value );\n return strOut;", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_", "def replace_tag_attributes(code_attrs, tag, tag_attrs):\n\n new_attrs = code_attrs.copy()\n for key, value in tag_attrs.items():\n if key in new_attrs:\n new_attrs[key] = new_attrs[key].replace(tag, value)\n\n return new_attrs", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def as_html(self):\n return mark_safe(\" 
\".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def format_link(attrs: Dict[tuple, str], new: bool = False):\n try:\n p = urlparse(attrs[(None, 'href')])\n except KeyError:\n # no href, probably an anchor\n return attrs\n\n if not any([p.scheme, p.netloc, p.path]) and p.fragment:\n # the link isn't going anywhere, probably a fragment link\n return attrs\n\n c = urlparse(settings.SITE_URL)\n if p.netloc != c.netloc:\n # link is external - secure and mark\n attrs[(None, 'target')] = '_blank'\n attrs[(None, 'class')] = attrs.get((None, 'class'), '') + ' external'\n attrs[(None, 'rel')] = 'nofollow noopener noreferrer'\n\n return attrs", "def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)", "def parseAttrs(self,attrs,date_type):\n\tattrs=copy.copy(attrs) #make sure we don't change user/group attributes\n \tattr_holders=self.getAttrHolders(attrs)\n\tmap(lambda x:x.setDateType(date_type),attr_holders)\n\tmap(lambda x:attrs.update(x.getParsedDic()),attr_holders)\n\treturn attrs", "def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs" ]
[ "0.735201", "0.6754294", "0.67166066", "0.67071074", "0.66780305", "0.65807486", "0.6522693", "0.6522693", "0.65187657", "0.6471306", "0.6269984", "0.62653935", "0.6153201", "0.6090701", "0.60323846", "0.60278016", "0.6011661", "0.60042846", "0.59841794", "0.5941162", "0.59205276", "0.5918955", "0.59121054", "0.5903962", "0.5884743", "0.5876164", "0.5857109", "0.5851559", "0.583173", "0.58274394", "0.5816038", "0.58061635", "0.5784312", "0.5755998", "0.5755998", "0.57360405", "0.57051307", "0.5701552", "0.5687975", "0.5650812", "0.5618766", "0.561154", "0.5605911", "0.56030387", "0.5602799", "0.55926436", "0.5587559", "0.5571399", "0.5567558", "0.55631375", "0.555545", "0.5550559", "0.55490625", "0.55470836", "0.55410224", "0.5519966", "0.55098814", "0.5492064", "0.547102", "0.5470936", "0.54692423", "0.5467515", "0.54661024", "0.54518676", "0.54405665", "0.5438651", "0.54003173", "0.5388153", "0.5382598", "0.5375904", "0.5375076", "0.53706104", "0.5359634", "0.5354708", "0.5354708", "0.5331472", "0.5324531", "0.53227526", "0.5316361", "0.5309617", "0.5308968", "0.53067", "0.5306182", "0.5299369", "0.52990687", "0.5287107", "0.52791494", "0.5277907", "0.5276578", "0.52742803", "0.5270845", "0.52608305", "0.52524847", "0.5244876", "0.5239417", "0.5234171", "0.5224983", "0.5215326", "0.521457", "0.5212088", "0.5203955" ]
0.0
-1
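The negatives in the record above all revolve around one pattern: walking a space-separated `key=value` attribute string and routing each pair into either a widget-options dict or a font dict, with loose boolean spellings normalized along the way. A minimal, self-contained sketch of that routing (hypothetical names; not the dataset's actual `TTWidget`/`parse_tag_attrs` API):

```python
# Hypothetical illustration of the tag-attrs splitting pattern seen in the
# negatives above; FONT_KEYS and split_tag_attrs are made-up names.
FONT_KEYS = {"family", "size", "weight", "slant", "underline", "overstrike"}

def split_tag_attrs(tags_str):
    options, font = {}, {}
    for pair in tags_str.split():
        key, _, val = pair.partition("=")
        key = key.lower()
        target = font if key in FONT_KEYS else options
        # normalize the common boolean spellings ("0"/"False", "1"/"True")
        if val in ("0", "False"):
            target[key] = False
        elif val in ("1", "True"):
            target[key] = True
        else:
            target[key] = val
    return options, font

if __name__ == "__main__":
    opts, font = split_tag_attrs("bg=yellow size=12 weight=1 relief=raised")
    assert opts == {"bg": "yellow", "relief": "raised"}
    assert font == {"size": "12", "weight": True}
    print(opts, font)
```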
Called by the base class init, after a class change or a format text change
def initFormat(self): ChoiceFormat.initFormat(self) fullFormat = ''.join(self.formatList) try: self.sep = [sep for sep in CombinationFormat.outputSepList if sep not in fullFormat][0] + ' ' except IndexError: self.sep = CombinationFormat.outputSepList[0] + ' '
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initFormat(self):\n pass", "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, text):\n\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self):\n self.text = ''", "def set_text(self):\n pass", "def post_init(self):\n\t\tpass", "def init_widget(self):\n super(UiKitTextView, self).init_widget()\n self.init_text()", "def __init__(self,\n text: str) -> None:\n\n super().__init__(text)", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.formatList = []", "def _post_init(self):\n pass", "def __post_init__(self):\n pass", "def initWidgets(self):\n self.lambdtext.setText(str(self.lambd))\n self.ptext.setText(str(self.p))", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def _init_display(self):\n raise NotImplementedError", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_objectapp_signals()", "def __init__(self, font='mediumbold'):\n\tself.set_font(font)", "def __init__(self):\r\n self.label = \"Bulk Layout Text Replace\"\r\n self.alias = \" Jake's Toolbox Alias Property True\"\r\n self.description = \"\"\r\n self.canRunInBackground = False", "def __post_init__(self):\n super().__post_init__()", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_gstudio_signals()", "def after_parsing(self):", "def __init__(self, as_text=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.as_text = as_text", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()", "def after_init(self) -> None:\n if self.options.format.lower() != \"default_notebook\":\n self.error_format = self.options.format\n if not hasattr(self, \"color\"):\n self.color = True", "def afterInit(self):", "def post_init(self, msg='hello'):\n print(\"post init 
! height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self, name, time, text):\n pass", "def init(self):", "def init(self):", "def __init__(self,txt=u'',unicodeEncoding='utf-8',verbose=False,tagID=0):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup,self).__init__(__txt=None,__rawTxt=txt,\n __SCOPEUPDATED=False,__VERBOSE=verbose,\n __tagID=tagID,\n __unicodeEncoding=unicodeEncoding)\n self.__cleanText()", "def __init__(self):\n\t\t# Setup fonts\n\t\tself.large_font = self._get_font(1,Annotator.THICK)\n\t\tself.large_font_outline = self._get_font(1,Annotator.THICK + Annotator.BORDER)\n\t\t\n\t\tself.small_font = self._get_font(0.5,Annotator.THIN)\n\t\tself.small_font_outline = self._get_font(0.5,Annotator.THIN + Annotator.BORDER)\n\t\t\n\t\t# Text colour\n\t\tself.colour = Annotator.COLOUR_BUSY\n\t\t\n\t\tself.forehead = (0,0,1,1)\n\t\tself.face = (0,0,1,1)", "def onInit(self):\n pass", "def _afterInit(self):\n pass", "def __init__(self, **kwargs):\n # We set it to True so that starting empty lines are\n # not counting as separators\n self.last_line_was_empty = True", "def _post_init(self) -> None:\n return", "def _init(self):", "def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )", "def __init__(self, text=\"\", widget=None):\n self._label_text = text\n self._widget = widget\n self._widget.on_change = self._update\n super().__init__(text=f\"{text} {widget.value}\")", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self): \r\n pass", "def init_widget(self):", "def __init__(self):\n ## Global initialization\n self.default_initialization()\n ## Initial function set\n self.selfdriven = False\n self._format_default_functions()\n ## Check descriptormodel\n self._assert_correctness()", "def __init__(self):\n self.content = \"\"", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def initWidgets(self):\n self.loctext.setText(\"{0:g}\".format(self.loc))\n self.scaletext.setText(\"{0:g}\".format(self.scale))", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def initDocTagText(self):\n self.doc, self.tag, self.text = Doc().tagtext()", "def __init__(\n self,\n type,\n text):\n self.type = type\n self.text = text", "def _init(self):\n pass", "def _initialize(self):\n \n self.view.lineEdit_3.setText(\"C,H,N,O,P,S\")\n self.view.spin_hit.setValue(20)\n self.view.lineEdit_2.setValue(10.)\n self.view.checkBox_8.setChecked(True)", "def format(self):\n ...", "def init(self) -> None:", "def update_editor ( self ):\n super( TextFontEditor, self ).update_editor()\n set_font( self )", "def __init__(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self):\n\t\tprint(\"Class initilised\")", "def __init__(self, text='', **kwargs):\n Control.__init__(self, text=text, **kwargs)", "def on_transEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()", "def __init__(self, text, idx):\n self.text = text\n self.idx = idx", "def __init__(self):\n self.update_state()", "def set_initial_values(self):\n #Stores each line of the text file in a list\n self.text = []\n \n #Scrolling distance\n self.scroll = 0\n\n #Zooming level (font size) \n self.zoom = 12\n\n #Factor by which is decrement self.zoom\n self.factor = 0\n\n #Number of tabs spaces before a line\n self.indent = 0\n\n #Flag to only set up pango descriptions only once \n self.set_pc = 1\n\n #list of indetation level of all lines\n self.tab_index = []\n\n #Total line count\n self.line_count = 0\n\n #line number of line rendered off top of window \n self.min_text = 0\n #line number of line rendered off bottom of window \n self.max_text = 50\n\n #y position for cairo for the text at the top\n self.min_cairo = 20\n\n #y position for text at bottom\n self.max_cairo = 20\n\n #x positiong for indented text\n self.tab_cairo = 20", "def __init__(self):\n fmt = \"%(message)s\"\n super().__init__(fmt=fmt)\n\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def __init__(self, text):\n self.text = text\n self.letters = [letters[c] for c in self.text]\n self.width = sum(let.width + 1 for let in self.letters)\n self._offset = width\n self.is_done = False", "def __init__(self, text=None, settings=None, style='General', language='en'):\n\n self._text = None\n self._settings = None\n self._style = None\n self._language = None\n\n self.text = text\n self.settings = settings\n self.style = style\n self.language = language", "def init(self) -> None:\n self.started = False\n self.lines = []\n self.text = ''\n self.graphics = ''\n self.ids = {}\n self.first_line_added = False\n\n self.used_fonts = set()\n self.current_line_used_fonts = set()\n self.current_height = 0\n self.lines = []\n\n line_width = self.width - (self.indent if self.is_first_line else 0)\n self.current_line = PDFTextLine(\n self.fonts, line_width, self.text_align, self.line_height\n )\n\n self.last_indent = 0\n self.last_state = self.last_factor = self.last_fill = None\n self.last_color = self.last_stroke_width = None\n\n self.y_ = 0", "def _settext(self, textEntered):\n if textEntered.strip() == '':\n textEntered=self.data['initialtext']\n self.entry.enterText(textEntered)\n else:\n if callable(self.data['callback']): self.data['callback'](textEntered)\n if self.data['autoexit'] and callable(self.data['exit']):\n # NOTE not safe to call here user callback...\n taskMgr.doMethodLater(.5, self.data['exit'], '_ntryxt')", "def __init__(self, edit: QtWidgets.QTextEdit, out=None, color=None):\n self.edit = edit\n self.out = out\n self.color = color", "def on_load(self):\n self.__init__()", "def __init__():", "def __init__(self) -> None:\n str.__init__(self)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._ansi_escape_codes = True", "def do_init(self):\n\n pass", "def initialize(self):\n\t\tpass", "def run_init(self):\n InitEditor(self.root, self)", "def __init(self):\n 
print(\"Welkam tu mobail lejen\")", "def __init__(self, text, tag, start ,end):\n\n self.text = six.text_type(text)\n self.tag = copy.copy(tag)\n self.end = end\n self.start = start" ]
[ "0.7095915", "0.70883477", "0.6957401", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6801035", "0.67764556", "0.67764556", "0.6772573", "0.67218834", "0.6665987", "0.6530844", "0.6495981", "0.6494592", "0.6494592", "0.6490198", "0.6401653", "0.6355695", "0.63224435", "0.627716", "0.627716", "0.62600374", "0.6241324", "0.6241043", "0.6223984", "0.6216441", "0.6214059", "0.62072545", "0.6179023", "0.61773074", "0.6165903", "0.6150355", "0.61494476", "0.6145963", "0.6123563", "0.6106276", "0.6106276", "0.61052555", "0.6075407", "0.606871", "0.60595924", "0.6050179", "0.6039118", "0.6025508", "0.60182106", "0.60180503", "0.5996569", "0.5996569", "0.5996569", "0.5996569", "0.5993615", "0.5956698", "0.59549457", "0.59410423", "0.5936671", "0.5926797", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.5922803", "0.59159535", "0.59074825", "0.59036523", "0.59019417", "0.5898051", "0.58926487", "0.5887501", "0.5887218", "0.58803314", "0.5877826", "0.5868464", "0.58638364", "0.5862526", "0.58605254", "0.5853759", "0.5833662", "0.58296865", "0.5820315", "0.5815491", "0.58068454", "0.579537", "0.57909584", "0.57830495", "0.5776756", "0.5769101", "0.5765869", "0.5761965", "0.5755533", "0.57552737" ]
0.0
-1
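The document field in the record above picks an output separator by elimination: take the first candidate that never occurs in the concatenated format text, and fall back to the first candidate if they all collide. A minimal sketch of that selection logic, with `output_sep_list` standing in for the real `CombinationFormat.outputSepList` (the candidate values here are assumed; only the try/except pattern comes from the record):

```python
# Stand-in for CombinationFormat.outputSepList (assumed values).
output_sep_list = [',', ';', ':', '|', '/']

def pick_sep(format_list):
    full_format = ''.join(format_list)
    try:
        # first candidate separator that never appears in the format text
        return [s for s in output_sep_list if s not in full_format][0] + ' '
    except IndexError:
        # every candidate collides: fall back to the first one
        return output_sep_list[0] + ' '

assert pick_sep(['yes', 'no']) == ', '
assert pick_sep([',;:', '|/']) == ', '  # all candidates collide -> fallback
```

The fallback branch only matters when every candidate separator already occurs somewhere in the format text.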
Return a tuple of the choices from inText sorted like the format list, and True if all splits are valid and included
def sortedChoices(self, inText): choices = self.splitText(inText) sortedChoices = [text for text in self.formatList if text in choices] if len(choices) == len(sortedChoices): return (sortedChoices, True) else: return (sortedChoices, False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def complete_opt_allow_select_scan(self, text, *_):\n return [t for t in (\"true\", \"false\", \"yes\", \"no\") if t.startswith(text.lower())]", "def complete_set(self, text, line, begidx, endidx):\n tokens = split(line[:begidx])\n if len(tokens) == 1:\n return [i for i in ('filter ', 'default ', 'time-format ') if i.startswith(text)]\n if len(tokens) == 2 and tokens[1] == 'time-format':\n return [i for i in ('long', 'short') if i.startswith(text)]\n return []", "def _determine_guess(\n sentences: List[List[Literal]]) -> Tuple[bool, Tuple[str, bool]]:\n literals = [x[0] for x in sentences if len(x) == 1]\n if len(literals) != 0:\n literals.sort(key=lambda x: x.atom)\n selected = literals[0]\n if selected.negation:\n return [True, [selected.atom, False]]\n return [True, [selected.atom, True]]\n atoms = [atom for atom in chain.from_iterable(sentences)]\n atoms.sort(key=lambda x: x.atom)\n selected = atoms[0]\n return [False, [selected.atom, True]]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def parse(question):\n # Handle things like \"should ___ X or Y\"\n if question.lower().startswith('should'):\n question = ' '.join(question.split()[2:])\n\n question = question.strip('?')\n # split on both ',' and ' or '\n choices = question.split(',')\n choices = sum((c.split(' or ') for c in choices), [])\n # Get rid of empty strings\n choices = filter(bool, (c.strip() for c in choices))\n return choices", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and 
not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def IsValid(self):\n return not TickerFull.DelimiterSplit in self.Text", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def check_series(text_list, set_list):\n in_list = []\n for word in text_list:\n all_words = re.sub('\\(.*?\\)', ',', word).split(',')\n all_words = list(filter(None, all_words))\n component_in_list = [component.strip(' ') in set_list for component in all_words]\n this_word_in_list = all(component_in_list)\n in_list.append(this_word_in_list)\n return in_list", "def check_order(self, filename: str, section: str, texts: List[str]):\n alphas = sorted(texts, key=lambda x: x.split(':')[0].lower())\n if texts == alphas:\n return\n for text, alpha in zip(texts, alphas):\n if text != alpha:\n print(f'{filename}: {section}: {text} vs {alpha}')\n break", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def test_tokenize_en(self):\n input = \"\"\"This is a paragraph. It's not very special, but it's designed\n2 show how the splitter works with many-different combos\nof words. 
Also need to \"test\" the handling of 'quoted' words.\"\"\"\n output = [\n (\"This\", 0), (\"is\", 5), (\"a\", 8), (\"paragraph\", 10), (\"It's\", 22),\n (\"not\", 27), (\"very\", 31), (\"special\", 36), (\"but\", 45), (\"it's\", 49),\n (\"designed\", 54), (\"show\", 65), (\"how\", 70), (\"the\", 74),\n (\"splitter\", 78), (\"works\", 87), (\"with\", 93), (\"many\", 98),\n (\"different\", 103), (\"combos\", 113), (\"of\", 120), (\"words\", 123),\n (\"Also\", 130), (\"need\", 135),\n (\"to\", 140), (\"test\", 144), (\"the\", 150), (\"handling\", 154),\n (\"of\", 163), (\"quoted\", 167), (\"words\", 175)\n ]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV)", "def choose(inp):\n if not inp.text:\n return lex.input.missing\n options = [i.strip() for i in inp.text.split(',') if i.strip()]\n if not options:\n return lex.input.incorrect\n return random.choice(options)", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def _instance_complete(self, text, line, begidx, endidx, notstates):\n choices = []\n for x in self._instancelist():\n if x[1] not in notstates:\n choices.append(x[0])\n\n matches = []\n for x in choices:\n if x.startswith(text):\n matches.append(x)\n return matches", "def is_valid(text):\n return is_all_word_segment_in_text(WORDS, text)", "def solution1(inp):\n rules, _, nearby = inp.strip().split(\"\\n\\n\")\n rules = rules.split(\"\\n\")\n nearby = nearby.split(\"\\n\")[1:]\n\n rrules = []\n for rule in rules:\n a, b = rule.split(\" or \")\n r1 = a.strip().split(\" \")[-1]\n r2 = b.strip()\n def to_range(r):\n i, j = list(map(int, r.split(\"-\")))\n return range(i, j + 1)\n rrules.append((to_range(r1), to_range(r2)))\n\n s = 0\n for ticket in nearby:\n ticket = list(map(int, ticket.split(\",\")))\n for v in ticket:\n valid = False\n for r in rrules:\n valid |= v in r[0] or v in r[1]\n if not valid:\n s += v\n return s", "def complete_set(self, text, line, begidx, endidx):\n # text = line[begidx:endidx] is the word we want to complete\n # split the completed words, should either be ['set'], or ['set', <option_key>]\n split_line = line[:begidx].split()\n if len(split_line) == 1:\n return [option for option in self.get_option_names() if option.startswith(text) or '.' + text in option]\n\n if len(split_line) == 2:\n key = split_line[1]\n options = self.get_options(key)\n if options is not None:\n scoped_key = key.split('.')[1] if '.' 
in key else key\n values = options.get_acceptable_values(scoped_key)\n if values is not None:\n return [value for value in values if value.startswith(text)]\n\n return []", "def IsValid(self):\n return (TickerFull.DelimiterSplit not in self.Text) and (TickerDataType.DelimiterData in self.Text)", "def test(s, approach):\n s_split = s.split()\n parsed_s = nlp(s)\n for i in xrange(len(parsed_s)):\n if parsed_s[i].tag_ == \"VBZ\":\n if approach(s, i) == 1:\n print str(1) + \":\", \" \".join(s_split[:i]), \\\n \"[{}=>{}]\".format(s_split[i], transform[s_split[i]]), \\\n \" \".join(s_split[i + 1:]) + \"\\t({} {})\".format(parsed_s[i], parsed_s[i].tag_)\n else:\n print str(0) + \":\", s + \"\\t({} {})\".format(parsed_s[i], parsed_s[i].tag_)", "def pre_validate(self, form):\n for item1,item2 in self.choices:\n if isinstance(item2, (list, tuple)):\n group_label = item1\n group_items = item2\n for val,label in group_items:\n if val == self.data:\n return\n else:\n val = item1\n label = item2\n if val == self.data:\n return\n raise ValueError(self.gettext('Not a valid choice!'))", "def main_completer_handler(self, text, state):\r\n response = None\r\n all_equals = []\r\n value = False\r\n equals = []\r\n\r\n # Build match list on first iteration else continue\r\n if state == 0:\r\n origline = readline.get_line_buffer()\r\n begin = readline.get_begidx()\r\n end = readline.get_endidx()\r\n being_completed = origline[begin:end]\r\n words = origline.split()\r\n\r\n if not words:\r\n # option for words list\r\n self.current_candidates = sorted(self.options.keys())\r\n else:\r\n # traverse all words entries and passing accordingly\r\n try:\r\n if begin == 0:\r\n # first word\r\n candidates = list(self.options.keys())\r\n else:\r\n # later word\r\n if '=' in words[len(words)-1] and len(words) > 1:\r\n #use possible values as candidates\r\n value = True\r\n equals = words[len(words)-1].split('=')\r\n if equals[1]:\r\n all_equals = [i.split('=') for i in words if '=' in i]\r\n\r\n if len(all_equals) > 1 and not all_equals[-2]\\\r\n [0] == all_equals[-1][0]and self.val_pos > 1:\r\n #reset candidates if new item\r\n candidates = []\r\n else:\r\n candidates = self.options[\"val\"]\r\n else:\r\n #use properties as candidates\r\n first = words[0]\r\n candidates = self.options[first]\r\n else:\r\n #use command items as candidates\r\n first = words[0]\r\n candidates = self.options[first]\r\n self.possible_vals = []\r\n if being_completed or equals:\r\n #possible value being_completed\r\n if equals:\r\n if equals[1] and not equals[1] in candidates:\r\n #match value\r\n being_completed = equals[1]\r\n else:\r\n #match property\r\n being_completed = equals[0]\r\n # match options with portion of input being completed\r\n self.current_candidates = [w for w in candidates\\\r\n if w and w.lower().startswith(being_completed.lower())]\r\n\r\n # return possible vals\r\n self.possible_vals = []\r\n if len(self.current_candidates) == 1 and 'set' in words[0] or equals:\r\n # second tab, return vals\r\n if being_completed == self.current_candidates[0]:\r\n #grab possible values\r\n for item in self.options['infovals']:\r\n if being_completed == item:\r\n val = self.options['infovals'][item]\r\n try:\r\n if 'Enumeration' in val['Type']:\r\n self.possible_vals = \\\r\n [v['ValueName'] for v in val['Value']]\r\n except:\r\n if 'boolean' in val['type']:\r\n self.possible_vals = [w for w in ['True', 'False']]\r\n elif 'string' in val['type']:\r\n self.possible_vals = [w for w \\\r\n in val['enum'] if w is not None]\r\n\r\n if 
self.possible_vals and 'null' \\\r\n in val['type']:\r\n self.possible_vals.append('None')\r\n break\r\n if self.possible_vals:\r\n self.options[\"val\"] = self.possible_vals\r\n self.val_pos = 0\r\n # first tab, complete\r\n else:\r\n self.possible_vals.append(self.current_candidates[0])\r\n self.val_pos += 1\r\n else:\r\n # matching empty string so use all candidates\r\n self.current_candidates = candidates\r\n\r\n except (KeyError, IndexError):\r\n self.current_candidates = []\r\n\r\n # Return the state from the match list if found otherwise return None.\r\n try:\r\n if self.possible_vals:\r\n response = self.possible_vals[state]\r\n else:\r\n response = self.current_candidates[state]\r\n except:\r\n # No candidate found for state\r\n response = None\r\n\r\n # Response return\r\n return response", "def check(self, text):\n p = self.d\n i = 0\n j = 0\n result = []\n ln = len(text)\n while i + j < ln:\n t = text[i + j].lower()\n # print i,j,hex(ord(t))\n if not (t in p):\n j = 0\n i += 1\n p = self.d\n continue\n p = p[t]\n j += 1\n # print p,i,j\n if chr(11) in p:\n p = self.d\n result.append(text[i:i + j])\n i = i + j\n j = 0\n return result", "def test_bug1591450(self):\n input = \"\"\"Testing <i>markup</i> and {y:i}so-forth...leading dots and trail--- well, you get-the-point. Also check numbers: 999 1,000 12:00 .45. Done?\"\"\"\n output = [\n (\"Testing\", 0), (\"i\", 9), (\"markup\", 11), (\"i\", 19), (\"and\", 22),\n (\"y\", 27), (\"i\", 29), (\"so\", 31), (\"forth\", 34), (\"leading\", 42),\n (\"dots\", 50), (\"and\", 55), (\"trail\", 59), (\"well\", 68),\n (\"you\", 74), (\"get\", 78), (\"the\", 82), (\"point\", 86),\n (\"Also\", 93), (\"check\", 98), (\"numbers\", 104), (\"Done\", 134),\n ]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV)", "def create_word_list(self):\n return set(self.split(self.title)+self.split(self.conditions)+self.split(self.interventions))", "def priority_split(text, *splitters):\n present = [s for s in splitters if s in text]\n # fall back to non-present splitter; ensures we have a splitter\n splitters = present + list(splitters)\n splitter = splitters[0]\n return [seg.strip() for seg in text.split(splitter) if seg.strip()]", "def split_into_phrases (self, phrase):\r\n\r\n if not self.contains(phrase,'()'):\r\n\r\n #For a phrase without parantheses\r\n \r\n\r\n if '|' in phrase:\r\n return ['@']+[x for x in phrase.split('|')]\r\n elif '&' in phrase:\r\n return [x for x in phrase.split('&')]\r\n\r\n #If the phrase contains parantheses.\r\n \r\n phrase = list (phrase)\r\n #convert string into a list of chars\r\n level = 0\r\n found = False # if one of the operators is found in the phrase \r\n\r\n for operator in ['#','>','|','&']:\r\n level = 0 # reset level\r\n if not found:\r\n \r\n \r\n for x,char in enumerate(phrase):\r\n if char == '(':\r\n level += 1\r\n if char == ')':\r\n level -=1\r\n # level indicates level within hierarchy established by parantheses\r\n\r\n if level == 0 and x+1 < len(phrase) and phrase[x+1] == operator:\r\n phrase[x+1] = '<<'+operator+'>>'\r\n found = True\r\n break\r\n \r\n \r\n\r\n if '<<&>>' in phrase:\r\n # For AND\r\n phrases = ''.join(phrase).split('<<&>>')\r\n elif '<<|>>' in phrase:\r\n # For OR \r\n phrases = ['@']+''.join(phrase).split('<<|>>')\r\n elif '<<>>>' in phrase:\r\n # For INFERENCE \r\n premise = ''.join(phrase).split('<<>>>')[0]\r\n conclusion = ''.join(phrase).split('<<>>>')[1]\r\n phrases = ['@','~'+premise,conclusion]\r\n # A => B translated as ~A OR B\r\n elif '<<#>>' 
in phrase:\r\n # FOR EQUIVALENCY \r\n premise = ''.join(phrase).split('<<#>>')[0]\r\n conclusion = ''.join(phrase).split('<<#>>')[1]\r\n \r\n phrase1 = '~'+'('+premise+'&'+'~'+conclusion+')'\r\n phrase2 = '~'+'('+conclusion+'&'+'~'+premise+')'\r\n phrases = [phrase1,phrase2]\r\n # A<>B translated as (~A or B) & (~B or A) \r\n \r\n return [x for x in phrases]", "def _complete_groups(self, text):\r\n groups = []\r\n for info in self._get_complete_info():\r\n if info['group'] not in groups:\r\n groups.append(info['group'])\r\n return [ g + ' ' for g in groups if g.startswith(text) ]", "def is_limerick(self, text):\n \n sentences = text.splitlines()\n \n #remove blank setences\n sentences = [sentence for sentence in sentences if sentence.strip()] \n \n if len(sentences) != 5 : return False \n #remove punctuations for all sentences\n words_sentence1 = word_tokenize(sentences[0].translate(None, string.punctuation).lower())\n words_sentence2 = word_tokenize(sentences[1].translate(None, string.punctuation).lower())\n words_sentence3 = word_tokenize(sentences[2].translate(None, string.punctuation).lower())\n words_sentence4 = word_tokenize(sentences[3].translate(None, string.punctuation).lower())\n words_sentence5 = word_tokenize(sentences[4].translate(None, string.punctuation).lower())\n \n #check rhymes for AAA BB and not rhymes for AB\n ret_flag = (self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence2[len(words_sentence2) - 1]) and\n self.rhymes(words_sentence3[len(words_sentence3) - 1 ],\n words_sentence4[len(words_sentence4) - 1 ]) and\n self.rhymes(words_sentence2[len(words_sentence2) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and\n self.rhymes(words_sentence1[len(words_sentence1) - 1 ],\n words_sentence5[len(words_sentence5) - 1 ]) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence1[len(words_sentence1) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence2[len(words_sentence2) - 1],\n words_sentence4[len(words_sentence4) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence3[len(words_sentence3) - 1])) and \n (not self.rhymes(words_sentence5[len(words_sentence5) - 1],\n words_sentence4[len(words_sentence4) - 1])))\n \n if ret_flag == False: return False\n \n \n # Check additional constraints\n \n sum_of_syl1 = 0\n for word in words_sentence1 : sum_of_syl1 += self.num_syllables(word)\n \n if sum_of_syl1 < 4 : return False\n sum_of_syl2 = 0\n for word in words_sentence2 : sum_of_syl2 += self.num_syllables(word)\n \n if sum_of_syl2 < 4 : return False\n \n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl2 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl2\n else : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl3 = 0\n for word in words_sentence3 : sum_of_syl3 += self.num_syllables(word)\n \n if sum_of_syl3 < 4 : return False\n sum_of_syl4 = 0\n for word in words_sentence4 : sum_of_syl4 += self.num_syllables(word)\n \n if sum_of_syl4 < 4 : return False\n \n \n sum_of_syl_B_diff = 0\n if sum_of_syl3 > sum_of_syl4 : sum_of_syl_B_diff = sum_of_syl3 - sum_of_syl4\n else : sum_of_syl_B_diff = sum_of_syl4 - sum_of_syl3\n \n if sum_of_syl_B_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl1 and sum_of_syl3 > sum_of_syl2 
\n and sum_of_syl4 > sum_of_syl1 and sum_of_syl4 > sum_of_syl2) : return False\n \n \n sum_of_syl5 = 0\n for word in words_sentence5 : sum_of_syl5 += self.num_syllables(word) \n \n if sum_of_syl5 < 4 : return False\n \n sum_of_syl_A_diff = 0\n if sum_of_syl1 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl1 - sum_of_syl5\n else : sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl1\n \n if sum_of_syl_A_diff > 2 : return False \n \n sum_of_syl_A_diff = 0\n if sum_of_syl2 > sum_of_syl5 : sum_of_syl_A_diff = sum_of_syl2 - sum_of_syl5\n else : sum_of_syl_A_diff = sum_of_syl5 - sum_of_syl2\n \n \n if sum_of_syl_A_diff > 2 : return False \n \n if (sum_of_syl3 > sum_of_syl5 and sum_of_syl4 > sum_of_syl5) : return False\n \n \n return ret_flag", "def match_all_phrases(self, inphrases):\n# temporary - attempted matches\n attempted_matches = []\n phrase_attempts = {}\n phrase = \"\"\n step = \"A\"\n # ALL full phrases \n for phrase in inphrases:\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n #return match_choices, attempted_matches, phrase\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # Normalised version of ALL all full phrases \n phrases = [self.get_normalised_phrase(p) for p in inphrases]\n\n # 3 all prefix trigrams \n step = \"3\"\n for ngram in [p.split()[0:3] for p in phrases if len(p.split()) > 2]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 2 all prefix bigrams \n step = \"2\"\n for ngram in [p.split()[0:2] for p in phrases if len(p.split()) > 1]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 1 all valid words \n step = \"1\"\n for phr_elem in phrases:\n #print phr_elem.split()\n for phrase in [w.strip() for w in phr_elem.split() \n if self.isExcluded(w.strip()) == False and w.strip() not in phrase_attempts]:\n #print \"***\", phrase\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n return [], attempted_matches, phrase, None", "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def check(text):\n text = text.copy()\n if not isinstance(text,list): # TEST\n raise TypeError(\"text must be a listlike :\\n{}\".format(text))\n \n # managing latex genuine tag\n for i, line in enumerate(text):\n if '\\\\' in line:\n utils.underlineall(line,'\\\\')\n logger.warning(\"Genuine latex tags were found, but won't be evaluated on line {}\".format(i))\n \n # check placeholders # TEST\n parsers['v'].check_syntax(text)\n \n for i,line in enumerate(text):\n # checking ends of lines TEST\n space_before_match = re.search(\"[^ ],,\",line)\n if space_before_match:\n utils.underlineall(line,space_before_match.group())\n raise 
SyntaxError(\"Please put a space before EOL tag in line {}\".format(i))\n space_after_match = re.search(\",,[^ ]\",line)\n if space_after_match:\n utils.underlineall(line,space_after_match.group())\n raise SyntaxError(\"Please put a space or a carriage return after EOL tag in line {}\".format(i))\n \n # checking illegal closing tags TEST\n for parser, module in parsers.items():\n if not module.has_closing_tag:\n if closing_mark + parser in line:\n utils.underlineall(line,closing_mark+parser)\n raise SyntaxError(\"{} parser has no closing tag: check line {}\".format(parser,i))\n \n # checking other tags\n if opening_mark in line:\n fline,nothing, sline = line.partition(opening_mark)\n while True:\n # checking each sub parser\n mark_to_test = sline.split()[0]\n parser = parsers[mark_to_test[0]]\n checker.checkmark(mark_to_test,parser,line,i)\n checker.checkargs(parser,mark_to_test,sline,line,i)\n \n # checking closing tag TEST BUG\n if parser.has_closing_tag:\n closing_tag = closing_mark + mark_to_test\n opening_tag = opening_mark + mark_to_test\n if opening_tag in sline:\n utils.underlineall(sline,opening_tag)\n raise SyntaxError(\"{} opening tag has been found before closing tag expected on line {}\".format(opening_tag,i))\n if closing_tag in sline:\n part1,tag,part2 = sline.partition(closing_tag)\n sline = part1 + part2\n else: # looking for closing tag in the rest of the text\n for j,line2 in enumerate(text[i+1:]):\n j+=i+1\n fline2, mark_expected, sline2 = line2.partition(closing_tag)\n if opening_tag in fline2:\n print(\"Opening tag not closed, line {}\".format(i))\n print(fline,nothing,utils.underlineall(sline,opening_tag,False))\n print(\"Opening tag found too soon, line {}\".format(j))\n utils.underlineall(line2,opening_tag)\n raise SyntaxError(\"{} opening tag has been found before closing tag expected\".format(opening_tag))\n if mark_expected:\n text[j] = fline2 + sline2\n break\n else:\n print(fline,nothing,utils.underlineall(sline,opening_tag,False))\n raise SyntaxError(\"No closing tag found for {} in line {}\".format(opening_tag,i))\n new_partition = sline.partition(opening_mark)\n fline = fline + nothing + new_partition[0]\n nothing, sline = new_partition[1:]\n \n if opening_mark not in sline: # condition to break loop\n line = fline + nothing + sline\n break\n \n # checking alone closing tags -> closing tags are supposed to be deleted TEST\n if closing_mark in line: \n alone_closing_tag = utils.wrappedchars(line,closing_mark)\n utils.underlineall(line,alone_closing_tag)\n raise SyntaxError(\"An only closing tag has been found in line {}\".format(i))\n \n return True", "def smarter_check_and_normalizer(in_str):\n counter1, counter2, counter3 = 0, 0, 0\n length, bool_val = len(input_str), False\n if length > 0: \n bool_val = True\n curr_index, next_index = 0, 1\n curr_word, next_word = \"\", \"\"\n while current_index < length:\n pass \n\n\n return [bool_val, input_str]", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def validate(candidates: List[str], choices: List[str]) -> List[str]:\n for candidate in candidates:\n assert candidate in choices, f\"Specified {candidate}, but not in available list: {choices}.\"\n return candidates", "def candidates(word):\r\n return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word])", "def test_delimiter_all_valid_options(self):\n val = 
DwcaValidator(yaml.load(self.yaml_delimited6, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'sex': 'male'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'female'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'male | female'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'female | male'}\n self.assertTrue(val.validate(document))\n document = {'sex': ''}\n self.assertTrue(val.validate(document))", "def guess_splitwords():\n\n if t_word[:2] == 'un' and (t_pos == 'ADJD' or t_pos == 'ADJA'):\n create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])", "def _split(self):\n \n self._words = []\n \n # (1) Expand contractions\n text = self._text.replace(\"'m \", \" am \")\n text = text.replace(\"'d \", \" would \")\n text = text.replace(\"'ll \", \" will \")\n text = text.replace(\"'ve \", \" have \")\n text = text.replace(\"'re \", \" are \")\n text = text.replace(\"can't \", \"can not \")\n text = text.replace(\"won't \", \"will not \")\n text = text.replace(\"n't \", \" not \")\n # Assume possesives are contractions of is\n text = text.replace(\"'s \", \" is \")\n text = text.replace(\"s' \", \"s \")\n \n # (2) Replace newlines, carriage returns, tabs, form feed with space.\n text = re.sub('[\\r\\n\\t\\f]', ' ', text)\n \n # (3) remove duplicate spaces\n text = re.sub(' +', ' ', text.strip())\n \n # Empty text\n if len(text) == 0:\n return \n \n # (4) Split text by whitespace (tokenize).\n words = text.split(' ')\n \n # (5) Separate out punctuation\n for word in words:\n length = len(word)\n \n begin = 0\n for i in range(0,length):\n if not word[i].isdigit() and not word[i].isalpha():\n # decimal, thousandths, fraction symbol\n if word[i] in ['.', ',', '/'] and i < length-1 and word[i+1].isdigit():\n continue\n # degree\n if word[i] in ['°'] and i < length-1 and word[i+1] in [ 'f', 'F', 'c', 'C']:\n continue\n # sign symbol\n if word[i] in ['-', '+'] and i < length-1 and (word[i+1].isdigit() or word[i+1] in ['.', ',']):\n # first char or exponent\n if begin == i or word[i-1] in ['e', 'E']:\n continue\n \n if begin != i:\n self._words.append( { 'word': word[begin:i], 'tag': Vocabulary.UNTAG } )\n if word[i] in [ '.', '?', '!', ',', ':', ';', '(', ')', '[', ']', '\"', '\\'', '¿', '¡']:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.PUNCT } )\n # non-printable ascii\n elif (ord(word[i]) >= 0 and ord(word[i]) <= 7) or (ord(word[i]) >= 14 and ord(word[i]) <= 31):\n pass\n else:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.SYMBOL } )\n begin = i + 1\n if begin < length:\n self._words.append( { 'word': word[begin:], 'tag': Vocabulary.UNTAG } )", "def parse_questions_text(advertise: Dict[str, Any]) -> Optional[List[str]]:\n if \"questions_text\" in advertise.keys():\n\n tmp: List[str] = advertise[\"questions_text\"].split(\"Denunciar\")\n excepts: List[str] = []\n\n for part in tmp:\n\n tmp_vec: List[str] = re.split(\"[ \\t\\n]{2,}\", part)\n if '' in tmp_vec:\n tmp_vec.remove('')\n\n excepts.extend(tmp_vec)\n\n excepts = set(excepts)\n\n if '' in excepts:\n excepts.remove('')\n\n return list(excepts)", "def test_CombinedFilter(self):\n tkns = get_tokenizer(\"en_US\", filters=(URLFilter, WikiWordFilter, EmailFilter))(self.text)\n out = [t for t in tkns]\n exp = [(\"this\", 0), (\"text\", 5), (\"with\", 10),\n (\"and\", 30), (\"not\", 
103), (\"quite\", 108),\n (\"a\", 114), (\"url\", 116),\n (\"as\", 157), (\"well\", 160)]\n self.assertEqual(out, exp)", "def test_wordMatch(self):\n words = []\n for line in self.output:\n words.extend(string.split(line))\n self.failUnless(self.sampleSplitText == words)", "def parser(sent_list): #input: list of sentences", "def is_valid(self, text):\n return any(p.lower() in text.lower() for p in self.get_phrases())", "def split_precondition(\n tokens: Sequence[str], words: Sequence[str], word_ends: Sequence[str]\n) -> bool:\n duplicated_word_ends = []\n for end1, end2 in zip(word_ends, word_ends[1:]):\n if end1 == end2:\n duplicated_word_ends.append(end1)\n\n if not duplicated_word_ends:\n return False\n\n duplicate_not_word = False\n for duplicate in duplicated_word_ends:\n if duplicate not in words:\n duplicate_not_word = True\n break\n\n if not duplicate_not_word:\n return False\n\n return True", "def canSplit(self):\n return False", "def is_limerick(self, text):\n # TODO: provide an implementation!\n text = text.lower()\n p = []\n p = text.split('\\n')\n p = [i.strip(' ') for i in p]\n p = list(filter(None, p))\n\n # all limericks must have 5 lines AABBA\n if len(p) != 5:\n return False\n\n #words list stores the list of words in each line of the limerick\n words = []\n for i in range(0, 5):\n p[i] = p[i].strip(\".,:;?!\")\n temp = []\n T = p[i]\n temp = self.apostrophe_tokenize(T)\n words.append(temp)\n\n count = []\n #print len(words)\n for i in range(0, 5):\n #print words[i]\n n = 0\n for j in words[i]:\n n = n + self.num_syllables(j)\n count.append(n)\n\n # check if any line has fewer than 4 syllables\n for i in count:\n if i < 4:\n return False\n\n A1 = count[0]\n A2 = count[1]\n B1 = count[2]\n B2 = count[3]\n A3 = count[4]\n\n # check if B1 has fewer syllables than A1, A2 and A3\n if B1 > A1 or B1 > A2 or B1 > A3:\n return False\n\n # check if B2 has fewer syllables than A1, A2 and A3\n if B2 > A1 or B2 > A2 or B2 > A3:\n return False\n\n # check if the no of syllables in B1 and B2 differs by more than 2\n if abs(B1 - B2) > 2:\n return False\n\n # check if any two A's differ in no of syllables by more than 2\n if abs(A1 - A2) > 2 or abs(A1 - A3) > 2 or abs(A2 - A3) > 2:\n return False\n\n #check if A1, A2 and A3 rhyme with each other\n if self.rhymes(words[0][-1], words[1][-1]) and self.rhymes(words[0][-1], words[4][-1]) and self.rhymes(words[1][-1], words[4][-1]):\n #check if B1 and B2 rhyme with each other\n if self.rhymes(words[2][-1],words[3][-1]):\n #check if A and B do not rhyme\n if (not self.rhymes(words[0][-1], words[2][-1]) and\n not self.rhymes(words[0][-1], words[3][-1]) and\n not self.rhymes(words[1][-1], words[2][-1]) and\n not self.rhymes(words[1][-1], words[3][-1]) and\n not self.rhymes(words[4][-1], words[2][-1]) and\n not self.rhymes(words[4][-1], words[3][-1])\n ):\n return True\n\n return False", "def test_with_3_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c']),\n 'a, b and c')", "def test_basic_tokenize(self):\n input = \"\"\"This is a paragraph. It's not very special, but it's designed\n2 show how the splitter works with many-different combos\nof words. 
Also need to \"test\" the (handling) of 'quoted' words.\"\"\"\n output = [\n (\"This\", 0), (\"is\", 5), (\"a\", 8), (\"paragraph\", 10), (\"It's\", 22),\n (\"not\", 27), (\"very\", 31), (\"special\", 36), (\"but\", 45), (\"it's\", 49),\n (\"designed\", 54), (\"2\", 63), (\"show\", 65), (\"how\", 70), (\"the\", 74),\n (\"splitter\", 78), (\"works\", 87), (\"with\", 93), (\"many-different\", 98),\n (\"combos\", 113), (\"of\", 120), (\"words\", 123),\n (\"Also\", 130), (\"need\", 135),\n (\"to\", 140), (\"test\", 144), (\"the\", 150), (\"handling\", 155),\n (\"of\", 165), (\"quoted\", 169), (\"words\", 177)\n ]\n self.assertEqual(output, [i for i in basic_tokenize(input)])\n for (itmO, itmV) in zip(output, basic_tokenize(input)):\n self.assertEqual(itmO, itmV)", "def hasConstantForm(self, sentence):", "def test_betas(node, comps, data=None, engine=None):\n return all([word(node, data=data, engine=engine) for word in comps])", "def get_valid_phrases():\n return [x[0] for x in all_topics if x[1] == \"1\"]", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def test_with_4_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c', 'd']),\n 'a, b, c, and d')", "def valid_value(self, value):\n for val in value.split(','):\n valid = super(MultiSelectField, self).valid_value(val)\n if not valid:\n return False\n return True", "def check_valid(indexes):\n # Check if any substrings has any versions that are the opposite of it anywhere in that line.\n valid = False\n for k, v in indexes.items():\n swapped = k[::-1]\n other = indexes.get(swapped)\n # Check to see if the swapped version exists in the dictionary.\n if other:\n # 'aaa' case, these are invalid matches, don't bother checking further.\n if k == swapped:\n continue\n # single occurence case\n if len(v) == 1 and len(other) == 1:\n # Case where both occur inside or outsid brackets.\n if (int(v[0]) % 2) == (int(other[0]) % 2):\n continue\n else:\n valid = True\n else:\n # Use sets to eliminate duplicates in the same chunk.\n v_s = set(v)\n other_s = set(other)\n possible_combinations = [(x % 2, y % 2) for x in v_s for y in other_s]\n # For a pairing to be valid, one part needs to be in an even chunk and the other in an odd ([]) chunk.\n if (1, 0) in possible_combinations or (0, 1) in possible_combinations:\n valid = True\n return valid", "def validate_and_split_constraints(word, ctx=None, engine=None):\n # TODO make this a node semantic\n if CONSTRAINT_S not in word._data:\n return ([], [], [], set())\n\n comps = [word.verify(ctx=ctx, engine=engine) for word in word._data[CONSTRAINT_S] if isinstance(word, QueryComponent)]\n others = set([word for word in word._data[CONSTRAINT_S] if not isinstance(word, QueryComponent)])\n alphas = []\n betas = []\n sub_binds = []\n for c in comps:\n if c.is_sub_bind_test:\n sub_binds.append(c)\n elif c.is_alpha_test:\n alphas.append(c)\n else:\n betas.append(c)\n\n return (alphas, betas, sub_binds, others)", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def consequence_filter(line, index, consequence_list):\n consequence = re.split(r'\\t+', line.rstrip('\\t'))[index]\n if not any(variant_type in consequence for variant_type in\n consequence_list):\n return True\n else:\n return False", "def compareWithAll(lijst, previouslist, feedback = 0):\n\n global usedcombos\n\n results = []\n\n\n if feedback == 2: #to make sure there's a 2 letter combination with gaps\n for i in 
previouslist:\n for letter1, letter2 in lijst:\n if letter1 in i and letter2 in i:\n results.append(i)\n\n elif feedback == 3: #to make sure there's a 3 letter combination with gaps\n for i in previouslist:\n for letter1, letter2, letter3 in lijst:\n if letter1 in i and letter2 in i and letter3 in i:\n results.append(i)\n else:\n for i in previouslist:\n\n for j in range(len(lijst)):\n\n if lijst[j] in i:\n\n results.append(i)\n\n results = [item for item in results if item not in usedcombos]\n results = list(dict.fromkeys(results))\n\n print(f\"It seems I only {len(results)} options left!\")\n\n return AIguessing(results)", "def processwords(list_of_matches, lemmatag = False):\n list_of_matches = [w.lower() for w in list_of_matches]\n # remove nonwords, strip . to normalise \"dr.\"\n if translated_option != 'o' and translated_option != 'u':\n list_of_matches = [w.lstrip('.').rstrip('.') for w in list_of_matches if re.search(regex_nonword_filter, w)]\n \n list_of_matches.sort()\n \n # tokenise if multiword:\n if phrases and not n_gramming:\n from nltk import word_tokenize as word_tokenize\n list_of_matches = [word_tokenize(i) for i in list_of_matches]\n\n # this is just for plaintext ... should convert to unicode on file open\n if datatype == 'plaintext':\n try:\n list_of_matches = [unicode(w, errors = 'ignore') for w in list_of_matches]\n except TypeError:\n pass\n\n if not dependency and exclude and 'w' in exclude.keys():\n list_of_matches = [w for w in list_of_matches if not re.match(exclude['w'], w)]\n\n if lemmatise or 'l' in show:\n if not dependency:\n tag = gettag(query, lemmatag = lemmatag)\n lemmata = lemmatiser(list_of_matches, tag)\n tups = zip(list_of_matches, lemmata)\n res = []\n for w, l in tups:\n single_result = []\n if exclude and 'l' in exclude.keys():\n if re.match(exclude['l'], l):\n continue\n if 'w' in show:\n single_result.append(w)\n if 'l' in show:\n single_result.append(l)\n # bad fix:\n # this currently says, if pos in show, there must only be pos ...\n if 'p' in show:\n if lemmatise:\n single_result.append(l)\n else:\n single_result.append(w)\n\n single_result = '/'.join(single_result)\n res.append(single_result)\n list_of_matches = res\n\n if titlefilter and not dependency:\n list_of_matches = titlefilterer(list_of_matches)\n if spelling:\n list_of_matches = convert_spelling(list_of_matches, spelling = spelling)\n\n # use blacklist option in gui\n if 'blacklist' in kwargs.keys():\n stopwords = False\n if kwargs['blacklist'] is not False:\n if kwargs['blacklist'] is True:\n from dictionaries.stopwords import stopwords as my_stopwords\n stopwords = [i.lower() for i in my_stopwords]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n if type(kwargs['blacklist']) == list:\n stopwords = [i.lower() for i in kwargs['blacklist']]\n list_of_matches = [w for w in list_of_matches if w not in stopwords]\n else:\n regexblacklist = re.compile(kwargs['blacklist'])\n list_of_matches = [w for w in list_of_matches if not re.search(regexblacklist, w)]\n\n #if not split_con:\n # list_of_matches = unsplitter(list_of_matches)\n \n # turn every result into a single string again if need be:\n if phrases:\n output = []\n for res in list_of_matches:\n joined = ' '.join(res)\n output.append(joined)\n return output\n else:\n return list_of_matches", "def test_words_with_sep():\n assert my_splitter(\"bla,bla\", \",\") == [\"bla\", \"bla\"]", "def possibilities(param):\n print(param)\n exit_list = []\n qmark_indices = []\n word_as_list = list(param)\n for 
index, letter in enumerate(word_as_list):\n if letter is \"?\":\n qmark_indices.append(index)\n for num in qmark_indices:\n word_as_list[num] = \"1\"\n exit_list.append(''.join(word_as_list))\n word_as_list[num] = \"0\"\n exit_list.append(''.join(word_as_list))\n for num in qmark_indices:\n word_as_list[num] = \"0\"\n exit_list.append(''.join(word_as_list))\n word_as_list[num] = \"1\"\n exit_list.append(''.join(word_as_list))\n new_list = list(set(word for word in exit_list if word.count(\"?\") == 0))\n new_list.sort()\n return new_list", "def parse(self, inp):\n\n tokens = self.tokenizer.tokenize(inp)\n tokens_left = len(tokens)\n\n # print(tokens)\n\n while tokens_left:\n\n for rule in self.grammar:\n tokens = tokens[rule.match(tokens):]\n\n if len(tokens) < tokens_left:\n tokens_left = len(tokens)\n else:\n # nothing is matching any more - stop\n break\n\n return len(tokens) == 0, tokens", "def exact_match_candidates(self) -> List[List[TokenType]]:\n return [\n [TokenType.GENE, TokenType.AMPLIFICATION]\n ]", "def _parse_multi(choice, end=None):\n end = end or str(g.model.size)\n pattern = r'(?<![-\\d])(\\d+-\\d+|-\\d+|\\d+-|\\d+)(?![-\\d])'\n items = re.findall(pattern, choice)\n alltracks = []\n\n for x in items:\n\n if x.startswith(\"-\"):\n x = \"1\" + x\n\n elif x.endswith(\"-\"):\n x = x + str(end)\n\n if \"-\" in x:\n nrange = x.split(\"-\")\n startend = map(int, nrange)\n alltracks += _bi_range(*startend)\n\n else:\n alltracks.append(int(x))\n\n return alltracks", "def test_with_2_items(self):\n self.assertEqual(humanize_list(['a', 'b']),\n 'a and b')", "def identify_topn_langs(\n self,\n text: str,\n topn: int = 3,\n with_probs: bool = False,\n ) -> List[str] | List[Tuple[str, float]]:\n if not self._is_valid_text(text):\n results = [(\"un\", 1.0)]\n else:\n text_ = utils.to_collection(text, str, list)\n results = models.get_topn_preds_and_probs(\n self.model.predict(text_), topn, self.classes\n )[0]\n return [lang for lang, _ in results] if with_probs is False else results", "def _candidates(self, token):\n token_as_list = [token]\n token_1_edits = NorvigCorrector._one_edit_token_distances(token)\n token_2_edits = NorvigCorrector._two_edits_token_distances(token)\n return (\n self._known_in(token_as_list) or self._known_in(token_1_edits) or self._known_in(token_2_edits) or\n token_as_list)", "def parse_selection(selection_str: str) -> List[int]:\n indices = []\n for group in selection_str.split(','):\n if not re.match(r'^(?:-?\\d+)|(?:\\d+(?:-\\d+))$', group):\n print(\"Invalid selection\", group)\n sys.exit()\n spl = group.split('-')\n if len(spl) == 1:\n indices.append(int(spl[0]))\n elif len(spl) == 2:\n begin = int(spl[0]) if spl[0] else 0\n end = int(spl[1])\n indices.extend(range(begin, end + 1))\n return indices", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n 
sentences.append([])\n labels.append([])\n \n return sentences,labels", "def get_section_choices(sections):\n ret = []\n if sections == None:\n return ret\n sections = string.splitfields(decode_html(sections), '\\n')\n for s in sections :\n s = string.strip(s)\n ret.append((s, s))\n return ret\n # if s != '':\n # yield(encode_html(s), s)", "def parse_int_set(nputstr=\"\"):\n selection = set()\n invalid = set()\n # tokens are comma seperated values\n tokens = [x.strip() for x in nputstr.split(',')]\n for i in tokens:\n try:\n # typically tokens are plain old integers\n selection.add(int(i))\n except:\n # if not, then it might be a range\n try:\n token = [int(k.strip()) for k in i.split('-')]\n if len(token) > 1:\n token.sort()\n # we have items seperated by a dash\n # try to build a valid range\n first = token[0]\n last = token[len(token)-1]\n for x in range(first, last+1):\n selection.add(x)\n except:\n # not an int and not a range...\n invalid.add(i)\n # Report invalid tokens before returning valid selection\n # print \"Invalid set: \" + str(invalid)\n return selection", "def collate_sections(self,paper_text,section_list:List[Section],split_upto=0.2,split_bins=10):\n current_text_split = []\n prev_section = None\n curr_text = str(paper_text)\n unfound_sections = []\n some_section_not_found = False\n for index,s in enumerate(section_list):\n curr_text,section_status = self.split_and_find_section(curr_text,s.name,prev_section,split_upto=split_upto,split_bins=split_bins)\n if not section_status: # If couldn't match section add it here. \n some_section_not_found = True\n # print('\\n\\t'+s.name) \n prev_section = s \n for ss in s.subsections:\n curr_text,section_status = self.split_and_find_section(curr_text,ss.name,prev_section,split_upto=split_upto,split_bins=split_bins)\n if not section_status:\n some_section_not_found = True\n # print(\"Cannot Match For :\",ss.name)\n prev_section = ss\n # print('\\n\\t\\t'+ss.name)\n if index == len(section_list)-1:\n s.text = curr_text\n return section_list,some_section_not_found", "def validateOpts(opts):\n titlePresent = False\n linesPerFile = -1\n outputFileName = \"\"\n sepChar = \",\"\n for option, argval in opts:\n if (option in (\"-t\", \"--title\")):\n titlePresent = True\n if (option in (\"-l\", \"--lines\")):\n linesPerFile = int(argval)\n if (option in (\"-s\", \"--sep\")):\n sepChar = argval\n if (option in (\"-o\", \"--output\")):\n outputFileName = argval\n if (option in (\"-h\", \"--help\")):\n usage()\n return titlePresent, linesPerFile, sepChar, outputFileName", "def splitInPhrase(self,text):\n return self._support.splitInPhrase(text)", "def read_trans_prompts(lines: List[str], lowercase=True) -> List[Tuple[str,str]]:\n\n ids_prompts = []\n first = True\n for line in lines:\n if lowercase:\n line = line.strip().lower()\n else:\n line = line.strip()\n # in a group, the first one is the KEY. \n # all others are part of the set. 
\n if len(line) == 0:\n first = True\n else:\n if first:\n key, prompt = line.split(FIELDSEP)\n ids_prompts.append((key, prompt))\n first = False\n\n return ids_prompts", "def test_separators_only():\n assert my_splitter(\",ad,\", \"ad\") == [\",\", \",\"]", "def in_suit3(list, list0):\n text = list.replace(\"-\", \"\")\n text0 = list0.replace(\"-\", \"\")\n if (\"-\" in list) and (\"-\" in list0) and (text.isdigit() is True) and (text0.isdigit() is True):\n\n list1 = list.split(\"-\")\n x = int(list1[0])\n suit = set()\n suit.add(x)\n while x < int(list1[len(list1) - 1]):\n x += 1\n suit.add(x)\n suit.add(int(list1[len(list1) - 1]))\n\n list2 = list0.split(\"-\")\n y = int(list2[0])\n suit0 = set()\n suit0.add(y)\n while y < int(list2[len(list2) - 1]):\n y += 1\n suit0.add(y)\n suit0.add(int(list2[len(list2) - 1]))\n temp = [item for item in suit if item in suit0]\n if len(temp) > 0: return True\n\n return False", "def candidates(self, word):\n return (self.known([word]) or self.known(self.edits1(word)) or self.known(self.edits2(word)) or [word])", "def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)", "def _choose_babble_phrases(self) -> tuple:\n noun_choices = ('singular nouns', 'plural nouns')\n noun_choice = self.random_element(noun_choices)\n\n adjective_choices = (\n 'adjectives starting with consonant',\n 'adjectives starting with vowel')\n\n if noun_choice == 'singular nouns':\n article_choice = self.random_element(self.article_choices)\n else:\n article_choice = 'the'\n\n if article_choice == 'an':\n adjective_choice = 'adjectives starting with vowel'\n elif article_choice == 'a':\n adjective_choice = 'adjectives starting with consonant'\n else:\n adjective_choice = self.random_element(adjective_choices)\n\n return (\n self.technobabble['verbs'],\n article_choice,\n self.technobabble[adjective_choice],\n self.technobabble[noun_choice])", "def get_ls_ab(text,option):\n text = re.sub(r'<ab>.*?</ab>',' ',text)\n text = re.sub(r'<lex>.*?</lex>',' ',text)\n text = re.sub(r'<bot>.*?</bot>',' ',text)\n text = re.sub(r'<is>.*?</is>',' ',text)\n text = re.sub(r'{%.*?%}',' ',text)\n text = re.sub(r'{#.*?#}',' ',text)\n # replace <x> with <ln>x</ln> when x starts with a digit\n # 10-26-2021 This change made in temp_pw_AB_02.txt\n #text = re.sub(r'<([1-9].*?)>',r'<ln>\\1</ln>',text)\n lsarr0 = re.findall(r'<l[sn]>.*?</l[sn]>',text)\n lsarr1 = list(generate_ab_ls(lsarr0))\n if option == 'ab':\n lsarr = lsarr1\n else: # ab1\n lsarr = lsab1_merge(lsarr1)\n return lsarr", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def get_features(self, para, label_list, tokenizer, max_seq_length):\n\t\tlabel_map = {label : i for i, label in enumerate(label_list)}\n# self.reverse_label_map = {v: k for k, v in label_map.items()}\n\t\tguid = \"%s-%s\" % (\"test\", 1)\n\t\ttext_a = para[\"model_answer\"]\n\t\ttext_b = para[\"candidate_answer\"]\n\t\tlabel = label_list[0]\n\t\texample = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)\n\t\t\n\t\ttokens_a = tokenizer.tokenize(example.text_a)\n\n\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\tif 
example.text_b:\n\t\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\t\t# Modifies `tokens_a` and `tokens_b` in place so that the total\n\t\t\t# length is less than the specified length.\n\t\t\t# Account for [CLS], [SEP], [SEP] with \"- 3\"\n\t\t\tself._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\t\telse:\n\t\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\t\tif len(tokens_a) > max_seq_length - 2:\n\t\t\t\ttokens_a = tokens_a[:(max_seq_length - 2)]\n\n\t\t# The convention in BERT is:\n\t\t# (a) For sequence pairs:\n\t\t# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n\t\t# (b) For single sequences:\n\t\t# tokens: [CLS] the dog is hairy . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0\n\t\t#\n\t\t# Where \"type_ids\" are used to indicate whether this is the first\n\t\t# sequence or the second sequence. The embedding vectors for `type=0` and\n\t\t# `type=1` were learned during pre-training and are added to the wordpiece\n\t\t# embedding vector (and position vector). This is not *strictly* necessary\n\t\t# since the [SEP] token unambigiously separates the sequences, but it makes\n\t\t# it easier for the model to learn the concept of sequences.\n\t\t#\n\t\t# For classification tasks, the first vector (corresponding to [CLS]) is\n\t\t# used as as the \"sentence vector\". Note that this only makes sense because\n\t\t# the entire model is fine-tuned.\n\t\ttokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n\t\tsegment_ids = [0] * len(tokens)\n\n\t\tif tokens_b:\n\t\t\ttokens += tokens_b + [\"[SEP]\"]\n\t\t\tsegment_ids += [1] * (len(tokens_b) + 1)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\tpadding = [0] * (max_seq_length - len(input_ids))\n\t\tinput_ids += padding\n\t\tinput_mask += padding\n\t\tsegment_ids += padding\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\t\tlabel_id = label_map[example.label]\n# print(\"*** Example ***\")\n# print(\"guid: %s\" % (example.guid))\n# print(\"tokens: %s\" % \" \".join(\n# [str(x) for x in tokens]))\n# print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n# print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n# print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n\t\t\n\t\treturn InputFeatures(input_ids=input_ids,\n\t\t\t\t\t\t\t input_mask=input_mask,\n\t\t\t\t\t\t\t segment_ids=segment_ids,\n\t\t\t\t\t\t\t label_id=label_id)", "def sortChoices(self):\n self.formatList.sort()", "def extract_want_line_capabilities(text):\n split_text = text.rstrip().split(b\" \")\n if len(split_text) < 3:\n return text, []\n return (b\" \".join(split_text[:2]), split_text[2:])", "def all_simple (phrase):\r\n\r\n\r\n for x in phrase:\r\n if (x not in self.operations and not (isinstance(x,(int,type(ListType()),float,bool) or (isinstance(x,str) and quoted(x)))) or self.current_register.contains(x)):\r\n return False\r\n return True", "def candidates(self, word):\n return (self.known([word]) or \\\n self.known(self.edits1(word)) or \\\n self.known(self.edits2(word)) or \\\n set([word]))", "def vectorize(self,text):\r\n \r\n lv_active = set()\r\n words = word_tokenize(text)\r\n for word in words:\r\n if word in self.tree:\r\n ancestors = 
self.tree.word_ancestors(word)\r\n lv_active.update(ancestors)\r\n \r\n return self.nl.isin(lv_active).values", "def parse_input(question_ids, answer_ids):\r\n input_ids = list()\r\n input_ids.append(BERT_CLS)\r\n input_ids.extend(question_ids)\r\n input_ids.append(BERT_SEP)\r\n input_ids.extend(answer_ids)\r\n input_ids_truncated = input_ids[:BERT_INPUT_WORD_LEN]\r\n # print(input_ids_truncated)\r\n assert len(input_ids_truncated) <= BERT_INPUT_WORD_LEN, 'input_ids len can not exceed %d' % BERT_INPUT_WORD_LEN\r\n # print('input_ids_truncated_len ', len(input_ids_truncated))\r\n segment_ids = list()\r\n segment_question_ids = ['0'] * (len(question_ids) + 2)\r\n segment_answer_ids = ['1'] * (len(input_ids_truncated) - len(question_ids) - 2)\r\n segment_ids.extend(segment_question_ids)\r\n segment_ids.extend(segment_answer_ids)\r\n input_masks = ['1'] * len(input_ids_truncated)\r\n input_ids_parsed = RECORD_SPLIT_FLAG.join(input_ids_truncated)\r\n segment_ids_str = RECORD_SPLIT_FLAG.join(segment_ids)\r\n input_masks_str = RECORD_SPLIT_FLAG.join(input_masks)\r\n # print('segmend_ids ', segment_ids_str)\r\n # print('input_masks ', input_masks_str)\r\n return input_ids_parsed, segment_ids_str, input_masks_str", "def is_accepting(self):\n for item_id, lookahead in self.id_to_lookahead.items():\n if lookahead.includesEndOfText():\n item = self.id_to_item[item_id]\n if item.is_accepting():\n return True\n return False", "def TestInput( data, options ) :\n columns = data.columns\n return all( x in columns for x in options)", "def _validate_options(self):\r\n valid_choices = ('correct', 'partially-correct', 'incorrect')\r\n for option in self.options:\r\n choice = option['choice']\r\n if choice is None:\r\n raise ValueError('Missing required choice attribute.')\r\n elif choice not in valid_choices:\r\n raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(\r\n choice, ', '.join(valid_choices)))", "def test_boolean_and_selection(self):\n\n # The selection loop:\n sel = list(mol_res_spin.residue_loop(\"#Ap4Aase:4 & :Pro\"))\n\n # Test:\n self.assertEqual(len(sel), 1)\n for res in sel:\n self.assert_(res.name == \"Pro\" and res.num == 4)", "def prog_sent(text):\n\n patterns = [r'\\b(?i)'+'plan'+r'\\b',\n r'\\b(?i)'+'programme'+r'\\b',\n r'\\b(?i)'+'scheme'+r'\\b',\n r'\\b(?i)'+'campaign'+r'\\b',\n r'\\b(?i)'+'initiative'+r'\\b',\n r'\\b(?i)'+'conference'+r'\\b',\n r'\\b(?i)'+'agreement'+r'\\b',\n r'\\b(?i)'+'alliance'+r'\\b']\n\n output = []\n flag = 0\n\n for pat in patterns:\n if re.search(pat, text) != None:\n flag = 1\n\n break\n\n return flag", "def checker(self, match=\"xw\", ranges=\"0,1\", in_a_row=True, reverse=False):\n\n res = []\n length = len(self.parse_type)\n if ranges != None:\n ranges = str(ranges)\n index_array = self.indexes(ranges)\n substring = \"\"\n\n for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1): #xd lol\n if idx in index_array:\n substring += self.parse_type[idx]\n\n if in_a_row == True:\n return (match in substring)\n if in_a_row == False:\n target = 0\n for i in substring:\n target += (match[target] == i)\n return (target == maxi)\n if in_a_row == None:\n for i in self.parse_type:\n if i in match:\n match = match.replace(i, '', 1)\n return (match == \"\")\n return None" ]
[ "0.58024824", "0.5749306", "0.5671507", "0.55511814", "0.5483213", "0.5446786", "0.54443884", "0.54241514", "0.54228777", "0.52902573", "0.52757835", "0.5264248", "0.5196836", "0.5174921", "0.5168787", "0.5146945", "0.51274663", "0.5122783", "0.5096883", "0.509225", "0.5079589", "0.5070989", "0.50292575", "0.49920854", "0.49731225", "0.4968269", "0.4955272", "0.49335572", "0.49249843", "0.4910313", "0.49090913", "0.49076098", "0.4906703", "0.4898591", "0.48766723", "0.48764372", "0.4851883", "0.48441938", "0.48273826", "0.48194703", "0.48114222", "0.48007408", "0.47960186", "0.4790127", "0.478619", "0.47764626", "0.47758245", "0.47745278", "0.47538546", "0.47513348", "0.47496182", "0.4746881", "0.47401914", "0.4731858", "0.47294125", "0.47248986", "0.47086027", "0.4708507", "0.4699868", "0.4699236", "0.46975484", "0.46930373", "0.46699023", "0.46629298", "0.4661819", "0.4659042", "0.46533892", "0.46462598", "0.46398368", "0.46366236", "0.46363887", "0.46244785", "0.46160632", "0.4613659", "0.46024266", "0.45991042", "0.45990813", "0.4598158", "0.45944583", "0.4593011", "0.45915318", "0.45887688", "0.4586329", "0.45854586", "0.45846754", "0.45803413", "0.45759356", "0.45754948", "0.45742232", "0.45740163", "0.45672286", "0.4566234", "0.45550358", "0.45518008", "0.45451105", "0.45407206", "0.45279133", "0.4527819", "0.4523092", "0.45183566" ]
0.7824299
0
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False):
    choices, valid = self.sortedChoices(storedText)
    if valid:
        result = self.sep.join(choices)
    else:
        result = _errorStr
    return TextFormat.formatOutput(self, result, titleMode, internal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def format_title(self, data):\n return data", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_text(downgrade_titles=False):", "def PROPER(text):\n return text.title()", "def title(self, string):\n return self.bold(string)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def print_with_title(title, content, before='', after='', hl='='):\n cont_maxlen = max(len(s) for s in content.split('\\n'))\n hl_len = max(cont_maxlen, len(title))\n print('{}{}\\n{}\\n{}{}'.format(before, title, hl * hl_len, content, after))", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", 
trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def helptext(self):\n return \"\"", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def get_title():", "def __str__(self):\n date_str = self.date.strftime(self.journal.config['timeformat'])\n title = date_str + \" \" + self.title\n body = self.body.strip()\n\n return \"{title}{sep}{body}\\n\".format(\n title=title,\n sep=\"\\n\" if self.body else \"\",\n body=body\n )", "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def output_sep_title(title):\n print(f\"{sep_mark}\\t{title}{sep_mark}\")", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def title(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"=\")))", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', 
'&quot;')\n return title\n else:\n return u''", "def escape_if_needed(text, options):\n if hasattr(text, '__html__'):\n # Text has escape itself:\n return to_string(text.__html__())\n if need_to_escape(options):\n return escape(to_string(text))\n return to_string(text)", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_rst_title_char(level):\n chars = (u'=', u'-', u'`', u\"'\", u'.', u'~', u'*', u'+', u'^')\n if level < len(chars):\n return chars[level]\n return chars[-1]", "def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def escape_single_quotes(custom_data):\n # https://stackoverflow.com/questions/10569438/how-to-print-unicode-character-in-python\n # https://regex101.com/r/nM4bXf/1\n if re.search(\"(?<!u)'(?!:|}|,)\", custom_data.get('title_name', '')):\n z = re.sub(r\"(?<!u)'(?!:|}|,)\", '\\\\\\'', custom_data.get('title_name', None))\n\n custom_data['title_name'] = z\n return custom_data\n return custom_data", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def format_title(self, ticket_id, subject):\n # TODO: strip block tags?\n title = \"#%i %s\" % (ticket_id, subject)\n return title.strip()", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def title(self, value):\n if len(value):\n self._title = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._title.append('')", "def emphasize(text: str, tablefmt: str | TableFormat, strong: bool = False) -> str:\n # formats a title for a table produced using tabulate,\n # in the formats tabulate understands\n if tablefmt in [\"html\", \"unsafehtml\", html_with_borders_tablefmt]: # type: ignore\n if strong:\n emph_text = 
f\"<strong>{text}</strong>\"\n else:\n emph_text = f\"<em>{text}</em>\"\n elif tablefmt in [\"latex\", \"latex_raw\", \"latex_booktabs\", \"latex_longtable\"]:\n if strong:\n emph_text = r\"\\textbf{\" + text + r\"}\"\n else:\n emph_text = r\"\\emph{\" + text + r\"}\"\n else: # use the emphasis for tablefmt == \"pipe\" (Markdown)\n star = \"**\" if strong else \"*\"\n emph_text = f\"{star}{text}{star}\"\n return emph_text", "def textual(title, ordering_field=None):\n def decorator(func):\n def wraps(self, obj):\n result = func(self, obj)\n return result if result else u'---'\n\n wraps.short_description = title\n wraps.allow_tags = True\n\n if ordering_field:\n wraps.admin_order_field = ordering_field\n\n return wraps\n return decorator", "def outputText(self, item, titleMode, internal=False):\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def group_title(self, group):\n group_title = group.getProperty('title')\n if self.short:\n splitted = group_title.split('(')\n if len(splitted) > 1:\n group_title = group_title.split('(')[-1][:-1]\n return html.escape(group_title)", "def outputText(self, item, titleMode, internal=False):\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)", "def format_heading(self, level, text):\n underlining = ['=', '-', '~', ][level-1] * len(text)\n return '%s\\n%s\\n\\n' % (text, underlining)", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def formatted(self) -> str:\r\n ...", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def gen_title_rst(txt):\n # Just add a few useful directives\n txt = \".. 
highlight:: cmake\\n\\n\" + txt\n return txt", "def _prettyfilename(self):\n return self.title", "def wrap_title(title, mpl_layout):\n fig = mpl_layout.canvas.figure\n ax = fig.axes[0]\n ext_pixels = ax.get_window_extent()\n ext_inches = ext_pixels.transformed(fig.dpi_scale_trans.inverted())\n magic_number = 10\n letters_per_line = int(ext_inches.width * magic_number)\n title_wrapped = '\\n'.join(textwrap.wrap(title, letters_per_line))\n return title_wrapped", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''", "def transform(text: str) -> str:\n return text.title()", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)", "def complete_alt_title(self, obj):\n return str(obj)", "def clean_title(\r\n title: str,\r\n mode: Literal[\"soft\", \"hard\", \"safe\"],\r\n allow_dot: bool = False,\r\n n: Optional[int] = None,\r\n) -> str:\r\n ...", "def text(self) -> str:", "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and 
':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "def print_title(title):\n\n print(\"\\n\" + title)\n print(\"=\" * len(title))", "def format_rich_quote(rich_text_quote):\n rich_text = format_rich_text(rich_text_quote)\n return \"> \" + \"\\n> \".join(rich_text.split(\"\\n\")) + \"\\n\"", "def SearchableText(self):\n ctool = getToolByName(self, 'portal_cpscalendar')\n if getattr(ctool, 'event_fulltext_index', False):\n return '%s %s' % (self.title, self.description)\n return ''", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def visit_title_reference(self, node):\n self.body.append('\\\\emph{\\\\textbf{')", "def render(resolve_unicode,\n title_force_uppercase,\n msdos_eol_style,\n output_encoding,\n omit_fields=[]):", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def format_screen(self,str):\n # Paragraph continue\n par_re = re.compile(r'\\\\$',re.MULTILINE)\n str = par_re.sub('',str)\n return str", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def title_content(label=\"A title\"):\n return {'label':label}", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title_p(self):\n self.run_command('title_p')", "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def dumps(self):\n\n if not self.numbering:\n num = '*'\n else:\n num = ''\n\n string = Command(self.latex_name + num, self.title).dumps()\n string += '%\\n' + self.dumps_content()\n\n return string", "def processTitle(title):\n\n cleaned = re.sub(r'[@#\\\"]+', '', title.lower().strip())\n cleaned = re.sub(r'\\(\\d{4}.*\\)', '', cleaned)\n cleaned = re.sub(r':.+', '', cleaned).strip()\n return cleaned", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if 
self.font else self.text" ]
[ "0.6751716", "0.6623542", "0.6494783", "0.63471186", "0.6307567", "0.62159866", "0.6210506", "0.60685015", "0.6067435", "0.6066372", "0.6042128", "0.60192853", "0.59935206", "0.5980245", "0.59791267", "0.5953967", "0.59486204", "0.5939183", "0.59032184", "0.58724236", "0.58521456", "0.5838723", "0.58354163", "0.5834274", "0.5832575", "0.5830333", "0.5823236", "0.5819712", "0.5818831", "0.58183473", "0.5813447", "0.581237", "0.57893", "0.57773894", "0.57736665", "0.5759921", "0.5756203", "0.57513857", "0.5736763", "0.57218635", "0.57155806", "0.56936747", "0.56578994", "0.5652421", "0.565177", "0.56417406", "0.5639705", "0.5630354", "0.5623633", "0.5607862", "0.55989605", "0.5597861", "0.55936474", "0.55868113", "0.55762315", "0.55701536", "0.5561567", "0.5556922", "0.55473757", "0.5539643", "0.5532413", "0.5531824", "0.55130166", "0.54796785", "0.5477491", "0.5476842", "0.5473423", "0.5468229", "0.5464621", "0.54521966", "0.54459375", "0.5437748", "0.54370165", "0.5422997", "0.5415197", "0.5415197", "0.5399382", "0.53941417", "0.53889894", "0.53829426", "0.5382856", "0.535636", "0.53530765", "0.53529996", "0.5352499", "0.534711", "0.53337544", "0.53332794", "0.5332417", "0.533173", "0.53306735", "0.5330429", "0.53293246", "0.53273845", "0.5326961", "0.5326961", "0.5323916", "0.5316936", "0.53147906", "0.5314135" ]
0.5575392
55
Return tuple of text in edit format and bool validity, using edit format option
def formatEditText(self, storedText):
    for choice in self.splitText(storedText):
        if choice not in self.formatList:
            return (storedText, not storedText)
    return (storedText, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def reformat(ctx):\n pass", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m 
in matches]", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def test_format_permissions_and_docstring(self):\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\"],\n {\"some\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"permission formatted string\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\"\n ),\n )\n\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\", \"another permission\"],\n {\"some\": \"docstring\", \"another\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"- permission formatted string\\n\"\n \"- another permission\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\\n\"\n \"- **another** : docstring\"\n ),\n )", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def text(value):\n return True", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new 
= False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def _check_style(file_path, clang_format_bin):\n with open(file_path, 'r') as f:\n is_valid_header = f.read().startswith(CppFormatter.standard_header)\n\n cmd = [\n clang_format_bin,\n \"-style=file\",\n \"-output-replacements-xml\",\n file_path,\n ]\n result = subprocess.check_output(cmd).decode(\"utf-8\")\n if \"<replacement \" in result:\n is_valid_style = False\n else:\n is_valid_style = True\n return (is_valid_style, is_valid_header)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = _(\"Only basic text tags are available in this input 
field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. 
Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def hints(s):\n if s == 'hello':\n # string, color, bold\n return (' World', 35, False)\n return None", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_data_from_nonformat_text():\n pass", "def FormatYesNo(value):\n if value:\n return u'Yes'\n else:\n return u'No'", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def rich(text):\n return full(text, False)", "def edit(self, text):\n return self._edit_engine(text, 
break_on_success=False)", "def change_prompt_format(self, arg, **_):\n if not arg:\n message = 'Missing required argument, format.'\n return [(None, None, None, message)]\n\n self.prompt_format = self.get_prompt(arg)\n return [(None, None, None, \"Changed prompt format to %s\" % arg)]", "def test_command_edit_info_boolean_flags():\n def f(inputfile):\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(inputfile, tmp.name)\n\n for flag in (\"write_protected\", \"synchronized\", \"cleaned\"):\n for true_value, false_value in ((\"1\", \"0\"),\n (\"yes\", \"no\"),\n (\"YES\", \"No\"),\n (\"true\", \"false\"),\n (\"tRuE\", \"FaLsE\")):\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, true_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == True\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, false_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == False\n f(kValid1)\n f(kValid2)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def get_help_text(self):\n requirements = \"Your password must contain at least: {min_length} \" \\\n \"character(s), {min_uppercase} uppercase letter(s), \" \\\n \"{min_lowercase} lowercase letter(s) and \" \\\n \"{min_digits} digit(s). \".format(\n min_length=self.min_length,\n min_uppercase=self.min_uppercase,\n min_lowercase=self.min_lowercase,\n min_digits=self.min_digits)\n return requirements", "def help_for(self, flag: str) -> Tuple[str, str]:\n # Obtain arg obj\n if flag not in self.flags:\n err = \"{!r} is not a valid flag for this context! Valid flags are: {!r}\" # noqa\n raise ValueError(err.format(flag, self.flags.keys()))\n arg = self.flags[flag]\n # Determine expected value type, if any\n value = {str: \"STRING\", int: \"INT\"}.get(arg.kind)\n # Format & go\n full_names = []\n for name in self.names_for(flag):\n if value:\n # Short flags are -f VAL, long are --foo=VAL\n # When optional, also, -f [VAL] and --foo[=VAL]\n if len(name.strip(\"-\")) == 1:\n value_ = (\"[{}]\".format(value)) if arg.optional else value\n valuestr = \" {}\".format(value_)\n else:\n valuestr = \"={}\".format(value)\n if arg.optional:\n valuestr = \"[{}]\".format(valuestr)\n else:\n # no value => boolean\n # check for inverse\n if name in self.inverse_flags.values():\n name = \"--[no-]{}\".format(name[2:])\n\n valuestr = \"\"\n # Tack together\n full_names.append(name + valuestr)\n namestr = \", \".join(sorted(full_names, key=len))\n helpstr = arg.help or \"\"\n return namestr, helpstr", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def __check_format(node, lint_ctx, profile: str, allow_ext=False):\n if \"format_source\" in node.attrib and (\"ext\" in node.attrib or \"format\" in node.attrib):\n lint_ctx.warn(\n f\"Tool {node.tag} output '{node.attrib.get('name', 'with missing name')}' should use either format_source or format/ext\",\n node=node,\n )\n if \"format_source\" in node.attrib:\n return True\n if node.find(\".//action[@type='format']\") is not None:\n return True\n # if allowed (e.g. 
for discover_datasets), ext takes precedence over format\n fmt = None\n if allow_ext:\n fmt = node.attrib.get(\"ext\")\n if fmt is None:\n fmt = node.attrib.get(\"format\")\n if fmt == \"input\":\n message = f\"Using format='input' on {node.tag} is deprecated. Use the format_source attribute.\"\n if Version(str(profile)) <= Version(\"16.01\"):\n lint_ctx.warn(message, node=node)\n else:\n lint_ctx.error(message, node=node)\n\n return fmt is not None", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def verify_diagnostics_and_usage_text():\r\n msg, status = \"\", True\r\n try:\r\n sleep(10)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n 'verify end user license agreement label'\r\n flag2,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n \r\n flag = False if not (flag1 and flag2) else True\r\n else:\r\n\r\n \r\n 'Verify diagnostics usage text'\r\n flag1 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_lbl'))\r\n 'Verify diagnostics usage text'\r\n flag2 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_txt'))\r\n \r\n sleep(4) \r\n \r\n flag = False if not (flag1 and flag2) else True\r\n if not flag:\r\n return False, msg\r\n else:\r\n print \"License agreement screen name is displayed properly\"\r\n \r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def _format_action(self, action):\n parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)\n if action.nargs == argparse.PARSER:\n parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n return parts", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def _engine_option_string_and_comment(option: engine.Option, value: engine.ConfigValue) -> Tuple[str, str]:\n if value is None:\n value = ''\n name_equals_val = f'{option.name}={value}'\n if option.type == 'check' or option.type == 'string' or option.type == 'button':\n return (name_equals_val, f'type={option.type}')\n if option.type == 'spin':\n return (name_equals_val, f'type=spin, min={option.min}, max={option.max}')\n if option.type == 'combo':\n return (name_equals_val, f'type=combo, var={option.var}')\n return (name_equals_val, 'type=unknown')", "def TEXT(number, format_type):\n raise NotImplementedError()", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n 
self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def __verify_plot_options(self, options_str):\n default_line = '-'\n default_marker = ''\n default_colour = 'k'\n\n # Split str into chars list\n options_split = list(options_str)\n\n # If 0, set defaults and return early\n if len(options_split) == 0:\n return [default_line, default_marker, default_colour]\n\n # If line_style given, join the first two options if applicable\n # (some types have 2 characters)\n for char in range(0, len(options_split) - 1):\n # If char is '-' (only leading character in double length option)\n if options_split[char] == '-' and len(options_split) > 1:\n # If one of the leading characters is valid\n if options_split[char + 1] == '-' or \\\n options_split[char + 1] == '.':\n # Join the two into the first\n options_split[char] = options_split[char] \\\n + options_split[char + 1]\n # Shuffle down the rest\n for idx in range(char + 2, len(options_split)):\n options_split[idx - 1] = options_split[idx]\n # Remove duplicate extra\n options_split.pop()\n\n # If any unknown, throw error\n for option in options_split:\n if option not in self.__line_styles and \\\n option not in self.__marker_styles and \\\n option not in self.__colour_styles:\n error_string = \"Unknown character entered: '{0}'\"\n raise ValueError(error_string.format(option))\n\n ##############################\n # Verify Line Style\n ##############################\n line_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n line_style_index = 0\n for option in options_split:\n if option in self.__line_styles:\n line_style_count = line_style_count + 1\n line_style_index = self.__line_styles.index(option)\n\n # If more than one, throw error\n if line_style_count > 1:\n raise ValueError(\n \"Too many line style arguments given. Only one allowed\")\n # If none, set as solid\n elif line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = default_line\n # If one, set as given\n else:\n output_line = self.__line_styles[line_style_index]\n ##############################\n\n ##############################\n # Verify Marker Style\n ##############################\n marker_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n marker_style_index = 0\n for option in options_split:\n if option in self.__marker_styles:\n marker_style_count = marker_style_count + 1\n marker_style_index = self.__marker_styles.index(option)\n\n # If more than one, throw error\n if marker_style_count > 1:\n raise ValueError(\n \"Too many marker style arguments given. 
Only one allowed\")\n # If none, set as no-marker\n elif marker_style_count == 0 or not any(\n item in options_split for item in self.__marker_styles):\n output_marker = default_marker\n # If one, set as given\n else:\n output_marker = self.__marker_styles[marker_style_index]\n # If marker set and no line given, turn line to no-line\n if line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = ''\n ##############################\n\n ##############################\n # Verify Colour Style\n ##############################\n colour_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n colour_style_index = 0\n for option in options_split:\n if option in self.__colour_styles:\n colour_style_count = colour_style_count + 1\n colour_style_index = self.__colour_styles.index(option)\n\n # If more than one, throw error\n if colour_style_count > 1:\n raise ValueError(\n \"Too many colour style arguments given. Only one allowed\")\n # If none, set as black\n elif colour_style_count == 0 or not any(\n item in options_split for item in self.__colour_styles):\n output_colour = default_colour\n # If one, set as given\n else:\n output_colour = self.__colour_styles[colour_style_index]\n ##############################\n\n return [output_line, output_marker, output_colour]", "def reformat():\n toolkit.reformat()", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def validate_format(self):\n raise NotImplementedError()", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def _validate_performatives(performative: str) -> Tuple[bool, str]:\n # check performative is not a reserved name\n if _is_reserved_name(performative):\n return (\n False,\n \"Invalid name for performative '{}'. This name is reserved.\".format(\n performative,\n ),\n )\n\n # check performative's format\n if not _is_valid_regex(PERFORMATIVE_REGEX_PATTERN, performative):\n return (\n False,\n \"Invalid name for performative '{}'. 
Performative names must match the following regular expression: {} \".format(\n performative, PERFORMATIVE_REGEX_PATTERN\n ),\n )\n\n return True, \"Performative '{}' is valid.\".format(performative)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def edit_form_entry_help_text_extra(cls):\n return \"\"\"\n <ul class=\"{container_class}\">\n {edit_option_html}\n <li><a href=\"{delete_url}\">\n <span class=\"{delete_option_class}\"></span> {delete_text}</a>\n </li>\n </ul>\n <input type=\"hidden\" value=\"{form_element_position}\"\n name=\"form-{counter}-position\"\n id=\"id_form-{counter}-position\"\n class=\"form-element-position\">\n <input type=\"hidden\" value=\"{form_element_pk}\"\n name=\"form-{counter}-id\" id=\"id_form-{counter}-id\">\n \"\"\".format(\n container_class=cls.form_list_container_class,\n edit_option_html=\"{edit_option_html}\",\n delete_url=\"{delete_url}\",\n delete_option_class=cls.form_delete_form_entry_option_class,\n delete_text=\"{delete_text}\",\n form_element_position=\"{form_element_position}\",\n counter=\"{counter}\",\n form_element_pk=\"{form_element_pk}\",\n )", "def extension (formatStr):\n assert False, \"TODO:\"", "def _generateReadOnly(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'readonly'\n if self._script.utilities.isReadOnlyTextArea(obj):\n result.append(self._script.formatting.getString(**args))\n return result", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def _format_answer(self, text):\n text = str(text).replace('\\n', ' ')\n answer_width = 70\n pretty_text = '\\n\\t'.join(textwrap.wrap(text, answer_width))\n\n return pretty_text", "def is_text_editable(path):\n return False", "def flags(self, f):\n if f.is_inlined:\n return \" (inlined)\"\n return \"\"", "def text_to_display(level):\n if level == \"html\":\n return html_answers, html_text\n elif level == \"css\":\n return css_answers, css_text\n elif level == \"python\":\n return python_answers, python_text", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def editorForTyp(typ):\n\n if typ == \"quint32\":\n return (\"QSpinBox\", \"setValue\", \"value\")\n elif typ == \"QString\":\n return (\"QLineEdit\", \"setText\", \"text\")\n elif typ == \"bool\":\n return (\"QCheckBox\", \"setChecked\", \"isChecked\")\n return (None, None, None)", "def show_fields(*fields):\n\n fields = filter( lambda x: x, fields )\n target_len = max( len(name) for name, value in fields ) + 2\n for name, value in fields:\n line = name + ':' + \" \" * (target_len - len(name))\n if type(value) == bool:\n line += color_text(\"Yes\", 'green') if value else color_text(\"No\", 'red')\n else:\n line += str(value)\n print line", "def testCheckRequiredFormat(self):\n plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()\n\n file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()\n file_system_builder.AddFile('/file.txt', (\n b'2018-01-24 18:25:08,454 -0800 INFO pid=2376 7780:MainThread '\n b'logging_config.py:295 OS: Windows/6.1-SP1\\n'))\n\n file_entry = file_system_builder.file_system.GetFileEntryByPath('/file.txt')\n\n parser_mediator = self._CreateParserMediator(None, file_entry=file_entry)\n\n file_object = file_entry.GetFileObject()\n text_reader = 
text_parser.EncodedTextReader(file_object)\n text_reader.ReadLines()\n\n result = plugin.CheckRequiredFormat(parser_mediator, text_reader)\n self.assertTrue(result)", "def formatted(self) -> str:\r\n ...", "def text_editor():\n return True", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def validate(self, txt, pos):\n state, rpos = qt.QDoubleValidator.validate(self, txt, pos)\n if txt.length() == 0:\n state = qt.QValidator.Acceptable\n return state, rpos" ]
[ "0.74409115", "0.7369682", "0.7101609", "0.7101609", "0.7035388", "0.7028556", "0.6883685", "0.6550002", "0.6452589", "0.6391841", "0.6309491", "0.6299821", "0.61874855", "0.60250795", "0.583455", "0.5770457", "0.573678", "0.5614053", "0.5609872", "0.56041586", "0.55477756", "0.53100324", "0.5299071", "0.5209894", "0.5205097", "0.5205097", "0.5205097", "0.5205097", "0.5205097", "0.5200622", "0.5172154", "0.51652354", "0.5159618", "0.51195395", "0.5115454", "0.5094424", "0.50928664", "0.50774634", "0.5076837", "0.506709", "0.50559866", "0.5048865", "0.50116587", "0.50006783", "0.4988621", "0.49727595", "0.49726328", "0.4945682", "0.49346167", "0.49305907", "0.49302247", "0.49205834", "0.4899774", "0.48946813", "0.4889593", "0.48848787", "0.48710102", "0.48603994", "0.485448", "0.4853418", "0.48492938", "0.4849078", "0.48415923", "0.48410097", "0.48397195", "0.48287314", "0.48252425", "0.4824147", "0.48218486", "0.48204252", "0.4820401", "0.48190412", "0.48128283", "0.48080114", "0.48075286", "0.4801114", "0.48006943", "0.4796574", "0.47940776", "0.47910094", "0.47794297", "0.4775982", "0.47566742", "0.47563574", "0.47514576", "0.47491467", "0.4748313", "0.47368434", "0.4736663", "0.47360152", "0.47345802", "0.4733563", "0.47324613", "0.47292554", "0.4723963", "0.47194177", "0.47147244", "0.47145414", "0.4712799", "0.47122368" ]
0.71582556
2
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText):
    choices, valid = self.sortedChoices(editText)
    if valid:
        return (self.editSep.join(choices), True)
    else:
        return (editText, not editText and not self.isRequired)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def get_data_from_nonformat_text():\n pass", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def on_edit(self, event, text):\n return None", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def reformat(ctx):\n pass", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if check_text:\n check(text)\n \n print(text)\n \n ### managing placeholders\n text = parsers['v'].main(text)\n \n ### 
saving names\n if index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise PlansError(\"Couldn't get edit text, are we logged in?\")\n else:\n plan = u'' + 
plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_edits(text):\n edit_p = re.compile(\"(?P<open><edit.*?>)(?P<inner>.*?)(?P<close></edit>)\")\n corr_p = re.compile(\"<corrections>.*?</corrections>\")\n edits = []\n\n offset = 0\n\n for m in re.finditer(edit_p, text):\n # Make an edit object\n edit_text = \"\".join(m.groups())\n edit = ET.XML(m.group(0))\n\n # Set the bounds of the original text and adjust offset\n inner_string = m.group('inner') \n start = m.start() - offset\n corr_m = re.search(corr_p, inner_string)\n \n if corr_m: # Replacement/insertion have a correction\n offset += len(corr_m.group(0)) \n \n if not inner_string.startswith(\"<empty/>\"):\n end = start + corr_m.start()\n else:\n offset += 
len(\"<empty/>\") # It is \"\" in plain text\n end = start\n else:\n # Deletions may not have a correction\n if not inner_string.startswith(\"<empty/>\"):\n end = start + len(inner_string)\n else: # Unspecified error <empty/> is \"\" in plain text\n end = start\n offset += len(inner_string)\n\n\n edit.set(\"start\", \"%d\" % start) \n edit.set(\"end\", \"%d\" % end)\n\n offset += len(m.group('open')) + len(m.group('close'))\n \n\n # Make the original text a subelement of <edit>\n # Original text may be a string or <empty/> element.\n original = ET.SubElement(edit, \"original\")\n \n if edit.text:\n original.text = edit.text\n edit.text = \"\"\n else:\n empty = edit.find('empty')\n \n try:\n edit.remove(empty)\n original.append(empty)\n except Exception as e:\n pass\n \n edits.append(edit)\n\n return edits", "def refang(self, text: str):", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def get_mark(text, short):\n\n line = text.readline()\n\n # check that the line begins with a valid entry type\n if not short and not re.match(r'^\\s*(text|mark) = \"', line):\n raise ValueError('Bad entry: ' + line)\n\n # read until the number of double-quotes is even\n while line.count('\"') % 2:\n next_line = text.readline()\n\n if not next_line:\n raise EOFError('Bad entry: ' + line[:20] + '...')\n\n line += next_line\n if short:\n pattern = r'^\"(.*?)\"\\s*$'\n else:\n pattern = r'^\\s*(text|mark) = \"(.*?)\"\\s*$'\n entry = re.match(pattern, line, re.DOTALL)\n\n return entry.groups()[-1].replace('\"\"', '\"')", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def getText(self):", "def get_text(text_input):\r\n return text_input", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def rich(text):\n return full(text, False)", "def text(value):\n return True", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n 
\"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def edit():", "def get_text_editor_input(initial_msg):\n EDITOR = os.environ.get('EDITOR', 'vi')\n CROP_MARK = ('\\n\\nAnything above this line will be ignored:\\n' +\n ('-' * 34) + '>8' + ('-' * 34) + '\\n')\n\n wrapper = TextWrapper(replace_whitespace=False, drop_whitespace=False)\n initial_msg = '\\n'.join(wrapper.wrap(initial_msg))\n initial_msg += CROP_MARK\n\n with tempfile.NamedTemporaryFile(suffix='.md') as temp:\n temp.write(initial_msg.encode('utf-8'))\n temp.flush() # Write buffer to the file\n subprocess.call([EDITOR, temp.name])\n\n # The pointer was already after the initial message, but we return to\n # the beginning just in case the user added content before the mark\n temp.seek(0)\n return temp.read().decode('utf-8').split(CROP_MARK, 1)[1].strip()", "def text_example():\n \n text_store = \"01000001011000010010000001000010011000100000110100001010001100010011001000110011\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"You should be able to save this file and open it in a text editor like Notepad or Nano to read it. If you edit the values you may find it does not display properly as text. 
Unchanged, it should be interpreted by a text editor as:\\n\\nAa Bb\\n123\\n\\nAs the file was made on a Windows machines you may find other systems display the line breaks differently.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "def is_text_editable(path):\n return False", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def edit_once(self, text):\n return self._edit_engine(text, break_on_success=True)", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def process_text(self, text, language):", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. 
If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def get_text_from_editor():\n with tempfile.NamedTemporaryFile(suffix='.tmp', mode='w+t') as f:\n # Create a temporary file with instructions on describing bug\n f.write(message + '\\n\\n')\n f.flush()\n # Open the editor and allow the user to type\n editor = os.environ.get('EDITOR', 'vim')\n subprocess.call([editor, f.name])\n # Read and clean the file\n f.seek(0)\n text = ''.join([line.lstrip() for line in f.readlines()\n if line and not line.lstrip().startswith('#')])\n return '\\n'.join(textwrap.wrap(text, width=100))", "def storeTextEditValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.sender().toPlainText()\n\t\tself.storeValue(category, attr, value)", "def _editorText(self):\n if self.__lineEditKind:\n return self._editor.text()\n else:\n return self._editor.currentText()", "def _hidden_in_unicode(self, txt):", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def edit_type(self, candidate, word):\n edit = [False] * 4\n correct = \"\"\n error = \"\"\n replaced = ''\n replacer = ''\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]: # inconsistency in the first (i + 1) characters of the two strings\n if candidate[i:] == word[i - 1:]:\n edit[1] = True # deletion\n correct = candidate[i - 1] # candidate[i - 1] is deleted and we get word\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n edit[0] = True # insertion\n correct = ''\n error = word[i] # word[i] is redundant\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = 
word[i - 1] + error\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True # substitution\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True # transposition\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n # string inversion\n candidate = candidate[::-1]\n word = word[::-1]\n\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]:\n if candidate[i:] == word[i - 1:]:\n edit[1] = True\n correct = candidate[i - 1]\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n correct = ''\n error = word[i]\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = word[i - 1] + error\n edit[0] = True\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n if word == candidate:\n return \"None\", '', '', '', ''\n if edit[0]:\n return EDIT_TYPE_INSERTION, correct, error, replaced, replacer\n elif edit[1]:\n return EDIT_TYPE_DELETION, correct, error, replaced, replacer\n elif edit[2]:\n return EDIT_TYPE_SUBSTITUTION, correct, error, replaced, replacer\n elif edit[3]:\n return EDIT_TYPE_TRANSPOSITION, correct, error, replaced, replacer", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def text_editor():\n return True", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def stepText2Changed(build, step, text2):", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def on_idEdit_textChanged(self, txt):\n self.__generateDefaultCommitMessage()\n 
self.__updateOK()", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def element_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier))\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def reformat():\n toolkit.reformat()", "def process_raw_text(self, file_name, column_side):\n self.mvc_check()\n\n model_txt = None\n if column_side == LEFT_TEXT:\n model_txt = self.model.txt1\n elif column_side == RIGHT_TEXT:\n model_txt = self.model.txt2\n\n model_txt.open_raw(file_name)\n model_txt.process_raw()\n self.opened_txt[column_side] = True\n self.can_align = self.opened_txt[LEFT_TEXT] and self.opened_txt[RIGHT_TEXT]\n\n # Goldsmith\n model_txt.make_trie(column_side)\n model_txt.apply_goldsmith(1.1, 20, column_side)\n\n # Associate word for alignment if both text were opened\n if self.can_align:\n for view in self.views:\n view.end_task()\n view.change_task(\"Associating words\")\n self.model.associate_words(1.5)\n for view in self.views:\n view.end_task()\n\n # TODO : coherent saving to database using model.save_data\n\n return model_txt.str", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def read_plain_txt(input_fn: str) -> Tuple[List[str], List[str]]:\n\n with open(input_fn, 'r') as f:\n migrations = []\n queries = []\n mode = 'none'\n for line in f:\n stripped = line.strip()\n if len(stripped) == 0:\n continue\n if stripped.lower() == '== migrations':\n if mode != 'none':\n raise ValueError(f'Invalid {input_fn}: The migrations section 
should appear first.')\n mode = 'migrations'\n elif stripped.lower() == '== queries':\n if mode != 'migrations':\n raise ValueError(f'Invalid {input_fn}: The queries section should appear after the migrations section.')\n mode = 'queries'\n elif stripped[0] == '#':\n pass\n else:\n if mode == 'migrations':\n migrations.append(stripped)\n elif mode == 'queries':\n queries.append(stripped)\n else:\n pass\n return migrations, queries", "def on_lineEdit_textChanged(self, p0):\n # str_me = \"我爱我的祖国\"\n # self.lineEdit.setText(str_me) # 设置单行文本内容\n input_text = self.lineEdit.text()\n self.textEdit.setPlainText(input_text)\n # self.textEdit.setHtml(input_text) # 显示Html,如 <font color='red' size='20'>HELLO!</font>\n a = self.textEdit.toPlainText()\n print(a)", "def post_process_text(self, text):\n\t\treturn text", "def text(self) -> str:", "def editText(self, text, jumpIndex=None, highlight=None):\n try:\n import gui\n except ImportError, e:\n print 'Could not load GUI modules: %s' % e\n return text\n editor = gui.EditBoxWindow()\n return editor.edit(text, jumpIndex=jumpIndex, highlight=highlight)", "def alter_text_format(self):\n service = self.slides_service\n requests = [\n {\n 'updateParagraphStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'alignment': 'CENTER'\n },\n 'fields': 'alignment'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.TITLE_FONT_SIZE, # numbers slightly larger than lyrics\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.left_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.right_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n }\n ]\n body = {\n 'requests': requests\n }\n response = service.presentations().batchUpdate(presentationId=self.presentation_id, body=body).execute()\n print(f'Updated the text style for shape with ID: {self.left_box_id}')\n return response", "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def edit(self,edits):\n\t\tself.alphanumeric=edits['alphanumeric'] if 'alphanumeric' in edits else None\n\t\tself.alphanumeric_color = edits['alphanumeric_color'] if 'alphanumeric_color' in edits else None\n\t\tif self.alphanumeric_color ==\"grey\":\n\t\t\tself.alphanumeric_color = 
\"gray\"\n\t\tself.background_color = edits['background_color'] if 'background_color' in edits else None\n\t\tif self.background_color == \"grey\":\n\t\t\tself.background_color = \"gray\";\n\t\tshapeChoices = dict((x,y) for x,y in Target.SHAPE_CHOICES)\n\t\tself.shape = str(shapeChoices[edits['shape']]) if 'shape' in edits else None\n\t\tself.orientation = edits['orientation'] if 'orientation' in edits else None\n\t\tself.ptype = edits['ptype']\n\t\tself.description = edits['description'] if 'description' in edits else None\n\t\tself.save()", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def displayText():\n global entryWidget,entryWidget1,entryWidget2,entryWidget3,entryWidget4 ,entryWidget5,entryWidget6\n global thefilename,itrial,do_stim, delaylen,ntest_arms,stop_if_error,timeout_arm_sec\n thefilename=entryWidget.get().strip()\n itrial=entryWidget1.get().strip()\n do_stim=entryWidget2.get().strip()\n delaylen=entryWidget3.get().strip()\n ntest_arms=entryWidget4.get().strip()\n stop_if_error=int(entryWidget5.get().strip())==1 # convert to logical\n print 'stop_if_error is ', stop_if_error\n\n\n timeout_arm_sec=entryWidget6.get().strip()\n root.destroy()\n return thefilename,itrial,do_stim,delaylen,ntest_arms,stop_if_error,timeout_arm_sec" ]
[ "0.78716373", "0.76830506", "0.75691116", "0.75691116", "0.7379154", "0.73117137", "0.7183602", "0.7152062", "0.7089976", "0.6903923", "0.6863199", "0.6748621", "0.6604557", "0.62711895", "0.61224514", "0.6009547", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5534457", "0.5529326", "0.55119324", "0.54897064", "0.54593766", "0.53941077", "0.53884834", "0.53541094", "0.5348279", "0.5336523", "0.53298044", "0.53044033", "0.53017735", "0.5284678", "0.52548796", "0.5231703", "0.52075195", "0.51657903", "0.5139631", "0.51269805", "0.51183087", "0.50954133", "0.5086037", "0.50556576", "0.50475675", "0.50413114", "0.5033974", "0.50320536", "0.50238174", "0.50172436", "0.501209", "0.5011348", "0.50095177", "0.499828", "0.49958882", "0.49862808", "0.49802482", "0.49685866", "0.49656975", "0.49588487", "0.4951691", "0.49488887", "0.49448055", "0.49138415", "0.49082175", "0.48921612", "0.48836753", "0.48688877", "0.48642147", "0.48558703", "0.48427588", "0.48402458", "0.48379573", "0.48347312", "0.4829869", "0.48117617", "0.48040468", "0.48027003", "0.47989967", "0.47953638", "0.47919485", "0.47787616", "0.47736892", "0.47728088", "0.47708187", "0.4769437", "0.4768398", "0.47677627", "0.47633177", "0.47631097", "0.4755773", "0.47515184", "0.4750719", "0.47494507", "0.47457764", "0.47452554", "0.4735827", "0.47239852", "0.47187877" ]
0.68065554
11
Return a list of choices for the combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''):
    currentChoices, valid = self.sortedChoices(currentText)
    nonChoices = [text for text in self.formatList
                  if text not in currentChoices]
    results = []
    for choice in nonChoices:  # menu entries to add a choice
        allChoices = currentChoices + [choice]
        allChoices = [text for text in self.formatList
                      if text in allChoices]
        results.append((self.editSep.join(allChoices),
                        '(%s %s)' % (_('add'), choice)))
    if currentChoices:
        results.append((None, None))  # separator
    for choice in currentChoices:  # menu entries to remove a choice
        allChoices = currentChoices[:]
        allChoices.remove(choice)
        allChoices = [text for text in self.formatList
                      if text in allChoices]
        results.append((self.editSep.join(allChoices),
                        '(%s %s)' % (_('remove'), choice)))
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def choices(self):\n return tuple(self._choices)", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def fill_combobox_attributes(self):\n\n list_char = [\"\"]\n list_num = [\"\"]\n if self.ui.radioButton_file.isChecked():\n for a in self.attributes:\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n else:\n for a in self.attributes:\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.clear()\n self.ui.comboBox_char_attributes.clear()\n self.ui.comboBox_char_attributes.addItems(list_char)\n self.ui.comboBox_num_attributes.addItems(list_num)\n self.ui.comboBox_num_attributes.blockSignals(False)\n self.ui.comboBox_char_attributes.blockSignals(False)", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def 
get_choices(cls):\n return cls.values.items()", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def create_combo(self) -> typing.Iterable[Combo]:\n raise NotImplementedError()", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice>\" +\r\n \"tag; got {0} instead\".format(choice.tag)\r\n )\r\n\r\n components = []\r\n choice_text = ''\r\n if choice.text is not None:\r\n choice_text += choice.text\r\n # Initialize our dict for the next content\r\n adder = {\r\n 'type': 'text',\r\n 'contents': choice_text,\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n components.append(adder)\r\n\r\n for elt in choice:\r\n # for elements in the choice e.g. <text> <numtolerance_input>\r\n adder = {\r\n 'type': 'text',\r\n 'contents': '',\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n tag_type = elt.tag\r\n # If the current `elt` is a <numtolerance_input> set the\r\n # `adder`type to 'numtolerance_input', and 'contents' to\r\n # the `elt`'s name.\r\n # Treat decoy_inputs and numtolerance_inputs the same in order\r\n # to prevent students from reading the Html and figuring out\r\n # which inputs are valid\r\n if tag_type in ('numtolerance_input', 'decoy_input'):\r\n # We set this to textinput, so that we get a textinput html\r\n # element.\r\n adder['type'] = 'textinput'\r\n adder['contents'] = elt.get('name')\r\n else:\r\n adder['contents'] = elt.text\r\n\r\n # Add any tail text(\"is the mean\" in the example)\r\n adder['tail_text'] = elt.tail if elt.tail else ''\r\n components.append(adder)\r\n\r\n # Add the tuple for the current choice to the list of choices\r\n choices.append((choice.get(\"name\"), components))\r\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def combo_callback(self, eventobj):\n print(self.combo_input.get()) # print name\n print(self.combo_input.current()) # print index", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def set_choices(self, index, choices):\n if len(choices) == 1:\n self._label(index)\n self._widgets[index][\"text\"] = str(choices[0])\n else:\n self._combo(index)\n self._widgets[index][\"values\"] = 
[str(t) for t in choices]\n width = max(len(str(t)) for t in choices)\n width = max(5, width)\n self._widgets[index][\"width\"] = width", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice> tag; got %s instead\"\r\n % choice.tag)\r\n choices.append((choice.get(\"name\"), stringify_children(choice)))\r\n return choices", "def __init__(self, \n num_fld=1, \n lab_txt=[\"1\"], \n txt_fld=[\"1\"], \n title_txt=\"test\", \n comb_txt=[],\n comb_lab_txt=[], \n comb_num=0, \n root_x=50, \n root_y=50):\n super().__init__()\n self.geometry(f'+{root_x}+{root_y}') #head=y+20px\n self.str_in=[]\n self.title(title_txt)\n if comb_txt:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n self.comb=[]\n self.act=[]\n lab=[0]*num_fld\n lab_comb=[0]*comb_num\n else:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n lab=[0]*num_fld\n self.comb=[]\n self.act=[]\n for i in range(num_fld):\n self.name[i]=tk.StringVar()\n ent[i]=tk.Entry(self,textvariable=self.name[i])\n ent[i].insert(0, txt_fld[i])\n lab[i] = tk.Label(self,width=15, text=lab_txt[i])\n lab[i].pack()\n ent[i].pack()\n for i in range(comb_num):\n lab_comb[i]=tk.Label(self,width=35, text=comb_lab_txt[i])\n self.comb.append(ttk.Combobox(self, values=comb_txt))\n lab_comb[i].pack()\n self.comb[i].pack()\n self.comb[i].current(1)\n\n but_ac=tk.Button(self, text=\"Accept\", command=self.ins)\n but_ac.pack()\n self.mainloop", "def input_choices_from_list(choices, text):\n no_courses_text = \"\"\"\n init will only list the courses you are enrolled in\n and there seem to be none.\n Either enrol in a course or add the course id as command line argument.\n \"\"\"\n if choices is None or len(choices) == 0:\n print(no_courses_text)\n raise SystemExit(1)\n\n digits = str(math.ceil(math.log10(len(choices))))\n format_str = '{:' + digits + 'd} {}'\n for n, c in enumerate(choices):\n print(format_str.format(n, c))\n try:\n return [int(c) for c in input(text).split()]\n except EOFError:\n return []", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def combobox(self):\n return self._combo", "def widgets_from_abbreviations(self, seq):\n result = []\n for name, abbrev, default in seq:\n widget = self.widget_from_abbrev(abbrev, default)\n if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):\n if widget is None:\n raise ValueError(\"{!r} cannot be transformed to a widget\".format(abbrev))\n else:\n raise TypeError(\"{!r} is not a ValueWidget\".format(widget))\n if not widget.description:\n widget.description = name\n widget._kwarg = name\n result.append(widget)\n return result", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def build_comboboxes(activities, events):\n global comboboxes\n # For each activity set up a selector for an event\n\n for activity in activities:\n\n # Setup frame for better display in gui\n frame = Frame(main_window)\n frame.configure(background=\"gray30\")\n\n # Label the left column as 
activity in a model + \"beautify gui\"\n text = \"Activity name (model):\"\n Label(frame, text=text, bg=\"gray30\", fg=\"white\", padx=5).grid(column=0, row=0)\n Label(frame, text=activity, bg=\"gray30\", fg=\"white\").grid(column=0, row=1)\n\n # Set up the combobox for an event\n combo = Combobox(frame)\n combo['values'] = events\n\n # If activity is in events preselect the current one\n if activity in events:\n combo.current(events.index(activity))\n\n # Label the combobox and place label and box in frame\n Label(frame, text=\"Event name (log):\", bg=\"gray30\", fg=\"white\", padx=5).grid(column=1, row=0)\n combo.grid(column=1, row=1)\n\n # If the last activity in the graph is handled then do not write a separator\n if activity != activities[-1]:\n Separator(frame, orient=\"horizontal\").grid(row=2, columnspan=2, sticky=\"ew\", pady=10)\n\n comboboxes[activity] = combo\n # place the frame in the main_window\n frame.grid(column=0)", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def widgets(parameter: Parameter):\n widgets = []\n for key in parameter.keys():\n textEdit = QTextEdit()\n textEdit.setText(key)\n widgets.append(textEdit)\n if isinstance(parameter[key], Enum):\n comboBox = MyQtEnumComboBox()\n comboBox.fillValues(type(parameter[key]))\n widgets.append(comboBox)\n elif isinstance(parameter[key], bool):\n comboBox = QComboBox()\n comboBox.addItems((\"False\", \"True\"))\n widgets.append(comboBox)\n else:\n textEdit = QTextEdit()\n textEdit.setText(str(parameter[key]))\n widgets.append(textEdit)\n for widget in widgets:\n widget.setFixedHeight(30)\n return widgets", "def _getBrailleRegionsForComboBox(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForComboBox\", obj)\n\n regions = []\n\n focusedRegionIndex = 0\n label = self._script.getDisplayedLabel(obj)\n if label and (len(label) > 0):\n regions.append(braille.Region(label + \" \"))\n focusedRegionIndex = 1\n\n # Check to see if the text is editable. 
If so, then we want\n # to show the text attributes (such as selection -- see bug\n # 496846 for more details).\n #\n textObj = None\n for child in obj:\n if child and child.getRole() == pyatspi.ROLE_TEXT:\n textObj = child\n if textObj and textObj.getState().contains(pyatspi.STATE_EDITABLE):\n textRegion = braille.Text(textObj)\n regions.append(textRegion)\n else:\n displayedText = self._script.getDisplayedText(obj)\n if displayedText:\n regions.append(braille.Region(displayedText))\n\n regions.append(braille.Region(\n \" \" + rolenames.getBrailleForRoleName(obj)))\n\n # Things may not have gone as expected above, so we'll do some\n # defensive programming to make sure we don't get an index out\n # of bounds.\n #\n if focusedRegionIndex >= len(regions):\n focusedRegionIndex = 0\n if len(regions) == 0:\n focusedRegion = None\n else:\n focusedRegion = regions[focusedRegionIndex]\n\n # [[[TODO: WDW - perhaps if a text area was created, we should\n # give focus to it.]]]\n #\n return [regions, focusedRegion]", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def test_rendering_combobox(qtbot):\n layer = 
Image(np.random.rand(8, 8))\n qtctrl = QtImageControls(layer)\n qtbot.addWidget(qtctrl)\n combo = qtctrl.renderComboBox\n opts = {combo.itemText(i) for i in range(combo.count())}\n rendering_options = {\n 'translucent',\n 'additive',\n 'iso',\n 'mip',\n 'minip',\n 'attenuated_mip',\n 'average',\n }\n assert opts == rendering_options\n # programmatically updating rendering mode updates the combobox\n layer.rendering = 'iso'\n assert combo.findText('iso') == combo.currentIndex()", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def get_poll_choices(self, games: [Game]) -> [dict]:\n answer_texts = []\n for g in games:\n answer_texts.append(g.name + \" - \" + g.genre)\n answer_texts = sorted(answer_texts, key=str.lower)\n poll_choices = []\n for at in answer_texts:\n poll_choices.append({\"text\": at})\n return poll_choices", "def __str__(self):\n return \"choice_text: \" + self.choice_text", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def comboBox(args: list, slot) -> QComboBox:\n comboBox = QComboBox()\n comboBox.addItems(args[0])\n comboBox.currentTextChanged.connect(slot)\n return comboBox", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def make_ddls( self, parent ):\n # ---- 0\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n a_widget.bind( \"<<ComboboxSelected>>\", self.sync_ddl_0 )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_0_widget = a_widget\n\n # ---- 1\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n self.ddl_1_widget = a_widget\n\n # ---- 2\n a_widget = ttk.Combobox( parent,\n width = self.arg_width + 5,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_2_widget = a_widget\n\n # self.ddl_widgets = [ self.ddl_0_widget, self.ddl_1_widget, self.ddl_2_widget, ]\n # print( self.ddl_widgets[ 0 ] == self.ddl_widgets[ 1 ])\n\n #self.load_ddl_0( )", "def __str__(self):\n return self.choice_text", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def comboBoxes(self):\r\n # Cities Combo Button\r\n self.comboCities = QComboBox()\r\n 
self.comboCities.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboCities.addItems(\r\n ['Girón', 'Piedecuesta', 'Floridablanca', 'Bucaramanga'])\r\n self.grid.addWidget(self.comboCities, 6, 1, 1, 2)\r\n self.comboCities.setCurrentText(\"Bucaramanga\")\r\n # Payment Combo Button\r\n self.comboPayment = QComboBox()\r\n self.comboPayment.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboPayment.addItems(['Efectivo', 'Nequi'])\r\n self.grid.addWidget(self.comboPayment, 7, 1, 1, 2)", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def initDefaultChoices(self):\n return []", "def list_selector_widget(members=None,\n preselect=None,\n entry=False,\n callback=None):\n store, i=generate_list_model(members,\n active_element=preselect)\n\n if entry:\n combobox=gtk.ComboBoxEntry(store, column=0)\n else:\n combobox=gtk.ComboBox(store)\n cell = gtk.CellRendererText()\n combobox.pack_start(cell, expand=True)\n combobox.add_attribute(cell, 'text', 0)\n combobox.add_attribute(cell, 'background', 2)\n\n combobox.set_active(-1)\n if i is None:\n i = store.get_iter_first()\n if i is not None:\n combobox.set_active_iter(i)\n\n if entry:\n def get_current_element(combo):\n try:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n except (TypeError, AttributeError):\n return unicode(combo.child.get_text())\n def set_current_element(combo, t):\n combo.child.set_text(t)\n else:\n def get_current_element(combo):\n if combo.get_active_iter() is not None:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n else:\n return None\n def set_current_element(combo, el):\n # Find the index of the element\n l=[ t[0] for t in enumerate(combo.get_model()) if t[1][1] == el ]\n if l:\n # The element is present.\n combo.set_active(l[0])\n else:\n combo.set_active_iter(combo.get_model().append( (unicode(el), el, None) ))\n\n # Bind the method to the combobox object\n combobox.get_current_element = get_current_element.__get__(combobox)\n combobox.set_current_element = set_current_element.__get__(combobox)\n\n if callback is not None:\n combobox.connect('changed', callback)\n\n return combobox", "def occurrence_choices():\n OCCURRENCE_CHOICES = [('one_off', 'One Off'), ('weekly', 'Weekly'), ('fortnightly', 'Fortnightly')]\n occurrence = forms.ChoiceField(choices=OCCURRENCE_CHOICES, 
widget=forms.RadioSelect())\n return occurrence", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def DrawComboBox(*args, **kwargs):\n return _gdi_.RendererNative_DrawComboBox(*args, **kwargs)", "def fill_combobox(self):\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 1 ORDER BY last_name ASC\"\n self.CB_employee.addItem(\"\")\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 0 ORDER BY last_name ASC\"\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def get_context(self, name, value, attrs=None, choices=()):\n context = super(TriStateCheckboxSelectMultiple, self).get_context(\n name, value, attrs\n )\n\n choices = dict(it.chain(self.choices, choices))\n if value is None:\n value = dict.fromkeys(choices, False)\n else:\n value = dict(dict.fromkeys(choices, False).items() +\n value.items())\n\n context['values'] = [\n (choice, label, value[choice])\n for choice, label in choices.iteritems()\n ]\n\n return context", "def get_text_type():\n opt = ['text', 'email', 'password']\n inp = option_menu(opt, 'Select text type:')\n\n # mark text type with option\n OPTIONS['text-type'] = opt[inp]\n\n # add option to collected list\n add_to_collected('text type', opt[inp])\n\n return", "def set_dropdown_b_options(value):\n options_c = []\n if value=='C':\n options_c = [{'label': '1', 'value': '1'},\n {'label': '2', 'value': '2'}]\n if value == 'D':\n options_c = [{'label': '3', 'value': '3'},\n {'label': '4', 'value': '4'}]\n if value=='E':\n options_c = [{'label': '5', 'value': '5'},\n {'label': '6', 'value': '6'}]\n if value == 'F':\n options_c = [{'label': '7', 'value': '7'},\n {'label': '8', 'value': '8'}]\n return options_c", "def objects_to_choices(queryset):\n res = []\n for elm in queryset:\n res.append((elm.pk, unicode(elm)))\n return res", "def choice(text, choices, **kwargs):\n return click.prompt(click.style('> {}'.format(text), fg='blue', bold=True),\n type=click.Choice(choices),\n **kwargs)", "def on_comboBox_enceinte_activated(self, index):\n nom_enceinte = self.comboBox_enceinte.currentText()\n marque = [x[2] for x in self.enceintes if x[1] == nom_enceinte][0]\n n_serie = [x[4] for x in self.enceintes if x[1] == nom_enceinte][0]\n model =[x[3] for x in self.enceintes if x[1] == nom_enceinte][0]\n \n \n self.lineEdit_marque.setText(marque)\n self.lineEdit_n_serie.setText(n_serie)\n self.lineEdit_model.setText(model)", "def display_choose(self, text, choices):\n cur_index = 0\n key = None\n while key != 'KEY_NEWLINE':\n if key == 'KEY_UP':\n cur_index = max(cur_index - 1, 0)\n elif key == 'KEY_DOWN':\n cur_index = min(cur_index + 1, len(choices) - 1)\n self.stdscr.erase()\n for line in text:\n 
self.stdscr.addstr(f'{PADCHAR}{line}\\n')\n for index, value in enumerate(choices):\n self.stdscr.addstr('\\n')\n self.stdscr.addstr(PADCHAR)\n self.stdscr.addstr(value, color_pair(7 if index == cur_index else 1))\n self.stdscr.addstr(f'\\n\\n{PADCHAR}') \n key = self.get_key() \n return cur_index", "def list_selector(title=None,\n text=None,\n members=None,\n controller=None,\n preselect=None,\n entry=False):\n combobox = list_selector_widget(members=members,\n preselect=preselect,\n entry=entry)\n\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n d.vbox.add(combobox)\n combobox.show_all()\n\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n res=d.run()\n retval=None\n if res == gtk.RESPONSE_OK:\n retval=combobox.get_current_element()\n d.destroy()\n return retval", "def get_classes(self, code):\n \n select = v.Combobox(\n _metadata={'name':code}, \n items=self.items, \n v_model=None, \n dense=True,\n hide_details=True\n )\n \n select.observe(partial(self.store, code), 'v_model')\n \n return select", "def getOptionsNames(self) -> List[unicode]:\n ...", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def the_option_named(text: str) -> \"SelectByText\":\n return SelectByText(text)", "def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def choice(choices=[], message=\"Pick something.\", title=None):\n return dialog(\n \"choice\",\n choices=choices,\n message=message,\n title=title,\n )", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def 
choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def build_options(slot, snacks):\n \n if slot == 'Fast':\n return [\n {'text': 'Pizza', 'value': 'Pizza'},\n {'text': 'Fries', 'value': 'Fries'},\n {'text': 'Franky', 'value': 'Franky'},\n {'text': 'Burger', 'value': 'Burger'},\n {'text': 'Sandwich', 'value': 'Sandwich'}\n \n \n ]\n elif slot == 'drink':\n return [\n {'text': 'Coca-Cola', 'value': 'Coca-cola'},\n {'text': 'Appy', 'value': 'Appy'},\n \n {'text': 'Beer', 'value': 'Beer'},\n {'text': 'Frooti', 'value': 'Frooti'},\n {'text': 'Pepsi', 'value': 'Pepsi'}\n \n ]", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def _getChoices(self, acronym):\n # get matches from acronymDB\n matches = []\n if(acronym in self.acronymDB):\n matches += self.acronymDB[acronym]\n if(acronym[-1] == \"s\" and acronym[:-1] in self.acronymDB):\n matches += self.acronymDB[acronym]\n\n # create training data\n X_train, y_train = [], []\n for definition, articleID, ignored_var in matches:\n text = self.articleDB[articleID]\n X_train.append(\n ExpansionChoice(article_id=articleID, article_text=text))\n y_train.append(definition)\n\n # create y labels to group similar acronyms\n y_labels, labelToExpansion = self._processChoices(y_train)\n\n return X_train, y_labels, labelToExpansion", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def set_dropdown_b_options(value):\n options_b = []\n if value=='A':\n options_b = [{'label': 'C', 'value': 'C'},\n {'label': 'D', 'value': 'D'}]\n if value == 'B':\n options_b = [{'label': 'E', 'value': 'E'},\n {'label': 'F', 'value': 'F'}]\n return options_b", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def before_choose_candidate_listener(self, session, task):\n choices = [PromptChoice('d', 'eDit', self.importer_edit)]\n if task.candidates:\n choices.append(PromptChoice('c', 'edit Candidates',\n self.importer_edit_candidate))\n\n return choices", "def _get_completion(self, original_text, remaining_text, options):\n if self.should_hide_completions(original_text=original_text,\n remaining_text=remaining_text,\n allowed_suffixes=(\" \", 
\".\")):\n return []\n\n return [(option, len(remaining_text)) for option in options\n if option.startswith(remaining_text) and not option.startswith(\"_\")]" ]
[ "0.6981332", "0.64435107", "0.64435107", "0.6406375", "0.6282898", "0.61602914", "0.61550856", "0.6096663", "0.6079173", "0.6074226", "0.599768", "0.5979722", "0.5946701", "0.59085536", "0.58665997", "0.5852769", "0.5851758", "0.5840527", "0.58387506", "0.5816007", "0.5803215", "0.5797734", "0.57951343", "0.57936877", "0.57807535", "0.5749749", "0.57457596", "0.5732043", "0.57093215", "0.57050306", "0.5695638", "0.5680274", "0.5638338", "0.5617769", "0.56013155", "0.5580123", "0.5573768", "0.55674773", "0.55656695", "0.5561425", "0.5539777", "0.5525614", "0.55243665", "0.55211055", "0.5516054", "0.55135965", "0.5486497", "0.54864573", "0.5484098", "0.5467698", "0.5450487", "0.5444694", "0.5435837", "0.5432833", "0.542561", "0.54099566", "0.5406829", "0.5394251", "0.53907686", "0.5388395", "0.53733003", "0.5353227", "0.5352402", "0.53441244", "0.5335833", "0.5330664", "0.5320152", "0.5317789", "0.53159815", "0.5291184", "0.52660507", "0.5261751", "0.52587485", "0.5247112", "0.52468276", "0.5246636", "0.52386904", "0.523807", "0.52264065", "0.52225775", "0.521434", "0.52137464", "0.5197637", "0.5192666", "0.51882684", "0.5188186", "0.5170487", "0.516354", "0.5163382", "0.5161854", "0.5159188", "0.5158065", "0.51575136", "0.51539713", "0.5146208", "0.5145707", "0.5143474", "0.5142414", "0.51338685", "0.51279086" ]
0.6369448
4
Return a list of choices for setting the init default
def initDefaultChoices(self):
    # keep only the edit-text part of each (edit text, annotation) tuple
    # returned by getEditChoices()
    return [entry[0] for entry in self.getEditChoices()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initDefaultChoices(self):\n return []", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def get_choices(cls):\n return cls.values.items()", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def choices(self):\n return tuple(self._choices)", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def as_choices(cls, key_type=None):\n if key_type is None:\n key_type = cls.get_default_choice_type()\n return cls.enum_class.as_choices(key_type)", "def _set_default_suits(self):\n # set up suits\n suit_types = [('Spades', 1), ('Hearts', 2), ('Diamonds', 3), ('Clubs', 4)]\n # populate the list of suits\n suit_list = list()\n for s in suit_types:\n suit_list.append(Suit(s[0], s[1]))\n\n return suit_list", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def 
get_setting_choices(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n choices = setting.get('choices', None)\n\n if callable(choices):\n # Evaluate the function (we expect it will return a list of tuples...)\n return choices()\n\n return choices", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def get_template_base_dir_choices() -> list[tuple[str, str]]:\n # handle predefined choices\n choices, seen = [], set()\n for template_name in TemplateName:\n choices.append((template_name.value, template_name.label))\n seen.add(template_name.value)\n\n # handle custom choices via settings\n for template_name, display_name in getattr(settings, \"CAST_CUSTOM_THEMES\", []):\n if template_name not in seen:\n choices.append((template_name, display_name))\n seen.add(template_name)\n\n # search for template base directories\n template_directories = get_template_directories()\n template_base_dir_candidates = get_template_base_dir_candidates(template_directories)\n for candidate in template_base_dir_candidates:\n if candidate not in seen:\n choices.append((candidate, candidate))\n\n return choices", "def create_default_repo_choice(self, default_repo):\n return (default_repo, default_repo)", "def initialise_options():\r\n default_options = list(range(NUMBER_OF_TILES))\r\n default_weights = [1/NUMBER_OF_TILES]*NUMBER_OF_TILES\r\n return default_options, default_weights", "def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list", "def default_variation(random, candidates, args):\r\n return candidates", "def default_variation(random, candidates, args):\r\n return candidates", "def get_default_options():\n return GROUPS_.values()", "def __init__(self, *initial):\n self.prompt_list = list(initial)", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def choices(self, var):\r\n return (self.curr_domains or self.domains)[var]", "def choices(self, choices):\n\n self._choices = choices", "def get_choices_for_var(self, var):\n return self.choices[var]", "def get_options(self):\n return []", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)", "def default_value_list(sources: List[str] = None):\n if not default:\n return list()\n if not sources:\n return [default]\n else:\n return sources", "def _get_target_choices():\n apps = [('public', _(\"Public website\"))]\n for model, entity in registry.registry.items():\n if entity.menu:\n appname = model._meta.app_label.lower()\n apps.append((appname, unicode(entity.label)))\n return tuple(apps)", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def get_default_is_selected_index(self, choicesdata):\n\n return 0", "def __init__(self, choices, 
*args, **kwargs):\n super(RangePollChoiceForm, self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))", "def _create_defaults(self):\n return DefaultCommandOptionValues(\n min_confidence=3, output_format='vs7')", "def create_options(self):\n return []", "def Choices(cls):\n attr = '_choice_attr_' + cls.__name__\n if hasattr(cls, attr):\n return getattr(cls, attr)\n\n choices = set()\n for (k, v) in cls.__dict__.items():\n if not k.startswith('_') and issubclass(type(v), (str, unicode)):\n choices.add(v)\n for base in cls.__bases__:\n if issubclass(base, ChoiceBase) and base is not ChoiceBase:\n choices = set.union(choices, base.Choices())\n setattr(cls, attr, choices)\n\n return choices", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def form_SelectChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options)\n form['mySelect'].default = 2\n return form", "def season_choices():\n return [(s, s) for s in range(0, 3)]", "def is_a_list_of_choices(self):\n pass", "def setChoices(self, choices):\n self.getGtkObject('property_liststore').clear()\n for choice in choices:\n self.getGtkObject('property_liststore').append([str(choice)])", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def episode_choices():\n return [(e, e) for e in range(0, 2)]", "def setAll(self):\n self.setValue(self._choices_)", "def configure_list_of_choices_type_question(self, question_data):\n self.driver.find_radio_button(LIST_OF_CHOICE_RB).click()\n index = 1\n for choice in fetch_(CHOICE, from_(question_data)):\n if index > 1:\n self.driver.find(ADD_CHOICE_LINK).click()\n self.driver.find_text_box(by_xpath(CHOICE_XPATH_LOCATOR + \"[\" + str(index) + \"]\" + CHOICE_TB_XPATH_LOCATOR)).enter_text(choice)\n index += 1\n choice_type = fetch_(ALLOWED_CHOICE, from_(question_data))\n if ONLY_ONE_ANSWER == choice_type:\n self.driver.find_radio_button(ONLY_ONE_ANSWER_RB).click()\n elif MULTIPLE_ANSWERS == choice_type:\n self.driver.find_radio_button(MULTIPLE_ANSWER_RB).click()\n return self", "def get_init_list(self):\n\n return self.convert_compartments_to_list(self.init_compartments)", "def __init__(self, choiceList=None, prompt=DEFAULT_PROMPT, title=DEFAULT_TITLE):\n self.choice = None\n \n wpf.LoadComponent(self, GUI_XAML_FILE)\n \n self.Title = title\n self.lblPrompt.Content = prompt\n \n self.choicesBox.ItemsSource = choiceList", "def initDefaults(self):\n return _libsbml.Species_initDefaults(self)", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n 
[setattr(w, 'choices', choices) for w in wlist]", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def choices(self):\n self._choices = self.getChoices()\n return len(self._choices)", "def get_defaults(self):\n\t\treturn self.__defaults", "def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)", "def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def test_get_prior_string_list(self):\n categories = list(range(10))\n categories[0] = \"asdfa\"\n categories[2] = \"lalala\"\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices(['asdfa', 1, 'lalala', 3, 4, 5, 6, 7, 8, 9], \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def default_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"default_values\")", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def choose_option(self, state):\n options = [o for o in self.options if o.initiation_set[state] == 1]\n return random.choice(options)", "def setUp(self):\n current_date = date.today()\n name = 'name'\n possible_meals = [Meal(date=current_date, name=name)]\n self.possible_meals_choices = [(possible_meal.id, possible_meal.name)\n for possible_meal in possible_meals]", "def all_options():\n return _OptionRegistry.values()", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "async def choices(self, ctx, *, options):\n choices = options.split('-')\n choice = random.choice(choices)\n await ctx.send(f'My choice is\\\"{choice}\\\"')", "def get_chosen_options(user):\n 
user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def initialize_options(self):", "def initDefaults(self):\n return _libsbml.Reaction_initDefaults(self)", "def get_options(self):\r\n return self._option_values", "def calendar_choices(self):\n if not self._calendars:\n if self.authenticated:\n default = self.account.schedule().get_default_calendar()\n # {\n # \"default\" : <DEFAULT_CALENDAR>,\n # \"<CALENDAR_NAME>: <CALENDAR>,\n # ...\n # }\n self._calendars = {\n DEFAULT_CALENDAR: default,\n **{\n c.name: c\n for c in self.account.schedule().list_calendars() if c.name != default.name\n }\n }\n\n return self._calendars", "def getOptionsNames(self) -> List[unicode]:\n ...", "def default_args(self) -> Optional[list[str]]:\n _args: list[Arg] = []\n _ctx = self._select(\"defaultArgs\", _args)\n return _ctx.execute_sync(Optional[list[str]])", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def available_binary_choices() -> Iterable[str]:\n for name, _ in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n if name.startswith('Binary'):\n yield name", "def initDefaults(self):\n return _libsbml.Event_initDefaults(self)", "def form_CheckboxMultiChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('multiChoice', schemaish.Sequence(schemaish.Integer()))\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['multiChoice'].widget = formish.CheckboxMultiChoice(options)\n form['multiChoice'].default = [2]\n return form", "def default_selection(random, population, args):\r\n return population", "def form_SequenceOfStringsWithDefault(request):\n schema = schemaish.Structure()\n schema.add( 'myList', schemaish.Sequence( schemaish.String() ))\n\n form = formish.Form(schema, 'form')\n form.defaults = {'myList': ['a','b']}\n return form", "def test_model_choices_all_models(self):\n unique_action_admin = UniqueActionAdmin(UniqueAction, self.site)\n\n self.assertFalse(getattr(unique_action_admin, '_model_choices', False))\n\n model_choices = unique_action_admin.model_choices()\n\n self.assertTrue(getattr(unique_action_admin, '_model_choices'))\n self.assertTrue(isinstance(model_choices, list))", "def sel_prep(self):\n sel_blob = []\n for sel in self.blob['options']:\n if self.blob['defaultValue'] == sel['name']:\n sel_blob.append({'value': sel['name'], 'selected': 'true'})\n else:\n sel_blob.append({'value': sel['name'], 'selected': 'false'})\n\n return sel_blob", "def setChoices(self,report):\n\t\tif report is not None:\n\t\t\tbrowser = report[1]['objects']\n\n\t\t\tif browser is not None:\n\t\t\t\tbrowserChoices = 
list()\n\t\n\t\t\t\t#compute select list\n\t\t\t\tfor b in browser:\n\t\t\t\t\tif \"chrome\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_CHROME\n\t\t\t\t\telif \"firefox\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_FF\n\t\t\t\t\telif \"thunderbird\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_TH\n\n\t\t\t\t\tfor p in b['profiles']:\n\t\t\t\t\t\tformValue = str(formString)+\"_\"+p['profileName']\t\n\t\t\t\t\t\tbrowserChoices.append((formValue,b['name']+\" - \"+p['profileName']))\n\t\t\t\n\t\t\t\tch = forms.ChoiceField(label=\"Profile\",widget=forms.Select(attrs={'class':'form-control'}),choices=browserChoices)\n\t\t\t\tself.fields['choices'] = ch", "def test_default(self):\n for n in range(1, 5):\n for prefix in ['', 'git-', 'gbp-']:\n parser = GbpOptionParser('%scmd%d' % (prefix, n))\n self.assertEqual(parser.config['default_option'], 'default_default1')", "def form_SelectWithOtherChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n form['mySelect'].default = 2\n return form" ]
[ "0.8790614", "0.8089398", "0.7564882", "0.74508727", "0.70026475", "0.70026475", "0.680855", "0.67124087", "0.6624441", "0.65727", "0.653437", "0.64892524", "0.6431678", "0.6409585", "0.63181144", "0.6240514", "0.6240365", "0.62128824", "0.6163351", "0.6163266", "0.6090951", "0.6066959", "0.6054693", "0.60396624", "0.60294706", "0.60005", "0.5994685", "0.59767413", "0.5959246", "0.592687", "0.59262645", "0.59103084", "0.590184", "0.5882532", "0.5882532", "0.5859008", "0.5836419", "0.58130014", "0.5798692", "0.5789201", "0.5760558", "0.57430315", "0.5737376", "0.5732801", "0.5709087", "0.56912374", "0.56849235", "0.56539017", "0.56511205", "0.5647366", "0.56454474", "0.56372243", "0.56312835", "0.5606162", "0.56021655", "0.5598892", "0.5586636", "0.55855805", "0.5569713", "0.55686575", "0.55556387", "0.554631", "0.5521598", "0.5510379", "0.54975694", "0.54883635", "0.54830474", "0.54726905", "0.54721117", "0.5445676", "0.5418068", "0.5402634", "0.5388768", "0.53883016", "0.5386314", "0.5381717", "0.53741294", "0.53630304", "0.53518015", "0.5349285", "0.5349285", "0.5348316", "0.53463614", "0.53438973", "0.53366566", "0.53365505", "0.53365135", "0.5332077", "0.53207636", "0.5319262", "0.53119504", "0.5300134", "0.5299081", "0.5287518", "0.52822554", "0.5277455", "0.5273564", "0.526702", "0.5266721", "0.5253591" ]
0.83091384
1
Any format, prefix, suffix, or HTML info in the attrs dict
def __init__(self, name, attrs={}):
    TextFormat.__init__(self, name, attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = attr['value']\r\n\r\n return format", "def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):\n\n # Merge the attr dict and kw dict into a single attributes\n # dictionary (rewriting any attribute names, extracting\n # namespaces, and merging some values like css classes).\n attributes = {} # dict of key=(namespace,name): value=attribute_value\n if attr:\n for a, v in attr.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n if kw:\n for a, v in kw.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n\n # Add title attribute if missing, but it has an alt.\n if ('html', 'alt') in attributes and ('html', 'title') not in attributes:\n attributes[('html', 'title')] = attributes[('html', 'alt')]\n\n # Force both lang and xml:lang to be present and identical if\n # either exists. The lang takes precedence over xml:lang if\n # both exist.\n #if ('html', 'lang') in attributes:\n # attributes[('xml', 'lang')] = attributes[('html', 'lang')]\n #elif ('xml', 'lang') in attributes:\n # attributes[('html', 'lang')] = attributes[('xml', 'lang')]\n\n # Check all the HTML attributes to see if they are known and\n # allowed. Ignore attributes if in non-HTML namespaces.\n if allowed_attrs:\n for name in [key[1] for key in attributes if key[0] == 'html']:\n if name in _common_attributes or name in allowed_attrs:\n pass\n elif name.startswith('on'):\n pass # Too many event handlers to enumerate, just let them all pass.\n else:\n # Unknown or unallowed attribute.\n err = 'Illegal HTML attribute \"%s\" passed to formatter' % name\n raise ValueError(err)\n\n # Finally, format them all as a single string.\n if attributes:\n # Construct a formatted string containing all attributes\n # with their values escaped. Any html:* namespace\n # attributes drop the namespace prefix. 
We build this by\n # separating the attributes into three categories:\n #\n # * Those without any namespace (should only be xmlns attributes)\n # * Those in the HTML namespace (we drop the html: prefix for these)\n # * Those in any other non-HTML namespace, including xml:\n\n xmlnslist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if not k[0]]\n htmllist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] == 'html']\n otherlist = ['%s:%s=\"%s\"' % (k[0], k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] and k[0] != 'html']\n\n # Join all these lists together in a space-separated string. Also\n # prefix the whole thing with a space too.\n htmllist.sort()\n otherlist.sort()\n all = [''] + xmlnslist + htmllist + otherlist\n return ' '.join(all)\n return ''", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def back_to_tag(tag, attrs):\n sol = '<' + tag\n for (prop, val) in attrs:\n sol += ' ' + prop + '=\"' + val + '\"'\n sol += '>'\n return sol", "def add_attrs(value, arg):\n try:\n # Split list on comma\n kv_pairs = arg.split(\",\")\n except ValueError:\n raise template.TemplateSyntaxError(\n \"add_attrs requires as an argument a string in the format 'key:value, key1:value1, key2:value2...'\"\n )\n\n\n # Create dictionary\n html_attrs = dict()\n\n # Clean items and add attribute pairs to dictionary\n for item in kv_pairs:\n item = item.strip()\n k, v = item.split(\":\")\n html_attrs.update({k.strip():v.strip()})\n\n return value.as_widget(attrs=html_attrs)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def 
string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def attrs(**kwds):\n\n def decorate(f):\n for k in kwds:\n setattr(f, k, kwds[k])\n return f\n\n return decorate", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc = '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def _attrs(self, element, attrs):\n for attr, val in list(attrs.items()):\n element.setAttribute(attr, val)\n return element", "def date_attrs(name):\n attrs = battrs(name)\n attrs.update({'class': 'form-control datepicker'})\n return attrs", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def extend_attribute_dictionary(attributedict, ns, name, value):\n\n key = ns, name\n if 
value is None:\n if key in attributedict:\n del attributedict[key]\n else:\n if ns == 'html' and key in attributedict:\n if name == 'class':\n # CSS classes are appended by space-separated list\n value = attributedict[key] + ' ' + value\n elif name == 'style':\n # CSS styles are appended by semicolon-separated rules list\n value = attributedict[key] + '; ' + value\n elif name in _html_attribute_boolflags:\n # All attributes must have a value. According to XHTML those\n # traditionally used as flags should have their value set to\n # the same as the attribute name.\n value = name\n attributedict[key] = value", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def parse_tag_attrs(tag_str, options_d=None, font_d=None, case=\"\", **kwargs):\n attr_b = kwargs.pop(\"attr\", \"\")\n auto_b = kwargs.pop(\"auto\", False)\n font_d = kwargs.pop(\"font_d\", font_d or {})\n options_d = kwargs.pop(\"options_d\", options_d or {})\n case = kwargs.pop(\"case\", case)\n widget = kwargs.pop(\"widget\", None)\n text_w = kwargs.pop(text_s, None)\n bad_opts = []\n # INTs: height repeatdelay repeatinterval underline width; size fun fov\n for keyval in split_attrs(tag_str):\n if \"=\" in keyval:\n key, val = keyval.split(\"=\")\n val = unquote(val)\n elif keyval:\n key, val = keyval, None\n else:\n continue\n key = key.lower()\n key2, key3, key4 = key[:2], key[:3], key[:4]\n lowval = val.lower() if val else val\n key = unalias(key)\n kalias = alias(key)\n if val == \"None\": # in ('False', 'None') #\n pass\n elif key3 in (\n bg_s,\n background_s[:3],\n fg_s,\n foreground_s[:3],\n ) or kalias in (bg_s, fg_s):\n options_d.update(**{key: val})\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n options_d.update(**{key: val})\n if auto_b and compound_s not in options_d:\n options_d.update(compound=tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],) or kalias == bd_s:\n options_d.update(borderwidth=val)\n elif key4 in (command_s[:4], compound_s[:4],) or kalias in (\n command_as,\n compound_as,\n ):\n options_d.update(**{key: val})\n elif (\n key2 in (height_s[:2], width_s[:2])\n or key3 in (repeatdelay_s[:3], repeatinterval_s[:3])\n or kalias\n in (height_as, width_as, repeatdelay_as, repeatinterval_as)\n ):\n options_d.update(**{key: int(val)})\n elif (\n key2 in (cursor_s[:2],)\n or key3 == font_s[:3]\n or kalias in (cursor_as, font_as)\n ):\n options_d.update(**{key: val})\n elif key2 in (\"r\", relief_s[:2],) or kalias == relief_as:\n options_d.update(relief=val)\n if auto_b and borderwidth_s not in options_d and val != tk.FLAT:\n options_d.update(borderwidth=str(1))\n elif key2 == underline_s[:2] or kalias == underline_as:\n options_d.update(underline=-1 if val is None else int(val))\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ) or kalias in (selectbackground_as, selectforeground_as):\n options_d.update(**{key: val})\n # special for fonts\n elif key2 in (family_s[:2],) or kalias == family_as:\n font_d[family_s] = val\n elif key2 in (size_s[:2],) 
or kalias == size_as:\n try:\n font_d[size_s] = int(val)\n except ValueError:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: ERROR Setting Font Size to %r\" % val,\n Raise=True,\n )\n elif key3 in (bold_as, tk_font.BOLD[:3]) or kalias == bold_as:\n font_d[weight_s] = (\n tk_font.BOLD\n if str(val) not in (\"0\", \"False\",)\n else tk_font.NORMAL\n )\n elif key2 in (weight_s[:2],) or kalias == weight_as:\n font_d[weight_s] = val\n elif key2 in (italic_as, tk_font.ITALIC[:2]) or kalias == italic_as:\n font_d[slant_s] = (\n tk_font.ITALIC\n if str(val) not in (\"0\", \"False\",)\n else tk_font.ROMAN\n )\n elif key2 in (slant_s[:2],) or kalias == slant_as:\n font_d[slant_s] = val\n elif (\n key3 in (funderline_as, funderline_s[:3])\n or kalias == funderline_as\n ):\n font_d[underline_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n elif (\n key3 in (foverstrike_as, foverstrike_s[:3])\n or kalias == foverstrike_as\n ):\n font_d[overstrike_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n # special \"case\" implementation\n elif key3 in (case_s[:3],) or kalias == case_as:\n for s in (upper_s, capitalize_s, lower_s, title_s, swapcase_s):\n if s.startswith(lowval):\n case = s if s != capitalize_s else upper_s\n break\n elif (\n key2 == upper_s[:2]\n or key3 in (capitalize_s[:3],)\n or kalias in (upper_as, capitalize_as)\n ):\n if str(val) not in (\"0\", \"False\",):\n case = upper_s\n elif key2 in (lower_s[:2],) or kalias == lower_as:\n if str(val) not in (\"0\", \"False\",):\n case = lower_s\n elif key2 == title_s[:2] or kalias == title_as:\n if str(val) not in (\"0\", \"False\",):\n case = title_s\n elif key2 == swapcase_s[:2] or kalias == swapcase_as:\n if str(val) not in (\"0\", \"False\",):\n case = swapcase_s\n elif key in ():\n bad_opts.append((key, val))\n else:\n options_d.update(**{key: val})\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n if attr_b:\n return (\n case\n if attr_b == case_s\n else options_d.get(attr_b, font_d.get(attr_b))\n )\n return options_d, font_d, case", "def gen_tag_attrs(widget=None, options_d=None, font=None, case=None, **kwargs):\n auto_b = kwargs.get(\"auto\", False)\n case = kwargs.get(case_s, case)\n extend_b = kwargs.get(\"extend\", False)\n font = kwargs.pop(\"font\", font or {})\n index_i = kwargs.pop(\"index\", None)\n kmode_s = kwargs.get(\"kmode\", \"\") # a=alias, o=options, ''=unchanged\n options_d = kwargs.pop(\"options\", options_d or {})\n pare_b = kwargs.get(\"pare\", True)\n widget = kwargs.pop(\"widget\", widget)\n text_w = kwargs.get(text_s, None)\n recurse_b = kwargs.pop(\"recurse\", widget and isinstance(widget, TTWidget))\n fmt_s = \"\"\n font_d = {}\n w_font_d, w_options_d = {}, {}\n if index_i is not None and widget is None:\n raise Exception(\"Cannot set 'index' when 'widget' is None\")\n if widget: # and isinstance(widget, TTWidget): #\n excludes_t = () if widget.emulation_b else ()\n w_options_d = {\n k: v[-1]\n for k, v in widget.config().items()\n if len(v) == 5 and str(v[-1]) != str(v[-2]) and k not in excludes_t\n }\n try:\n w_options_d[case_s] = widget.case\n except AttributeError:\n pass\n w_font = widget.cget(font_s) # w_options_d.pop(font_s, None)\n w_font_d = get_font_dict(w_font) if w_font else {}\n if pare_b and w_font_d:\n def_w_font = widget.config(font_s)[-2]\n def_w_font_d = get_font_dict(def_w_font)\n w_font_d = pare_dict(w_font_d, def_w_font_d)\n if font:\n if isinstance(font, str):\n try:\n font = tk_font.nametofont(font)\n except 
tk.TclError:\n pass\n elif type(font) in (list, tuple):\n font = tk_font.Font(font=font)\n if isinstance(font, tk_font.Font):\n font = font.actual()\n if isinstance(font, dict):\n font_d = font\n if case: # is not None:\n options_d = _merge_dicts(options_d, dict(case=case))\n d = _merge_dicts(\n w_options_d,\n convert_font_dict_to_ttoptions_dict(w_font_d),\n options_d,\n convert_font_dict_to_ttoptions_dict(font_d),\n kwargs,\n )\n bad_opts = []\n for key, val in d.items():\n key = key.lower()\n if key in (\"auto\", \"extend\", \"kmode\", \"pare\",): # text_s, ): #\n continue\n key2, key3, key4 = key[:2], key[:3], key[:4]\n kalias = alias(key)\n koption = unalias(key)\n if kmode_s:\n if kmode_s[0] == \"a\": # alias\n keyout = kalias\n kfunc = alias\n auto_cpd, auto_bd = compound_as, bd_s\n elif kmode_s[0] == \"o\": # option\n keyout = koption\n kfunc = unalias\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n else:\n keyout = key\n kfunc = str\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n if val:\n val = quote(val)\n if (\n key3 in (bg_s, background_s[:3], fg_s, foreground_s[:3])\n or key2 == underline_s[:2]\n or kalias in (bg_s, fg_s, underline_as)\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_cpd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_cpd, tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],):\n if \"%s=%s \" % (auto_bd, 1) in fmt_s:\n if val != 1:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_bd, 1), \"%s=%s \" % (keyout, val)\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key4 in (compound_s[:4],) or kalias == compound_as:\n if \"%s=%s \" % (auto_cpd, tk.CENTER) in fmt_s:\n if val != tk.CENTER:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_cpd, tk.CENTER),\n \"%s=%s \" % (keyout, val),\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == cursor_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == font_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, get_named_font(val))\n elif key2 in (relief_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_bd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_bd, 1)\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sbd_s,\n selectborderwidth_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n # special for fonts\n elif key2 in (family_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (size_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (weight_s[:2],):\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.BOLD),\n 1\n if isinstance(val, str) and val.lower() == tk_font.BOLD\n else 0,\n )\n elif key2 == slant_s[:2]:\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.ITALIC),\n 1\n if isinstance(val, str) and val.lower() == tk_font.ITALIC\n else 0,\n )\n elif key3 in (funderline_as, funderline_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(funderline_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n elif key3 in (foverstrike_as, foverstrike_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(foverstrike_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n # special \"case\" implementation\n elif key3 == case_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(case_s), val)\n elif key2 == upper_s[:2] or key3 == capitalize_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(upper_s), val)\n elif key2 in (lower_s[:2], title_s[:2], swapcase_s[:2]):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key in 
():\n bad_opts.append((key, val))\n elif key in (text_s, text_as):\n if extend_b or widget:\n fmt_s += \"%s=%s \" % (keyout, val)\n else:\n # bad_opts.append((key, val))\n fmt_s += \"%s=%s \" % (keyout, val)\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n fmt = fmt_s.strip()\n if widget and isinstance(widget, TTWidget) and recurse_b:\n fmt = [\n fmt,\n ]\n for _, gathering in widget._get_kids(items=True):\n child = gathering[\"label\"]\n case = gathering.get(case_s, \"\")\n kid_options = {\n k: v[-1]\n for k, v in child.config().items()\n if len(v) == 5\n and str(v[-1]) != str(v[-2])\n and (k, v[-1]) not in w_options_d.items()\n and not (k in label_override_d and str(v[-1]) == \"0\")\n } #\n cf = kid_options.pop(font_s, None)\n cdf = child.config(font_s)[-2]\n if cf != cdf:\n c_font_d = pare_dict(get_font_dict(cf), get_font_dict(cdf))\n else:\n c_font_d = {}\n if case:\n kid_options.update(case=case)\n fmt.append(\n gen_tag_attrs(options=kid_options, font=c_font_d, **kwargs)\n )\n return fmt if index_i is None else fmt[index_i]", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def prepare_node_attrs(self):", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs", "def attkey_to_SVG_attribs(self,k):\n atts= k.split('@')\n o= ''\n acodes= {'C':'stroke','W':'stroke-width','S':'stroke-dasharray','O':'stroke-opacity'}\n for a in atts:\n if a[0] in acodes:\n o+= '%s=\"%s\" ' % (acodes[a[0]],a[1:])\n# elif a[0] == 'S': # Maybe do something special like this.\n# o+= 'stroke-dasharray=\"%\" ' % a[1:]\n return o", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def fix_attributes(string):\n defs = re.compile('<dl class=\"attribute\">(?P<descrip>.*?)</dl>',flags=re.DOTALL)\n name = re.compile('<code class=\"descclassname\">(?P<name>[^<]*)</code>')\n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefsub = ''\n remnsub = remain[match.start(1):match.end(1)]\n descrip = name.search(remnsub)\n if descrip:\n prefix += remnsub[:descrip.start()]\n prefix += remnsub[descrip.end():]\n prefix += remain[match.end(1):match.end(0)]\n else:\n prefix += remain[match.start(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def read_attribs(self):\n\n attribs = {}\n while 
self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def extensible_attributes():\n return 'extensibleattributedef?'", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def 
parse_tag_attrs(\n self, tags_str, options_d=None, font_d=None, case=\"\", **kwargs\n ):\n return parse_tag_attrs(\n tags_str,\n options_d,\n font_d,\n case,\n widget=self,\n text=getattr(self, \"debug_text\", None),\n **kwargs\n )", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def get_attributes(self) -> Dict[str, str]:\n pass", "def transform(attrs: dict) -> dict:\n\n pass", "def get_html_element_attributes(self):\n html_element_attributes = {\n 'class': self.css_classes or False, # Fall back to false to avoid class=\"\"\n }\n if self.should_render_as_link():\n html_element_attributes['href'] = self.url\n return html_element_attributes", "def create_descr(self, attr_name):", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def set_attrs(self, username, attrs):\n pass", "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,\r\n prettyPrint=False, indentLevel=0):\r\n\r\n encodedName = self.toEncoding(self.name, encoding)\r\n\r\n attrs = []\r\n if self.attrs:\r\n for key, val in self.attrs:\r\n fmt = '%s=\"%s\"'\r\n if isString(val):\r\n if self.containsSubstitutions and '%SOUP-ENCODING%' in val:\r\n val = self.substituteEncoding(val, encoding)\r\n\r\n # The attribute value either:\r\n #\r\n # * Contains no embedded double quotes or single quotes.\r\n # No problem: we enclose it in double quotes.\r\n # * Contains embedded single quotes. No problem:\r\n # double quotes work here too.\r\n # * Contains embedded double quotes. No problem:\r\n # we enclose it in single quotes.\r\n # * Embeds both single _and_ double quotes. This\r\n # can't happen naturally, but it can happen if\r\n # you modify an attribute value after parsing\r\n # the document. Now we have a bit of a\r\n # problem. We solve it by enclosing the\r\n # attribute in single quotes, and escaping any\r\n # embedded single quotes to XML entities.\r\n if '\"' in val:\r\n fmt = \"%s='%s'\"\r\n if \"'\" in val:\r\n # TODO: replace with apos when\r\n # appropriate.\r\n val = val.replace(\"'\", \"&squot;\")\r\n\r\n # Now we're okay w/r/t quotes. 
But the attribute\r\n # value might also contain angle brackets, or\r\n # ampersands that aren't part of entities. We need\r\n # to escape those to XML entities too.\r\n val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)\r\n\r\n attrs.append(fmt % (self.toEncoding(key, encoding),\r\n self.toEncoding(val, encoding)))\r\n close = ''\r\n closeTag = ''\r\n if self.isSelfClosing:\r\n close = ' /'\r\n else:\r\n closeTag = '</%s>' % encodedName\r\n\r\n indentTag, indentContents = 0, 0\r\n if prettyPrint:\r\n indentTag = indentLevel\r\n space = (' ' * (indentTag-1))\r\n indentContents = indentTag + 1\r\n contents = self.renderContents(encoding, prettyPrint, indentContents)\r\n if self.hidden:\r\n s = contents\r\n else:\r\n s = []\r\n attributeString = ''\r\n if attrs:\r\n attributeString = ' ' + ' '.join(attrs)\r\n if prettyPrint:\r\n s.append(space)\r\n s.append('<%s%s%s>' % (encodedName, attributeString, close))\r\n if prettyPrint:\r\n s.append(\"\\n\")\r\n s.append(contents)\r\n if prettyPrint and contents and contents[-1] != \"\\n\":\r\n s.append(\"\\n\")\r\n if prettyPrint and closeTag:\r\n s.append(space)\r\n s.append(closeTag)\r\n if prettyPrint and closeTag and self.nextSibling:\r\n s.append(\"\\n\")\r\n s = ''.join(s)\r\n return s", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def convert_attributes(cls, attrs):\n return {}", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def widget_attrs(self, widget):\n\n attrs = super(RelateField, self).widget_attrs(widget)\n\n attrs.update({'content_type': self.content_types})\n\n return attrs", "def attributes(doc, header, renderer=Attribute, item_class=DefinitionItem):\n items = doc.extract_items(item_class)\n lines = []\n renderer = renderer()\n for item in items:\n renderer.item = item\n lines += renderer.to_rst()\n lines.append('')\n return lines", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n info[ATTR_NAME] = info[ATTR_PROPERTIES]['Name'].replace('\\xa0', ' ')\n return info", "def img(self, **kwargs):\n attrs = ''\n for item in 
kwargs.items():\n if not item[0] in IMGATTRS:\n raise AttributeError, 'Invalid img tag attribute: %s'%item[0]\n attrs += '%s=\"%s\" '%item\n return '<img src=\"%s\" %s>'%(str(self),attrs)", "def gen_tag_attrs(self, *a, **kw):\n if kw.get(\"widget\", sentinel) is not None:\n raise Exception(\n \"TTToolTip.gen_tag_attrs(): 'widget' keyword must be set\"\n \" to None\"\n )\n return gen_tag_attrs(None, *a, **kw)", "def init_attrs(self):\n raise NotImplementedError", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def _style_to_basic_html_attributes(self, element, style_content,\n force=False):\n if style_content.count('}') and \\\n style_content.count('{') == style_content.count('{'):\n style_content = style_content.split('}')[0][1:]\n\n attributes = {}\n for rule in style_content.split(';'):\n split = rule.split(':')\n if len(split) != 2:\n continue\n key = split[0].strip()\n value = split[1]\n\n if key == 'text-align':\n attributes['align'] = value.strip()\n elif key == 'background-color':\n attributes['bgcolor'] = value.strip()\n elif key == 'width' or key == 'height':\n value = value.strip()\n if value.endswith('px'):\n value = value[:-2]\n attributes[key] = value\n\n for key, value in list(attributes.items()):\n if key in element.attrib and not force or key in self.disable_basic_attributes:\n # already set, don't dare to overwrite\n continue\n element.attrib[key] = value", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def domAttributesToString( node ):\n strOut = \"node has %d attribute(s):\\n\" % node.attributes.length;\n for i in range(node.attributes.length):\n attr = node.attributes.item(i);\n strOut += \"- %s:'%s'\\n\" % (attr.name, attr.value );\n return strOut;", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_", "def replace_tag_attributes(code_attrs, tag, tag_attrs):\n\n new_attrs = code_attrs.copy()\n for key, value in tag_attrs.items():\n if key in new_attrs:\n new_attrs[key] = new_attrs[key].replace(tag, value)\n\n return new_attrs", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def as_html(self):\n return mark_safe(\" 
\".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def format_link(attrs: Dict[tuple, str], new: bool = False):\n try:\n p = urlparse(attrs[(None, 'href')])\n except KeyError:\n # no href, probably an anchor\n return attrs\n\n if not any([p.scheme, p.netloc, p.path]) and p.fragment:\n # the link isn't going anywhere, probably a fragment link\n return attrs\n\n c = urlparse(settings.SITE_URL)\n if p.netloc != c.netloc:\n # link is external - secure and mark\n attrs[(None, 'target')] = '_blank'\n attrs[(None, 'class')] = attrs.get((None, 'class'), '') + ' external'\n attrs[(None, 'rel')] = 'nofollow noopener noreferrer'\n\n return attrs", "def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)", "def parseAttrs(self,attrs,date_type):\n\tattrs=copy.copy(attrs) #make sure we don't change user/group attributes\n \tattr_holders=self.getAttrHolders(attrs)\n\tmap(lambda x:x.setDateType(date_type),attr_holders)\n\tmap(lambda x:attrs.update(x.getParsedDic()),attr_holders)\n\treturn attrs", "def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs" ]
[ "0.735201", "0.6754294", "0.67166066", "0.67071074", "0.66780305", "0.65807486", "0.6522693", "0.6522693", "0.65187657", "0.6471306", "0.6269984", "0.62653935", "0.6153201", "0.6090701", "0.60323846", "0.60278016", "0.6011661", "0.60042846", "0.59841794", "0.5941162", "0.59205276", "0.5918955", "0.59121054", "0.5903962", "0.5884743", "0.5876164", "0.5857109", "0.5851559", "0.583173", "0.58274394", "0.5816038", "0.58061635", "0.5784312", "0.5755998", "0.5755998", "0.57360405", "0.57051307", "0.5701552", "0.5687975", "0.5650812", "0.5618766", "0.561154", "0.5605911", "0.56030387", "0.5602799", "0.55926436", "0.5587559", "0.5571399", "0.5567558", "0.55631375", "0.555545", "0.5550559", "0.55490625", "0.55470836", "0.55410224", "0.5519966", "0.55098814", "0.5492064", "0.547102", "0.5470936", "0.54692423", "0.5467515", "0.54661024", "0.54518676", "0.54405665", "0.5438651", "0.54003173", "0.5388153", "0.5382598", "0.5375904", "0.5375076", "0.53706104", "0.5359634", "0.5354708", "0.5354708", "0.5331472", "0.5324531", "0.53227526", "0.5316361", "0.5309617", "0.5308968", "0.53067", "0.5306182", "0.5299369", "0.52990687", "0.5287107", "0.52791494", "0.5277907", "0.5276578", "0.52742803", "0.5270845", "0.52608305", "0.52524847", "0.5244876", "0.5239417", "0.5234171", "0.5224983", "0.5215326", "0.521457", "0.5212088", "0.5203955" ]
0.0
-1
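Every row in this dump repeats the same `objective` metadata (visible on the rows below), declaring that the usable training signal is the (query, document, negatives) triple. A minimal sketch of unpacking one row accordingly — the row-dict layout matches this dump, but the helper itself and its use for contrastive training are illustrative assumptions, not part of the dataset:

```python
def iter_triplets(row: dict):
    """Yield (anchor, positive, negative) tuples as declared by the row metadata.

    Assumes row["metadata"]["objective"]["triplet"] lists field-name triples,
    e.g. [["query", "document", "negatives"]] as seen in the rows here.
    """
    for q_field, pos_field, neg_field in row["metadata"]["objective"]["triplet"]:
        anchor, positive = row[q_field], row[pos_field]
        for negative in row[neg_field]:  # one tuple per hard negative
            yield anchor, positive, negative
```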
Called by base init, after class change or format text change
def initFormat(self):
    self.formatList = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initFormat(self):\n pass", "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, text):\n\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self):\n self.text = ''", "def set_text(self):\n pass", "def post_init(self):\n\t\tpass", "def init_widget(self):\n super(UiKitTextView, self).init_widget()\n self.init_text()", "def __init__(self,\n text: str) -> None:\n\n super().__init__(text)", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def _post_init(self):\n pass", "def __post_init__(self):\n pass", "def initWidgets(self):\n self.lambdtext.setText(str(self.lambd))\n self.ptext.setText(str(self.p))", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def _init_display(self):\n raise NotImplementedError", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_objectapp_signals()", "def __init__(self, font='mediumbold'):\n\tself.set_font(font)", "def __init__(self):\r\n self.label = \"Bulk Layout Text Replace\"\r\n self.alias = \" Jake's Toolbox Alias Property True\"\r\n self.description = \"\"\r\n self.canRunInBackground = False", "def __post_init__(self):\n super().__post_init__()", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_gstudio_signals()", "def after_parsing(self):", "def __init__(self, as_text=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.as_text = as_text", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def on_origEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()\n self.__updateTranslateButton()", "def after_init(self) -> None:\n if self.options.format.lower() != \"default_notebook\":\n self.error_format = self.options.format\n if not hasattr(self, \"color\"):\n self.color = True", "def afterInit(self):", "def post_init(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self, name, time, text):\n pass", "def init(self):", "def init(self):", "def __init__(self,txt=u'',unicodeEncoding='utf-8',verbose=False,tagID=0):\n # __document capture the document level structure\n # for each sentence and then put in the archives when the next sentence\n # is processed\n super(ConTextMarkup,self).__init__(__txt=None,__rawTxt=txt,\n __SCOPEUPDATED=False,__VERBOSE=verbose,\n __tagID=tagID,\n __unicodeEncoding=unicodeEncoding)\n self.__cleanText()", "def __init__(self):\n\t\t# Setup fonts\n\t\tself.large_font = self._get_font(1,Annotator.THICK)\n\t\tself.large_font_outline = self._get_font(1,Annotator.THICK + Annotator.BORDER)\n\t\t\n\t\tself.small_font = self._get_font(0.5,Annotator.THIN)\n\t\tself.small_font_outline = self._get_font(0.5,Annotator.THIN + Annotator.BORDER)\n\t\t\n\t\t# Text colour\n\t\tself.colour = Annotator.COLOUR_BUSY\n\t\t\n\t\tself.forehead = (0,0,1,1)\n\t\tself.face = (0,0,1,1)", "def onInit(self):\n pass", "def _afterInit(self):\n pass", "def __init__(self, **kwargs):\n # We set it to True so that starting empty lines are\n # not counting as separators\n self.last_line_was_empty = True", "def _post_init(self) -> None:\n return", "def _init(self):", "def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )", "def __init__(self, text=\"\", widget=None):\n self._label_text = text\n self._widget = widget\n self._widget.on_change = self._update\n super().__init__(text=f\"{text} {widget.value}\")", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self): \r\n pass", "def init_widget(self):", "def __init__(self):\n ## Global initialization\n self.default_initialization()\n ## Initial function set\n self.selfdriven = False\n self._format_default_functions()\n ## Check descriptormodel\n self._assert_correctness()", "def __init__(self):\n self.content = \"\"", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def initWidgets(self):\n self.loctext.setText(\"{0:g}\".format(self.loc))\n self.scaletext.setText(\"{0:g}\".format(self.scale))", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def initDocTagText(self):\n self.doc, self.tag, self.text = Doc().tagtext()", "def __init__(\n self,\n type,\n text):\n self.type = type\n self.text = text", "def _init(self):\n pass", "def _initialize(self):\n \n self.view.lineEdit_3.setText(\"C,H,N,O,P,S\")\n self.view.spin_hit.setValue(20)\n self.view.lineEdit_2.setValue(10.)\n self.view.checkBox_8.setChecked(True)", "def format(self):\n ...", "def init(self) -> None:", "def update_editor ( self ):\n super( TextFontEditor, self ).update_editor()\n set_font( self )", "def __init__(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self):\n\t\tprint(\"Class initilised\")", "def __init__(self, text='', **kwargs):\n Control.__init__(self, text=text, **kwargs)", "def on_transEdit_textChanged(self):\n self.__updatePronounceButtons()\n self.__updateClearButton()", "def __init__(self, text, idx):\n self.text = text\n self.idx = idx", "def __init__(self):\n self.update_state()", "def set_initial_values(self):\n #Stores each line of the text file in a list\n self.text = []\n \n #Scrolling distance\n self.scroll = 0\n\n #Zooming level (font size) \n self.zoom = 12\n\n #Factor by which is decrement self.zoom\n self.factor = 0\n\n #Number of tabs spaces before a line\n self.indent = 0\n\n #Flag to only set up pango descriptions only once \n self.set_pc = 1\n\n #list of indetation level of all lines\n self.tab_index = []\n\n #Total line count\n self.line_count = 0\n\n #line number of line rendered off top of window \n self.min_text = 0\n #line number of line rendered off bottom of window \n self.max_text = 50\n\n #y position for cairo for the text at the top\n self.min_cairo = 20\n\n #y position for text at bottom\n self.max_cairo = 20\n\n #x positiong for indented text\n self.tab_cairo = 20", "def __init__(self):\n fmt = \"%(message)s\"\n super().__init__(fmt=fmt)\n\n self.baseline = None\n self.cut = None\n self.manual_push = 0", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def __init__(self, text):\n self.text = text\n self.letters = [letters[c] for c in self.text]\n self.width = sum(let.width + 1 for let in self.letters)\n self._offset = width\n self.is_done = False", "def __init__(self, text=None, settings=None, style='General', language='en'):\n\n self._text = None\n self._settings = None\n self._style = None\n self._language = None\n\n self.text = text\n self.settings = settings\n self.style = style\n self.language = language", "def init(self) -> None:\n self.started = False\n self.lines = []\n self.text = ''\n self.graphics = ''\n self.ids = {}\n self.first_line_added = False\n\n self.used_fonts = set()\n self.current_line_used_fonts = set()\n self.current_height = 0\n self.lines = []\n\n line_width = self.width - (self.indent if self.is_first_line else 0)\n self.current_line = PDFTextLine(\n self.fonts, line_width, self.text_align, self.line_height\n )\n\n self.last_indent = 0\n self.last_state = self.last_factor = self.last_fill = None\n self.last_color = self.last_stroke_width = None\n\n self.y_ = 0", "def _settext(self, textEntered):\n if textEntered.strip() == '':\n textEntered=self.data['initialtext']\n self.entry.enterText(textEntered)\n else:\n if callable(self.data['callback']): self.data['callback'](textEntered)\n if self.data['autoexit'] and callable(self.data['exit']):\n # NOTE not safe to call here user callback...\n taskMgr.doMethodLater(.5, self.data['exit'], '_ntryxt')", "def __init__(self, edit: QtWidgets.QTextEdit, out=None, color=None):\n self.edit = edit\n self.out = out\n self.color = color", "def on_load(self):\n self.__init__()", "def __init__():", "def __init__(self) -> None:\n str.__init__(self)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._ansi_escape_codes = True", "def do_init(self):\n\n pass", "def initialize(self):\n\t\tpass", "def run_init(self):\n InitEditor(self.root, self)", "def __init(self):\n 
print(\"Welkam tu mobail lejen\")", "def __init__(self, text, tag, start ,end):\n\n self.text = six.text_type(text)\n self.tag = copy.copy(tag)\n self.end = end\n self.start = start" ]
[ "0.7095915", "0.70883477", "0.6957401", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6801035", "0.67764556", "0.67764556", "0.6772573", "0.67218834", "0.6665987", "0.6530844", "0.6495981", "0.6494592", "0.6494592", "0.6401653", "0.6355695", "0.63224435", "0.627716", "0.627716", "0.62600374", "0.6241324", "0.6241043", "0.6223984", "0.6216441", "0.6214059", "0.62072545", "0.6179023", "0.61773074", "0.6165903", "0.6150355", "0.61494476", "0.6145963", "0.6123563", "0.6106276", "0.6106276", "0.61052555", "0.6075407", "0.606871", "0.60595924", "0.6050179", "0.6039118", "0.6025508", "0.60182106", "0.60180503", "0.5996569", "0.5996569", "0.5996569", "0.5996569", "0.5993615", "0.5956698", "0.59549457", "0.59410423", "0.5936671", "0.5926797", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.59250134", "0.5922803", "0.59159535", "0.59074825", "0.59036523", "0.59019417", "0.5898051", "0.58926487", "0.5887501", "0.5887218", "0.58803314", "0.5877826", "0.5868464", "0.58638364", "0.5862526", "0.58605254", "0.5853759", "0.5833662", "0.58296865", "0.5820315", "0.5815491", "0.58068454", "0.579537", "0.57909584", "0.57830495", "0.5776756", "0.5769101", "0.5765869", "0.5761965", "0.5755533", "0.57552737" ]
0.6490198
22
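A consistency check worth knowing when reading these rows: `document_rank` appears to be the number of negatives whose score beats `document_score` — exactly 22 of the `negative_scores` above exceed 0.6490198, the next record has 7 scores above 0.5936115, and the earlier record with score 0.0 carries rank -1. A short sketch of that inferred bookkeeping (the -1 convention for unscored documents is an assumption read off the rows, not documented anywhere in the dump):

```python
def infer_document_rank(document_score: float, negative_scores: list) -> int:
    """Rank the gold document among its negatives by retrieval score."""
    if document_score == 0.0:
        return -1  # assumed convention: document never scored by the retriever
    # scores arrive as strings in this dump, so cast before comparing
    return sum(float(s) > document_score for s in negative_scores)
```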
Add choice to edit menu list if not already there
def addChoice(self, choice, sort=False):
    if choice and choice not in self.formatList:
        self.formatList.append(choice)
        if sort:
            self.sortChoices()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_add_to_menu ( self, action ):\r\n return True", "def menu(update, context):\n\n update_message_text = update.callback_query.edit_message_text if update.callback_query else update.message.reply_text\n update_message_text(\n text='Please choose an option.',\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton('Author Details', callback_data='details'),\n InlineKeyboardButton('Help', callback_data='help'),\n ],\n [\n InlineKeyboardButton('Linkedin Profile', url=Config.OWNER_WEBSITE),\n InlineKeyboardButton('Github repo', url=Config.GITHUB_REPO_URL),\n ],\n [\n InlineKeyboardButton('Download CV', url=Config.DOWNLOAD_CV_URL)\n ]\n ]),\n )", "def add_to_menu ( self, menu_item ):\r\n pass", "def get_one_menu_option():", "def add_edit(self, edit):\n if edit not in self.edits:\n self.edits.append(edit)", "def update_choice(self, value):\n if self.p is not None:\n if value == \"none\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', \"\")\n if value == \"categories\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == \"pageid\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == \"sections\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == \"html\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())", "def initEditMenu(self):\n autocompletionMenu = QMenu(\n QCoreApplication.translate('ViewManager', 'Complete'),\n self.ui)\n autocompletionMenu.setTearOffEnabled(True)\n autocompletionMenu.addAction(self.autoCompleteAct)\n autocompletionMenu.addSeparator()\n autocompletionMenu.addAction(self.autoCompleteFromDocAct)\n autocompletionMenu.addAction(self.autoCompleteFromAPIsAct)\n autocompletionMenu.addAction(self.autoCompleteFromAllAct)\n \n menu = QMenu(QCoreApplication.translate('ViewManager', '&Edit'),\n self.ui)\n menu.setTearOffEnabled(True)\n menu.addAction(self.undoAct)\n menu.addAction(self.redoAct)\n menu.addAction(self.revertAct)\n menu.addSeparator()\n menu.addAction(self.cutAct)\n menu.addAction(self.copyAct)\n menu.addAction(self.pasteAct)\n menu.addAction(self.deleteAct)\n menu.addSeparator()\n menu.addAction(self.indentAct)\n menu.addAction(self.unindentAct)\n menu.addAction(self.smartIndentAct)\n menu.addSeparator()\n menu.addAction(self.commentAct)\n menu.addAction(self.uncommentAct)\n menu.addAction(self.toggleCommentAct)\n menu.addAction(self.streamCommentAct)\n menu.addAction(self.boxCommentAct)\n menu.addSeparator()\n menu.addAction(self.editUpperCaseAct)\n menu.addAction(self.editLowerCaseAct)\n menu.addAction(self.sortAct)\n menu.addSeparator()\n menu.addMenu(autocompletionMenu)\n menu.addAction(self.calltipsAct)\n menu.addAction(self.codeInfoAct)\n menu.addSeparator()\n menu.addAction(self.gotoAct)\n menu.addAction(self.gotoBraceAct)\n menu.addAction(self.gotoLastEditAct)\n menu.addAction(self.gotoPreviousDefAct)\n menu.addAction(self.gotoNextDefAct)\n menu.addSeparator()\n menu.addAction(self.selectBraceAct)\n menu.addAction(self.selectAllAct)\n menu.addAction(self.deselectAllAct)\n menu.addSeparator()\n menu.addAction(self.shortenEmptyAct)\n menu.addAction(self.convertEOLAct)\n \n return menu", "def __setup_menu(self):\r\n self.menu.clear()\r\n if self.data:\r\n actions = self.original_actions\r\n else:\r\n actions = (self.plugin.new_action, self.plugin.open_action)\r\n self.setFocus() # --> Editor.__get_focus_editortabwidget\r\n 
add_actions(self.menu, actions + self.additional_actions)\r\n self.close_action.setEnabled( len(self.plugin.editortabwidgets) > 1 )", "def update(self):\n\t\tprint(\"Editing %s '%s'\" % (self.getSpecString(), self.getName()))\n\t\tchoice = None\n\t\twhile (choice != 5):\n\t\t\tchoice = None \t\n\t\t\twhile (choice != 1 and choice != 2 and choice != 3 and choice != 4 and choice != 5):\n\t\t\t\tprint(\"Please select an action\")\n\t\t\t\tprint(\" 1) Edit name\")\n\t\t\t\tprint(\" 2) Edit description\")\n\t\t\t\tprint(\" 3) Add item\")\n\t\t\t\tprint(\" 4) Remove item\")\n\t\t\t\tprint(\" 5) Save and exit\")\n\t\t\t\tchoice = self.askForInteger(\"Action\")\n\n\t\t\t\tif (choice != 1 and choice != 2 and choice != 3 and choice != 4 and choice != 5):\n\t\t\t\t\tprint(\"Invalid choice!\")\n\n\t\t\tif (choice == 1):\n\t\t\t\tself.setName(self.askForString(\"You erase the list's title and write\"))\n\t\t\telif (choice == 2):\n\t\t\t\tself.setDescription(self.askForString(\"You update the list's description to read\"))\n\t\t\telif (choice == 3):\n\t\t\t\tself.addItem(self.askForString(\"Add to list\"))\n\t\t\telif (choice == 4):\n\t\t\t\tprint(self.getAllItemsStr())\n\t\t\t\tremoveIndex = self.askForInteger(\"Remove entry\")\n\t\t\t\tprint(\"Removing %s...\" % (self.items[removeIndex - 1]))\n\t\t\t\tself.removeItem(removeIndex - 1)\n\t\t\telif (choice == 5):\n\t\t\t\tprint(\"Saving %s...\" % self.getSpecString())\n\t\t\t\tself.setUpdatedAt(datetime.datetime.now())\n\t\t\t\tself.refreshYAML()\n\t\t\t\tprint(\"Saved!\")", "def edit_choice(request):\n try:\n bpo_field = BPOField.objects.get(id=ObjectId(request.POST.get('field_id')))\n if request.POST.get('action') == 'edit':\n if request.POST.get('choice') and request.POST.get('choice_index'):\n choice_index = int(request.POST.get('choice_index'))\n bpo_field.choices[choice_index] = request.POST.get('choice')\n elif request.POST.get('action') == 'add':\n if not bpo_field.choices:\n bpo_field.choices = []\n bpo_field.choices.append(request.POST.get('choice'))\n elif request.POST.get('action') == 'delete':\n bpo_field.choices.remove(request.POST.get('choice'))\n bpo_field.save()\n except Exception as e:\n return HttpResponseBadRequest(e)\n else:\n return HttpResponse(json.dumps({'status': 'success'}), mimetype=\"application/x-javascript\")", "def create_menu():", "def addMenu():\n mb.addAction(actionAccessories)\n actionAccessories.setVisible(True)", "def __editorShowMenu(self, menuName, menu, editor):\n if menuName == \"Tools\":\n if self.__menu.menuAction() not in menu.actions():\n # Re-add our menu\n self.__editors[editor] = []\n if not menu.isEmpty():\n act = menu.addSeparator()\n self.__editors[editor].append(act)\n act = menu.addMenu(self.__menu)\n self.__editors[editor].append(act)", "def addToExtrasMenu(self, menu):\n self.__editSpellingMenu = QMenu(QCoreApplication.translate(\n 'ViewManager', \"Edit Dictionary\"))\n self.__editProjectPwlAct = self.__editSpellingMenu.addAction(\n QCoreApplication.translate('ViewManager', \"Project Word List\"),\n self.__editProjectPWL)\n self.__editProjectPelAct = self.__editSpellingMenu.addAction(\n QCoreApplication.translate(\n 'ViewManager', \"Project Exception List\"),\n self.__editProjectPEL)\n self.__editSpellingMenu.addSeparator()\n self.__editUserPwlAct = self.__editSpellingMenu.addAction(\n QCoreApplication.translate('ViewManager', \"User Word List\"),\n self.__editUserPWL)\n self.__editUserPelAct = self.__editSpellingMenu.addAction(\n QCoreApplication.translate('ViewManager', \"User Exception List\"),\n 
self.__editUserPEL)\n self.__editSpellingMenu.aboutToShow.connect(\n self.__showEditSpellingMenu)\n \n menu.addAction(self.spellCheckAct)\n menu.addAction(self.autoSpellCheckAct)\n menu.addMenu(self.__editSpellingMenu)\n menu.addSeparator()", "def menu_select_option(self, app: object) -> None:\n while True:\n self.back = False\n print(\"-\" * 50)\n for key, element in self.cmd_select_option.items():\n print(f\"{key} : {element}\")\n entry = input(\n \"\\nEntrer un chiffre pour sélectionner l'option correspondante : \"\n )\n if entry == \"1\":\n self.menu_categories(app)\n elif entry == \"2\":\n save = app.view_save()\n print(\"-\" * 50 + \"\\nSubstitut(s) enregistré(s) :\\n\")\n for prod, sub in save.items():\n print(f\"Produit {prod} substitué par {sub} \")\n elif entry == \"0\":\n break\n else:\n print(\"\\nCommande incorrecte\")", "def __onAddClicked(self):\n\t\tdir_name = QFileDialog.getExistingDirectory(self, \"Select a directory\")\n\t\tif dir_name is not None:\n\t\t\ttheItem = addNewListItemCalled([dir_name], self.ui.listWidget, mutable=True)\n\t\t\tif theItem is not None:\n\t\t\t\ttheItem.setSelected(True)", "def add_specific_menu(self, menu, event, lat, lon): \n add_item = Gtk.MenuItem()\n add_item.show()\n menu.append(add_item)\n add_item = Gtk.MenuItem(label=_(\"Choose and bookmark the new reference family\"))\n add_item.connect(\"activate\", self.selectFamily)\n add_item.show()\n menu.append(add_item)\n return", "def __populateMenu(self, name, menu):\n if name not in [\"Tools\", \"PluginTools\"]:\n return\n \n editor = e5App().getObject(\"ViewManager\").activeWindow()\n \n if name == \"Tools\":\n if not menu.isEmpty():\n menu.addSeparator()\n act = menu.addMenu(self.__menu)\n act.setEnabled(editor is not None)\n elif name == \"PluginTools\" and self.__mainActions:\n self.__mainActions[-1].setEnabled(editor is not None)", "def add(self, data):\n self.menuItems.update(data)", "def create_menus( self ):", "def delete_menu():", "def edit():", "def menu(self):\n print('1) Today\\'s tasks')\n print('2) Week\\'s tasks')\n print('3) All tasks')\n print('4) Missed tasks')\n print('5) Add task')\n print('6) Delete task')\n print('0) Exit')\n self.menu_choice = input()", "def edit(self):\n\n pass", "def dummy():\n\t\t\tself.edit = True", "def updateacc(cls):\n cls.var_1.set('')\n cls.right_accentry['menu'].delete(0, 'end')\n\n # Insert list of new options (tk._setit hooks them up to var)\n temp = database3.Db03(\"\", \"\")\n new_choices = temp.accounts()\n for choice in new_choices:\n cls.right_accentry['menu'].add_command(label=choice, \\\n command=tk._setit(cls.var_1, choice, God.changedacc))\n try:\n cls.var_1.set(new_choices[0])\n except IndexError:\n cls.var_1.set('None')\n God.changedacc()", "def add_choice(self, name, value):\r\n self.choices += [{\"name\": name, \"value\": value}]", "def getMenuOption():\n return menu_option", "def __modify_submenu(self):\n menu_string = \"Modify submenu\\n\"\n menu_string += \"\\t1. Modify the list of students\\n\"\n menu_string += \"\\t2. Modify the list of disciplines\\n\"\n menu_string += \"\\t0. 
Exit\\n\"\n stop = False\n\n while not stop:\n command_list = \\\n {\n '1': self.__ui_modify_student,\n '2': self.__ui_modify_discipline,\n '0': self.__no_command\n }\n command = self.__ui_read_command(menu_string)\n\n if command in command_list.keys():\n if command == '0':\n return\n else:\n command_list[command]()\n\n else:\n print(\"Invalid command!\")", "def menu_select_mode(self, app: object) -> None:\n while True:\n for key, element in self.cmd_select_mode.items():\n print(f\"{key} : {element}\")\n entry = input(\n \"\\nEntrer un chiffre pour sélectionner l'option correspondante : \"\n )\n if entry == \"1\":\n app.create_db()\n elif entry == \"2\":\n self.menu_select_option(app)\n elif entry == \"0\":\n break\n else:\n print(\"\\nCommande non reconnu\")", "def init_editmenu(self):\n self.menubar[\"editmenu\"] = Menu(self.menubar[\"menubar\"], tearoff=0)\n self.menubar[\"editmenu\"].add_command(label=\"Undo\", command=todo)\n self.menubar[\"editmenu\"].add_separator()\n self.menubar[\"editmenu\"].add_command(label=\"Cut\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Copy\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Paste\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Delete\", command=todo)\n self.menubar[\"editmenu\"].add_command(label=\"Select All\", command=todo)\n self.menubar[\"menubar\"].add_cascade(\n label=\"Edit\", menu=self.menubar[\"editmenu\"])", "def menuSelection(self):\n \n self.selection = int(input(\"\\nWhere do you want to go? Make a selection: \"))\n \n while self.selection not in self.menu.index:\n self.selection = int(input(\"\\nWhere do you want to go? Make a selection: \"))\n \n menuCheck = str(input(str(\"\\n\" + self.menu.ix[self.selection]['name']) + \" eh? I hope it's good, People say: \" + \\\n str(self.menu.ix[self.selection]['snippet_text']) + \"\\n\\nIs this where you want to go? (Yes (y) or No (n)) \"))\n while menuCheck.lower() not in ['yes', 'y', 'no', 'n']:\n menuCheck = str(input(str(\"\\n\" + self.menu.ix[self.selection]['name']) + \" eh? I hope it's good, People say: \" + \\\n str(self.menu.ix[self.selection]['snippet_text']) + \"\\n\\nIs this where you want to go? (Yes (y) or No (n)) \"))\n \n os.system('clear')", "def menu(self, prompt, choices):\n menu = [prompt] + [\n \"{0}. {1}\".format(*choice) for choice in enumerate(choices, start=1)\n ]\n command = 'inputlist({})'.format(repr(menu))\n choice = int(self._vim.eval(command))\n\n # Vim returns weird stuff if user clicks outside choices with mouse\n if not 0 < choice < len(menu):\n return\n\n return choices[choice - 1]", "def menu_saving(self, app: object, entry: str) -> None:\n while True:\n prod = self.cmd_products.get(entry)\n alt = app.search_alt(prod)\n sub = app.relevance(alt)\n print(\"-\" * 50)\n print(f\"\\nSubstitut trouvé pour le produit {prod} : {sub}\")\n entry = input(\n \"\\nVoulez vous enregistrer le substitut dans votre liste ? 
(y/n)\"\n )\n if entry == \"y\":\n feedback = app.insert_sub(prod, sub)\n print(feedback)\n self.back = True\n break\n elif entry == \"n\":\n self.back = True\n break\n else:\n print(\"\\nCommande incorrecte\")", "def OutputMenuItems():\r\n print('''\r\n Menu of Options\r\n 1) Show current data\r\n 2) Add a new item.\r\n 3) Save Data to File\r\n 4) Exit Program\r\n ''')\r\n print() # Add an extra line for looks\r", "def menuItem(*args):\n\toptionsWindow()", "def menu_choice():\r\n choice = ' '\r\n while choice not in ['l', 'a', 'i', 'd', 's', 'x']:\r\n choice = input('Which operation would you like to perform? [l, a, i, d, s or x]: ').lower().strip()\r\n print() # Add extra space for layout\r\n return choice", "def show_menu():\n if not GD.gui.menu.item('Tools'):\n create_menu()", "def update(self, option_old, option_new=\"\"):\n if option_old == option_new:\n return\n self.pile_list.remove(option_old)\n if option_new != \"\":\n self.pile_list.append(option_new)", "def _prompt_main_menu(self, update, context, message='Please choose an option:'):\n id = context.user_data['id']\n email = context.user_data['email']\n email = 'Not supplied' if email == '' else email\n self._reply_message(update,\n f'ID: {id}\\n'\n f'Email: {email}\\n'\n f'{message}',\n keyboard=self.MAIN_STATE_OPTIONS,\n inline_keyboard=True)", "def display_choices(index, entries):\n p = \"[P] - Previous Entry\"\n n = \"[N] - Next Entry\"\n e = \"[E] - Edit Entry\"\n d = \"[D] - Delete an entry\" \n m = \"[M] - Go back to Main Menu\"\n\n menu = [p,n,e,d,m]\n\n if index == 0:\n menu.remove(p)\n if index == len(entries) - 1:\n menu.remove(n)\n\n for menu in menu:\n print(menu)", "def __ui_modify_student(self):\n menu_string = \"Modify the list of students submenu\\n\"\n menu_string += \"\\t1. Add a student\\n\"\n menu_string += \"\\t2. Remove a student\\n\"\n menu_string += \"\\t3. Update a student\\n\"\n menu_string += \"\\t4. Print the list of students\\n\"\n menu_string += \"\\t0. Exit\\n\"\n stop = False\n\n while not stop:\n command_list = \\\n {\n '1': self.__ui_add_student,\n '2': self.__ui_remove_student,\n '3': self.__ui_update_student,\n '4': self.__ui_list_students,\n '0': self.__no_command\n }\n command = self.__ui_read_command(menu_string)\n\n if command in command_list.keys():\n if command == '0':\n return\n else:\n command_list[command]()\n\n else:\n print(\"Invalid command!\")", "def choose_menu(self, n):\n self.view.menu_chosen(n, uw.Button(MENUS[n]))", "def on_actions_list(self, e):\n self.PopupMenu(self.popup_menu())", "def menu_choice():\r\n choice = ' '\r\n while choice not in ['l', 'a', 'i', 'd', 's', 'x']:\r\n choice = input('Which operation wouild you like to perform? 
[l, a, i, d, s or x]: ').lower().strip()\r\n print() # Add extra space for layout\r\n return choice", "def showMenu():\n print '''\\nIndica una opció:\n 1 Afegir contacte\n 2 Modificar contacte\n 3 Eliminar contacte\n 4 Cercar contacte\n 5 Info de l'agenda\n 0 Sortir\\n'''\n\n try:\n global menu_option\n menu_option = int(raw_input('Opció escollida: '))\n except ValueError:\n print 'Error al escollir l\\'opció'", "def addMenu():\n toolsMenu = mb.findChild(QtGui.QMenu, \"&Tools\")\n if toolsMenu:\n toolsMenu.addAction(action)", "def disable_menu(self):\n if self.tab_control.index(\"current\") == 0:\n self.edit_menu.entryconfigure('Cut', state=\"normal\")\n self.edit_menu.entryconfigure('Paste', state=\"normal\")\n self.edit_menu.entryconfigure('Delete', state=\"normal\")\n self.file_menu.entryconfigure('Open', state=\"normal\")\n elif self.tab_control.index(\"current\") == 1:\n self.edit_menu.entryconfigure('Cut', state=\"disabled\")\n self.edit_menu.entryconfigure('Paste', state=\"disabled\")\n self.edit_menu.entryconfigure('Delete', state=\"disabled\")\n self.file_menu.entryconfigure('Open', state=\"disabled\")", "def before_choose_candidate_listener(self, session, task):\n choices = [PromptChoice('d', 'eDit', self.importer_edit)]\n if task.candidates:\n choices.append(PromptChoice('c', 'edit Candidates',\n self.importer_edit_candidate))\n\n return choices", "def __showEditSpellingMenu(self):\n proj = e5App().getObject(\"Project\")\n projetOpen = proj.isOpen()\n pwl = e5App().getObject(\"Project\").getProjectDictionaries()[0]\n self.__editProjectPwlAct.setEnabled(projetOpen and bool(pwl))\n pel = e5App().getObject(\"Project\").getProjectDictionaries()[1]\n self.__editProjectPelAct.setEnabled(projetOpen and bool(pel))\n \n from QScintilla.SpellChecker import SpellChecker\n pwl = SpellChecker.getUserDictionaryPath()\n self.__editUserPwlAct.setEnabled(bool(pwl))\n pel = SpellChecker.getUserDictionaryPath(True)\n self.__editUserPelAct.setEnabled(bool(pel))", "def custom_choice(update: Update, context: CallbackContext) -> int:\r\n update.message.reply_text(\r\n 'Primero agrega un título a tu comentario, por ejemplo \"Atención\"'\r\n )\r\n\r\n return TYPING_CHOICE", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def Adjust_Menu( self, menuoptions = 0):\r\n pass\r\n #base_tree = 6\r\n #profile_tree = 7\r\n #if( menuoptions == 0 ):\r\n # self.treeview_menu.entryconfig( base_tree , state=\"active\" )\r\n # self.treeview_menu.entryconfig( profile_tree , state=\"disabled\" )\r\n # self.menu.entryconfig( 4 , state=\"active\" )\r\n # self.menu.entryconfig( 5 , state=\"disabled\" )\r\n #elif(menuoptions == 1):\r\n # self.treeview_menu.entryconfig(base_tree ,state=\"disabled\")\r\n # self.treeview_menu.entryconfig(profile_tree 
,state=\"active\")\r\n # self.menu.entryconfig(4 ,state=\"disabled\")\r\n # self.menu.entryconfig(5 ,state=\"active\")\r", "def edit_game_pressed(self) -> None:\n from ba.internal import getclass\n if not self._playlist:\n return\n self._show_edit_ui(gametype=getclass(\n self._playlist[self._selected_index]['type'],\n subclassof=ba.GameActivity),\n settings=self._playlist[self._selected_index])", "def update_namelist_menu(self):\n new_nml = wx.Menu() # build new menu\n\n # populate entries and bind their selection\n for i,nml in enumerate(self.input_file.namelists.keys()):\n item = new_nml.Append(i, self.input_file.namelists[nml].name)\n self.Bind(wx.EVT_MENU, self.SelectNamelist, item, id=i)\n\n # replace old menu in the 1st position with updated one (0-based indexing)\n self.menubar.Replace(self.nml_menu_index, new_nml, '&Namelists')\n\n # reset the namelist entries that are displayed\n self.nmlpanel.reset(unset_namelist=True) # ensure no namelist is currently selected\n\n self.statusbar.SetStatusText(\"Choose a namelist from the menu\", 1)", "def populate_entry_boxes(event):\r\n with open('Question_pool.txt', 'r+') as fp:\r\n w = event.widget\r\n value = w.get(ANCHOR)\r\n if edit_mode == FALSE:\r\n list_box.delete(ANCHOR)\r\n new.set(value)\r\n list_box.insert(ANCHOR)", "def edit_item(todo_list):\r\n item = select_item(todo_list, \"Please enter the item number you wish to \"\r\n \"edit\\nEnter a negative number or zero to \"\r\n \"cancel\")\r\n if item >= 0:\r\n while True:\r\n value = clean_input(\"Which value would you like to edit? Enter:\\n1\"\r\n \" for the Item Text (Currently: {0})\\n2 for \"\r\n \"the Item Priority (Currently: {1})\\n3 to \"\r\n \"Cancel and Exit\".format(todo_list[item].text,\r\n str(todo_list[item].\r\n priority)))\r\n if value == 1: # Item Text Change\r\n print(\"The Current Text is: {0}\".format(todo_list[item].text))\r\n todo_list[item].text = input(\"New Text:\\n\")\r\n elif value == 2: # Item Priority Change\r\n print(\"The Current Priority is: {0}\".format(str(todo_list[item]\r\n .priority)))\r\n todo_list[item].priority = check_priority_overlap(\r\n int(clean_input(\"New Priority:\")), todo_list)\r\n # elif value == 3: # Item Group Change\r\n # print(f\"The Current Group is: {todo_list[item].group}\")\r\n # todo_list[item].group = int(clean_input(\"New Group Number:\"))\r\n elif value == 3: # Exit Changing Menu\r\n break\r\n else:\r\n print(\"Invalid Input - Please Try Again\")\r\n return", "def onSaveMenu(self, item):\n self.dialog = SaveDialog()\n self.dialog.doModal(self.onSaveChosen)\n return 1", "def main_menu():\n while True:\n choice = input(MAIN_MENU)\n clean()\n if choice.lower() == \"a\":\n add_task()\n elif choice.lower() == \"s\":\n search_options()\n elif choice.lower() == \"q\":\n break\n else:\n print(\"'{}' invalid option! 
Please try again\".format(choice))", "def request_context_menu(self, pos):\n super(ItemListView, self).request_context_menu(pos)\n self.get_selected()\n self.manage_actions()\n self.display_context_menu(pos)", "def right_click(self, event):\n\n super().right_click(event)\n self.popup_menu.add_command(label=\"Edit..\", command=self.edit)\n\n self.popup_menu.tk_popup(event.x_root, event.y_root, 0)", "def right_click(self, event):\n\n super().right_click(event)\n self.popup_menu.add_command(label=\"Edit..\", command=self.edit)\n\n self.popup_menu.tk_popup(event.x_root, event.y_root, 0)", "def add_option(self, label, action, type_func):\n _type = \\\n { \"command\" : self._menu.add_command,\n \"checkbutton\" : self._menu.add_checkbutton,\n \"radiobutton\" : self._menu.add_radiobutton\n }\n\n if label in self._options:\n raise DuplicateCommandException(label)\n\n self._options[label] = action\n _type[type_func](label=label, command=action)", "def edit(self,item=None):\r\n raise AbstractError\r\n return False", "def addMenu(self):\n menu = self.interface.getPulldownMenu(0)\n actionBefore = menu.actions()[8]\n menu.insertAction(actionBefore, self.action)", "def OnInfoEdit(self,event):\r\n selections = self.list.GetSelections()\r\n if not selections: return bell()\r\n item = self.items[selections[0]]\r\n if self.gInfoBox.IsModified():\r\n self.data.setInfo(item,self.gInfoBox.GetValue())", "def add_file_option(self, name, callback):\n item = self.file_menu.Append(-1, name, name)\n self.Bind(wx.EVT_MENU, callback, item)", "def menu_items():\n def show():\n form.show();\n form.activateWindow()\n form.raise_()\n\n lst = []\n lst.append((\"Import Programmableweb\", show))\n \n return tuple(lst)", "def menu_selection(question=\"Hello\", option=[]):\n DebugMessage(f\"\"\"def:menu_selection | question={question} | option={option}\"\"\")\n\n # Add option='Quit' if does not exist and display the menu\n if \"Quit\" not in option:\n option.append(\"Quit\")\n\n def displaymenu(option):\n \"\"\"Sub function used to manage case Quit was not a given option (User should always be able to quit)\"\"\"\n DebugMessage(f\"\"\"def:displaymenu | option={option}\"\"\")\n print(question)\n print(\"Options:\" + str(option))\n response = input(\"$> \")\n\n for opt in option:\n if response.lower() == opt.lower():\n DebugMessage(f\"User selected a valid option:{opt}\")\n if opt == 'Quit':\n exit(0)\n return opt\n print(f\"{response}, is not a valid option\")\n print(gui_bar)\n displaymenu(option)\n\n return displaymenu(option)", "def _addClickedSlot(self):\r\n\r\n index = self._model.add()\r\n self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)\r\n self.addButton.setEnabled(False) # We have to wait until editing is finished to avoid an invalid model\r\n self._editClickedSlot()", "def menu_save():\n\n while True:\n\n _ = os.system(\"clear\")\n menu = ['Yes', 'No']\n ret = submenu(menu, \"Save Changes?\")\n\n if ret == 'Yes':\n return ret\n elif ret == 'No':\n return ret\n else:\n print(\"Invalid Option!\")\n util.pause()", "def on_directory_list_row_activated(self, *args):\n\t\tself.on_button_edit_clicked(self.buttonEdit)", "def return_add_filter_menu(update, context):\n add_filter(update, context)\n\n logger.info(\"User [%s] selected to return to previous menu, [Add Filter], \"\n \"from [Filter Option Selection / Show Filters]\",\n update.callback_query.message.chat.first_name)\n return SELECT_FILTER", "def menu():\n\tprint (\"\\n\\tSeleccionar una opcion\")\n\n\tprint 
(\"\\t1.- Resistencia en un Alambre \")\n\n\tprint (\"\\t2.- Voltaje\")\n\n\tprint (\"\\t3.- Corriente\")\n\n print (\"\\t4.- Resistencia\")\n\n\tprint (\"\\t5.- salir\")", "def handle_select(self):\n #self.selected = input('>> ')\n self.selected = '0'\n if self.selected in ['Q', 'q']:\n sys.exit(1)\n elif self.selected in ['B', 'b']:\n self.back_to_menu = True\n return True\n elif is_num(self.selected):\n if 0 <= int(self.selected) <= len(self.hrefs) - 1:\n self.back_to_menu = False\n return True\n else:\n print(Colors.FAIL +\n 'Wrong index. ' +\n 'Please select an appropiate one or other option.' +\n Colors.ENDC)\n return False\n else:\n print(Colors.FAIL +\n 'Invalid input. ' +\n 'Please select an appropiate one or other option.' +\n Colors.ENDC)\n return False", "def option_two():\n if ADD_PRODUCTS == {}:\n print \"\\n**No products availabe**\" #Cannot to buy\n press_enter()\n reset()\n main_menu()\n else:\n ask_if_want()", "def enforce_menu_state(self):\n if self.config.starbound_data_dir:\n self.openfile_menu.setEnabled(True)\n self.openname_menu.setEnabled(True)\n if self.world:\n self.worldinfo_menu.setEnabled(True)\n self.goto_menu.setEnabled(True)\n self.to_spawn_menu.setEnabled(True)\n self.to_spawn_menu.setText('Go to Spawn Point ({:d}, {:d})'.format(\n *map(int, self.world.metadata['playerStart'])))\n else:\n self.worldinfo_menu.setEnabled(False)\n self.goto_menu.setEnabled(False)\n self.to_spawn_menu.setEnabled(False)\n self.to_spawn_menu.setText('Go to Spawn Point')\n else:\n self.openfile_menu.setEnabled(False)\n self.openname_menu.setEnabled(False)\n self.worldinfo_menu.setEnabled(False)\n self.goto_menu.setEnabled(False)\n self.to_spawn_menu.setEnabled(False)\n self.to_spawn_menu.setText('Go to Spawn Point')", "def reset_namelist_menu(self):\n new_nml = wx.Menu() # build new menu\n\n # add single element, don't bind it to anything\n nmlItem = new_nml.Append(wx.ID_ANY, '--No File Loaded--', '--No File Loaded--')\n\n # replace the second menu, index=1\n self.menubar.Replace(self.nml_menu_index, new_nml, '&Namelists')\n\n self.namelist = None # there is no longer a current namelist\n self.statusbar.SetStatusText(\"Namelist: --No File Loaded--\", 1)", "def transfer_menu():\n print(\"What type of transfer do you want to use?\")\n for key in sorted(TRANSFER_MENU_SELECTIONS):\n print(\"[%s] %s\" % (key, TRANSFER_MENU_SELECTIONS[key]))\n choice = raw_input(\"> \")\n while choice not in list(TRANSFER_MENU_SELECTIONS.keys()):\n choice = raw_input(\"> \")\n return choice", "def test_admin_menu_wrong_choice(self, inputs):\n inputs.side_effect = ['8', '5']\n\n assert Admin(object).admin_menu('test@gmail.com') is True", "def add_choice(self, choice: Choice) -> None:\n self._choices.append(choice)", "def response_change(self, request, obj):\n opts = obj._meta\n\n msg = 'The menu item \"%s\" was changed successfully.' 
% force_unicode(obj)\n\n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + \"You may edit it again below.\")\n return HttpResponseRedirect(request.path)\n\n elif \"_addanother\" in request.POST:\n self.message_user(request, msg + ' ' + (\"You may add another %s below.\" % force_unicode(opts.verbose_name)))\n return HttpResponseRedirect(obj.menu_item.menu.get_add_page_url())\n\n else:\n self.message_user(request, msg)\n return HttpResponseRedirect(obj.menu_item.menu.get_edit_url())", "def main_menu(ftp):\n print(\"What would you like to do?\")\n for key in sorted(MAIN_MENU_SELECTIONS):\n print(\"[%s] %s\" % (key, MAIN_MENU_SELECTIONS[key][0]))\n choice = raw_input(\"> \")\n while choice not in list(MAIN_MENU_SELECTIONS.keys()):\n choice = raw_input(\"> \")\n handle_main_menu_choice(choice, ftp)", "def display_main_menu(my_list1):\n\n user_options = \"\"\"\n \\nWould you like to:\n A. Add a new item\n B. View list\n C. Delete first item in list\n D. Quit the program\n \"\"\"\n\n while True:\n # Collect input and include your if/elif/else statements here.\n print user_options\n user_input = raw_input(\">>> \").upper()\n\n if user_input == \"A\":\n add_to_list(my_list1)\n elif user_input == \"B\":\n view_list(my_list1)\n elif user_input == \"C\":\n delete_first_item(my_list1)\n elif user_input == \"D\":\n break\n else:\n print \"Sorry, I don't know what you mean. Please try again.\"", "def edit_tools(self, e):\n #GETTING SELECTION\n\n self.selected_item = self.user_inventory.selection()\n self.select_name = self.user_inventory.item([i for i in self.selected_item], \"values\")[0]\n self.select_entdate = self.user_inventory.item([i for i in self.selected_item], \"values\")[3]\n\n self.df_same_name = self.df_user.query(\"title == @self.select_name\")\n #this is the selected one for sure\n self.df_the_selected_item = self.df_same_name.loc[self.df_same_name[\"entry date\"] == self.select_entdate]\n\n #GETTING THE INDEX NUMBER OF THE SELECTION IN .CSV FILE\n self.index_select = self.df_the_selected_item.index\n self.index_select_number = self.index_select.tolist()\n\n #bottom buttons appear:\n self.changing_item_label.config(text=\"Now editing \"+self.select_name+\" that added on \"+self.select_entdate+\":\")\n\n self.delete_but = Button (self.bottom_frame, text=\"DELETE\", command=self.delete_button)\n self.delete_but.place(relx=0.1, rely=0.7, relwidth=0.28, anchor=\"w\")\n\n self.servings_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n self.serv_drop = Combobox(self.bottom_frame, value=self.servings_list, state=\"readonly\")\n self.serv_drop.place(relx=0.5, rely=0.7, relwidth=0.2, anchor=CENTER)\n\n \n self.serv_but = Button(self.bottom_frame, text=\"CHANGE AMOUNT\", command=self.change_amount_button, state=\"disabled\")\n self.serv_but.place(relx=0.9, rely=0.7, relwidth=0.28, anchor=\"e\")\n\n self.serv_drop.bind(\"<<ComboboxSelected>>\", self.activate_button)", "def _optionsmenu_restart():\n self.input_box.delete(1.0, END)\n pass", "def onToSystemSelected(self, item):\n if not item:\n self.btnAddTradeRoute.disable()\n self.btnTradeAllRoute.disable()\n self.btnAddOneTimeTrade.disable()\n else:\n self.btnAddTradeRoute.enable()\n self.btnTradeAllRoute.enable()\n self.btnAddOneTimeTrade.enable()", "def add_or_update_item(self):\n# print '\\n\\n\\nadd_or_update'\n item = Item.get_by(test_id=self.test.id, order=self.current_item)\n type_id = self.answer_frame.answer_type_combo.get_active()\n if item:\n# print item.question\n item.question.delete()\n for op in item.option:\n# print 
op\n# print type(op)\n op.delete()\n item.type = u(self.get_item_type(type_id))\n# print type(item)\n# item.update()\n else:\n item = Item(order=self.current_item, type=unicode(self.get_item_type(type_id)))\n self.test.item.append(item)\n\n session.commit()\n dir_path = os.path.join(self.data_path, \"test_files/%s/%s\"%(self.test.id, item.order))\n if not os.path.isdir(dir_path):\n os.mkdir(dir_path)\n\n start, end = self.question_frame.buffer.get_bounds()\n qtext = unicode(self.question_frame.buffer.get_text(start, end))\n img_filename = \"\"\n if len(self.question_frame.question_vbox.get_children()) > 2:\n img_filename = self.prepare_img(self.question_frame.img_filename, dir_path, \"q\")\n item.question = Question(text=qtext, img=u(img_filename))\n\n# if type_id == 1:#txt\n# start, end = self.answer_frame.buffer.get_bounds()\n# atext = self.answer_frame.buffer.get_text(start,end)\n# item.option.append(Option(correct=True, text=unicode(atext), img=u\"\"))\n if type_id in (1,2):\n for i in range(4):\n atext = img_filename = u\"\"\n if self.answer_frame.buffer[i].get_char_count() !=0:\n start, end = self.answer_frame.buffer[i].get_bounds()\n atext = self.answer_frame.buffer[i].get_text(start,end)\n if len(self.answer_frame.option_vbox[i].get_children()) > 2:\n img_filename = self.prepare_img(self.answer_frame.img_filename[i], dir_path, \"op%s\"%i)\n if atext or img_filename:\n acorrect = self.answer_frame.correct_btn[i].get_active()\n item.option.append(Option(correct=acorrect, text=unicode(atext), img=unicode(img_filename)))\n elif type_id == 3:\n if self.answer_frame.answer_combo.get_active() == 0:\n acorrect = True\n else:\n acorrect = False\n item.option.append(Option(correct=acorrect, text=u\"\", img=u\"\"))\n\n# item.save()\n session.commit()", "def misc_menu(self):\n # info needed to separate edit and view widgets in self.widget_classes\n name_test_current = [\n (\"Editor\", lambda x: x.lep_type == 'EDITOR', self.edit_widget.__class__),\n (\"Viewer\", lambda x: x.lep_type != 'EDITOR', self.view_widget.__class__),\n ]\n\n menu = QtWidgets.QMenu()\n for name, is_one, current in name_test_current:\n # list Editor widgets, then Viewer widgets\n for widget_class in [i for i in self.widget_classes if is_one(i)]:\n\n def cb(checked, widget_class=widget_class):\n self.set_widget(widget_class=widget_class)\n\n act = QAction(f\"{name}: {widget_class.lep_name}\", self)\n act.setCheckable(True)\n act.setChecked(widget_class == current)\n act.triggered.connect(cb)\n menu.addAction(act)\n\n button = self.control_menu_button\n point = button.position().toPoint() if isQt6 else button.pos() # Qt6 documentation is wrong.\n global_point = button.mapToGlobal(point)\n menu.exec_(global_point)", "def render_selection_menu(request: Request):\n provider_data = open_for_reading()\n return templates.TemplateResponse(\"update_provider_form.html\", {\n \"request\": request,\n \"provider_data\": provider_data\n })", "def __add_credit_menu(self):\n log.debug(\"Displaying __add_credit_menu\")\n # Create a payment methods keyboard\n keyboard = list()\n # Add the supported payment methods to the keyboard\n # Cash\n keyboard.append([telegram.KeyboardButton(self.loc.get(\"menu_cash\"))])\n # Telegram Payments\n if self.cfg.ccard[\"credit_card_token\"] != \"\":\n keyboard.append([telegram.KeyboardButton(self.loc.get(\"menu_credit_card\"))])\n # Keyboard: go back to the previous menu\n keyboard.append([telegram.KeyboardButton(self.loc.get(\"menu_all_cancel\"))])\n # Send the keyboard to the user\n 
self.bot.send_message(self.chat.id, self.loc.get(\"conversation_payment_method\"),\n reply_markup=telegram.ReplyKeyboardMarkup(keyboard, one_time_keyboard=True))\n # Wait for a reply from the user\n selection = self.__wait_for_specific_message(\n [self.loc.get(\"menu_cash\"), self.loc.get(\"menu_credit_card\"), self.loc.get(\"menu_all_cancel\")],\n cancellable=True)\n # If the user has selected the Cash option...\n if selection == self.loc.get(\"menu_cash\"):\n # Go to the pay with cash function\n self.bot.send_message(self.chat.id,\n self.loc.get(\"payment_cash\", user_cash_id=self.user.identifiable_str()))\n # If the user has selected the Credit Card option...\n elif selection == self.loc.get(\"menu_credit_card\"):\n # Go to the pay with credit card function\n self.__add_credit_cc()\n # If the user has selected the Cancel option...\n elif isinstance(selection, CancelSignal):\n # Send him back to the previous menu\n return", "def edit():\n database.ask(mode='single')\n F = database.check(single=True)\n if F and hasattr(F,'edit'):\n name = database[0]\n F.edit(name)", "def _newFileMenuItem(self):\n\n dialogs = Dialogs(self.view)\n\n path = self._newFileWizard()\n\n #see if we want to make a blank scene or not\n msg = 'How should the new file be created?'\n BLANK = 'Make a blank maya scene'\n EXISTING = 'Use a copy of an existing file'\n\n choice = dialogs.radioButtonDialog(msg, [BLANK, EXISTING])\n\n if choice == BLANK:\n msg = 'Final confirmation:'\n msg += '\\n\\nCreate blank maya file at \"%s\"?' % path\n dialogs.confirmPrompt(msg)\n self.model.createFile(path)\n\n elif choice == EXISTING:\n src_path = dialogs.fileDialog(\n self.model.project.getScenesDir(),\n self.model.project.getDialogFilters())\n\n msg = 'Please confirm:'\n msg += '\\n\\nCopy \"%s\" to new file \"%s\"?' 
% (src_path, path)\n dialogs.confirmPrompt(msg)\n self.model.copyFile(src_path, path)\n\n msg = 'New file successfully created!'\n msg += '\\n\\nLocation: %s' % path\n msg += '\\n\\nPlease check out your new file to begin work on it.'\n dialogs.infoPrompt(msg)", "def create_edit_menu(master: Widget) -> None:\r\n\r\n def create_mode_buttons(master: Widget, mode_var: IntVar) -> None:\r\n \"\"\"Create mode buttons with the variable MODE_VAR and the parent MASTER.\"\"\"\r\n\r\n add = Radiobutton(master, text='Add', font=self.FONT_NORMAL,\r\n variable=mode_var, value=0)\r\n remove = Radiobutton(master, text='Remove', font=self.FONT_NORMAL,\r\n variable=mode_var, value=1)\r\n toggle = Radiobutton(master, text='Toggle', font=self.FONT_NORMAL,\r\n variable=mode_var, value=2)\r\n\r\n add.pack(anchor=W, padx=self.WIDGET_PAD, pady=(self.WIDGET_PAD,0))\r\n remove.pack(anchor=W, padx=self.WIDGET_PAD, pady=(self.WIDGET_PAD,0))\r\n toggle.pack(anchor=W, padx=self.WIDGET_PAD, pady=self.WIDGET_PAD)\r\n\r\n self.edit_menu = LabelFrame(master, text='Editing', font=self.FONT_SMALL,\r\n bg=self.MAIN_BG)\r\n self.edit_menu.pack(side=TOP, fill=X, pady=self.WIDGET_PAD)\r\n\r\n self.edit_mode = IntVar()\r\n self.edit_mode.set(0)\r\n\r\n create_mode_buttons(self.edit_menu, self.edit_mode)", "def _set_edit_mode(self, flag):\n\t\tself.directoryList.set_sensitive(flag)\n\t\tself._adjust_widgets()", "def on_edit_clicked(self, obj):\n store, node = self.list.selection.get_selected()\n if not node:\n return\n \n name = cuni(self.list.model.get_value(node, 0))\n if name == _('default'): # the default style cannot be edited\n return\n style = self.sheetlist.get_style_sheet(name)\n StyleEditor(name, style, self)", "def handle_selection_main(self):\n choice = self.get_input()\n if choice == '1':\n self.display_cust()\n elif choice == '2':\n self.is_user = False\n self.display_eng()", "def _editClickedSlot(self):\r\n\r\n index = self.propertiesTableView.selectionModel().currentIndex()\r\n if index.isValid():\r\n self.propertiesTableView.edit(index)", "def _update_dynamic_menu_entry(self,param_name):\n param,po = self.get_parameter_object(param_name,with_source=True)\n currently_dynamic = param_is_dynamically_generated(param,po)\n if hasattr(param,'_value_is_dynamic') and not currently_dynamic:\n self._right_click_param = param_name\n state = \"normal\"\n else:\n self._right_click_param = None\n state = \"disabled\"\n self.popup_menu.entryconfig(\"dynamic\",state=state)\n self.dynamic_var.set(currently_dynamic or \\\n param_name in self.allow_dynamic)", "def response_add(self, request, obj, post_url_continue='../%s/'):\n opts = obj._meta\n pk_value = obj._get_pk_val()\n\n msg = '\"%s\" was successfully added to the \"%s\" menu.' % (\n force_unicode(obj),\n obj.menu_item.menu\n )\n\n if \"_continue\" in request.POST:\n self.message_user(request, msg + ' ' + \"You may edit it again below.\")\n return HttpResponseRedirect(post_url_continue % pk_value)\n\n elif \"_addanother\" in request.POST:\n self.message_user(request, msg + ' ' + (\"You may add another %s below.\" % force_unicode(opts.verbose_name)))\n return HttpResponseRedirect('%s?menu=%s' % (\n request.path,\n obj.menu_item.menu.pk,\n ))\n\n else:\n self.message_user(request, msg)\n return HttpResponseRedirect(obj.menu_item.menu.get_edit_url())" ]
[ "0.63300127", "0.618695", "0.61733586", "0.6054791", "0.5977106", "0.5969435", "0.5963501", "0.5931149", "0.5929675", "0.5912512", "0.59039843", "0.58893913", "0.58734757", "0.58428335", "0.5820409", "0.57708883", "0.5754952", "0.5752344", "0.5730824", "0.5724889", "0.5722107", "0.57057875", "0.5695492", "0.5677488", "0.5668011", "0.5644372", "0.5637081", "0.561492", "0.5602625", "0.5585683", "0.5568792", "0.55636376", "0.5555384", "0.55545646", "0.5527672", "0.55148613", "0.5506504", "0.55048394", "0.549757", "0.5493651", "0.54924846", "0.5489806", "0.54879993", "0.54746294", "0.5472227", "0.54545295", "0.54545015", "0.5449607", "0.5446442", "0.54464227", "0.54307866", "0.54278904", "0.5427094", "0.5413265", "0.54088885", "0.5407213", "0.5391536", "0.539071", "0.5378482", "0.5377564", "0.5372762", "0.5372762", "0.5359066", "0.53505737", "0.5348153", "0.53438205", "0.5343667", "0.5335146", "0.53338325", "0.53311837", "0.5328516", "0.5321256", "0.5310604", "0.5307977", "0.5302919", "0.52987367", "0.52975374", "0.5288763", "0.5281987", "0.5280365", "0.5275382", "0.52716553", "0.5270838", "0.5264443", "0.5264122", "0.5263297", "0.52604735", "0.5257842", "0.525644", "0.52399683", "0.5235504", "0.52325284", "0.522844", "0.52274096", "0.5218372", "0.5208874", "0.5205905", "0.52011126", "0.5195198", "0.5195073" ]
0.5936115
7
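The documents in this record and its neighbors — `initFormat`, `addChoice`, and `sortChoices` below — all operate on the same `self.formatList`, so they read as methods of one choice-style format class. As a sketch of how they fit together (the class name is hypothetical and any base class is omitted; only the three method bodies and their docstring queries come from the dataset rows):

```python
class ChoiceFormat:  # hypothetical container for the three dataset documents
    def initFormat(self):
        """Called by base init, after class change or format text change."""
        self.formatList = []

    def addChoice(self, choice, sort=False):
        """Add choice to edit menu list if not already there."""
        if choice and choice not in self.formatList:
            self.formatList.append(choice)
            if sort:
                self.sortChoices()

    def sortChoices(self):
        """Sort menu list choices."""
        self.formatList.sort()
```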
Sort menu list choices
def sortChoices(self):
    self.formatList.sort()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SortList(self, key: callable = str.lower):\n temp_list = self.Items\n temp_list.sort(key=key)\n # delete contents of present listbox\n self.delete(0, Tags.End.value)\n # load listbox with sorted data\n for item in temp_list:\n self.insert(Tags.End.value, item)", "def main():\n seq = [48, 11, 45, 92, 32, 61, 65, 57, 29, 96]\n print(selection_sort(seq))", "def application_command_autocomplete_choice_sort_key(choice):\n return choice['name']", "def get_menu_items(self) -> List[str]:\n return sorted(self._items()) # return a copy", "def on_combo_sort_col_names_currentIndexChanged(self, index):\n if self.ui.sort_radio_asc.isChecked():\n self.model.setSort(index, Qt.AscendingOrder)\n else:\n self.model.setSort(index, Qt.DescendingOrder)\n self.model.select()", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def make_act_decision(self, decision):\n choices = decision.choices()\n choices.sort(key=lambda x: self.act_priority(decision, x))\n return choices[-1]", "def select_sort_method():\n st.sidebar.markdown('### Sort method:')\n sort_select = st.sidebar.selectbox('', ['Alphabetically', 'FTE Salary'],\n index=1)\n return sort_select", "def sort(self):\n for section, section_items in self.items():\n if sorted(section_items) == list(section_items):\n continue\n\n section_dict = {k: v for k, v in section_items.items()}\n\n for k in list(section_items):\n self.remove_option(section, k)\n\n for k, v in sorted(section_dict.items()):\n self.set(section, k, v)", "def input_user_choice_sorting(self):\r\n try:\r\n user_choice = input(\"Classer par\\n Ordre alphabétique (entrez '1')\\n Classement ELO (entrez '2')\\n\")\r\n if user_choice == '1' or user_choice == '2':\r\n return user_choice\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n print(\"Veuillez choisir 1 ou 2\")\r\n return self.input_user_choice_sorting()", "def sort_by_type(self):\n # sort_by_type_sitem = self.locator_finder_by_idx(self.sort_by_type_id, 30)\n # sort_by_type_sitem = sort_by_type_sitem.find_element_by_xpath(\"./..\")\n # while True:\n # try:\n # sort_by_type_sitem.click()\n # break\n # except ElementNotInteractableException:\n # time.sleep(1) \n if self.current_package_version() == semver.VersionInfo.parse(\"3.8.0\"):\n sort_by_type = '//*[@id=\"collectionsDropdown\"]/ul[3]/li[3]/a/label'\n sort_by_type_sitem = self.locator_finder_by_xpath(sort_by_type)\n else:\n sort_by_type_sitem = self.locator_finder_by_xpath(self.sort_by_type_id)\n\n sort_by_type_sitem.click()\n time.sleep(2)", "def sortby(self):\n ...", "def sort_options(command):\n command.params.sort(key=lambda p: p.name)\n return command", "def __editSortSelectedLines(self):\n editor = self.activeWindow()\n if editor:\n editor.sortLines()", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def order_queryset_by_sort_order(get, qs):\n\n def get_string_from_tuple_list(lstTuples, number):\n \"\"\"Get the string value corresponding to a number in a list of number-string tuples\"\"\"\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack\n\n # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\n def order_queryset_by_tuple_list(qs, sOrder, sListName):\n \"\"\"Order a queryset on field [sOrder], 
which is a number from a list of tuples named [sListName]\"\"\"\n\n # Get a list of tuples for this sort-order\n tpList = build_choice_list(sListName)\n # Determine sort order: ascending is default\n bReversed = False\n if (sOrder[0:1] == '-'):\n # A starting '-' sign means: descending order\n sOrder = sOrder[1:]\n bReversed = True\n\n # Order the list of tuples alphabetically\n # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)\n tpList = sorted(tpList, key=operator.itemgetter(1))\n # Order by the string-values in the tuple list\n return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)\n\n # Set the default sort order\n sOrder = 'woord' # Default sort order if nothing is specified\n # See if the form contains any sort-order information\n if ('sortOrder' in get and get['sortOrder'] != ''):\n # Take the user-indicated sort order\n sOrder = get['sortOrder']\n\n # The ordering method depends on the kind of field:\n # (1) text fields are ordered straightforwardly\n # (2) fields made from a choice_list need special treatment\n if (sOrder.endswith('handedness')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handedness\")\n elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handshape\")\n elif (sOrder.endswith('locprim')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Location\")\n else:\n # Use straightforward ordering on field [sOrder]\n ordered = qs.order_by(sOrder)\n\n # return the ordered list\n return ordered", "def change_sort(self, sorting_choice):\r\n self.message = \"place have been sorted by: {}\".format(sorting_choice)\r\n self.place_list.sort(sorting_choice)\r\n self.root.ids.entriesBox.clear_widgets()\r\n self.create_widget()\r\n sort_index = self.sort_choices.index(sorting_choice)\r\n self.current_sort = self.sort_choices[sort_index]", "def display_actor_list(self):\r\n actor_list = list()\r\n for actor in players_table:\r\n actor_list.append(actor)\r\n user_choice = self.input_user_choice_sorting()\r\n print(\"Liste de tous les acteurs: \")\r\n if user_choice == '1':\r\n actor_list.sort(key=lambda x: x['Nom'])\r\n for player in actor_list:\r\n print(player)\r\n elif user_choice == '2':\r\n actor_list.sort(reverse=True, key=lambda x: x['ELO'])\r\n for player in actor_list:\r\n print(player)", "def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )", "def choose_sort_key(self):\n\n global st_sort_key\n global st_reverse_sort\n\n sort_choices = [\n ('*Reverse*', None),\n ('Company name', stock.stock_key_name),\n ('Symbol', stock.stock_key_symb),\n ('Price', stock.stock_key_price),\n ('Change', stock.stock_key_change),\n ('Change percent', stock.stock_key_change_percent)\n ]\n\n self.lock.acquire()\n self.clear_main()\n w = self.windows['MAIN']\n line = 1\n\n for choice, func in sort_choices:\n w.addstr(line, 0, '%2d' % line, curses.A_BOLD | curses.color_pair(1))\n w.addstr(line, 3, choice)\n line += 1\n\n self.refresh()\n\n # Wait for the user to give is a key.\n while True:\n c = self.stdscr.getch()\n\n if c < ord('1') and c > ord('9'):\n continue\n\n index = c - ord('1')\n\n if index < len(sort_choices):\n break\n\n self.lock.release()\n\n # Set the new sort function.\n if index == 0:\n st_reverse_sort = 
not st_reverse_sort\n else:\n _, st_sort_key = sort_choices[index]\n\n self.lock.acquire()\n self.display_portfolio(self.active_portfolio)\n self.lock.release()", "def sort_by_name(self):\n # sort_by_name_sitem = self.locator_finder_by_idx(self.sort_by_name_id)\n # sort_by_name_sitem = sort_by_name_sitem.find_element_by_xpath(\"./..\")\n # while True:\n # try:\n # sort_by_name_sitem.click()\n # break\n # except ElementNotInteractableException:\n # time.sleep(1)\n \n if self.current_package_version() == semver.VersionInfo.parse(\"3.8.0\"):\n name = '//*[@id=\"collectionsDropdown\"]/ul[3]/li[2]/a/label'\n sort_by_name_sitem = self.locator_finder_by_xpath(name)\n else:\n sort_by_name_sitem = self.locator_finder_by_xpath(self.sort_by_name_id)\n sort_by_name_sitem.click()\n time.sleep(2)", "def selection_sort(lista):\n for index in range(0, len(lista)):\n min_index = index\n\n for right in range(index + 1, len(lista)):\n if lista[right] < lista[min_index]:\n min_index = right\n\n lista[index], lista[min_index] = lista[min_index], lista[index]", "def add_arguments(self, actions):\n actions = sorted(actions, key=attrgetter('option_strings'))\n super(SortingHelpFormatter, self).add_arguments(actions)", "def sort(self, args):\n if not args:\n self.err_print('One argument required')\n return\n\n _key = args[0]\n cur = self.ui.leftwin.highlighted().data\n try:\n ind = song.tags.index(_key)\n cur.change_sort(ind)\n self.ui.rightwin.disp()\n except:\n self.err_print('\"{}\" is not a valid key to sort by'.format(_key))", "def get_all_menu():", "def sorter(Plugin):\n return Plugin.order", "def selection_sort(l):\n walk = 0\n while walk < len(l):\n i = walk\n while i < len(l):\n if l[i] < l[walk]:\n # swap i and walk\n tmp = l[walk]\n l[walk] = l[i]\n l[i] = tmp\n i += 1\n walk += 1\n return", "def selSort(L):\n\tfor i in range(len(L) - 1):\n\t\tminIndx = i\n\t\tminVal= L[i]\n\t\tj = i + 1\n\t\twhile j < len(L):\n\t\t\tif minVal > L[j]:\n\t\t\tminIndx = j\n\t\t\tminVal= L[j]\n\t\t\tj += 1\n\t\ttemp = L[i]\n\t\tL[i] = L[minIndx]\n\t\tL[minIndx] = temp", "def sorted_options(sort_options):\n return [\n {\n \"title\": v[\"title\"],\n \"value\": (\n \"-{0}\".format(k)\n if v.get(\"default_order\", \"asc\") == \"desc\"\n else k\n ),\n }\n for k, v in sorted(\n sort_options.items(), key=lambda x: x[1].get(\"order\", 0)\n )\n ]", "def test_categories_are_sorted(self):\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])", "def sorted_options(options):\n return sorted(options, key=lambda _o: _o.number)", "def _sort_modes(self):\n sort_idx = np.lexsort((self.modes[:, 1], self.modes[:, 0], self.modes[:, 2]))\n self._modes = self.modes[sort_idx]", "def menu(self, prompt, choices):\n menu = [prompt] + [\n \"{0}. 
{1}\".format(*choice) for choice in enumerate(choices, start=1)\n ]\n command = 'inputlist({})'.format(repr(menu))\n choice = int(self._vim.eval(command))\n\n # Vim returns weird stuff if user clicks outside choices with mouse\n if not 0 < choice < len(menu):\n return\n\n return choices[choice - 1]", "def displaySorted(self):\r\n os.system('cls')\r\n for i in self.sortedList:\r\n print(str(i[2]) + \": \" + i[0].showRule())", "def sort_priors(self):\n return", "def sort_songs(self, current_sort_option):\n self.songs.sort_songs(current_sort_option)\n SongsToLearnApp.create_widgets(self)", "def uncheck_all_sort(self):\n\n self.param_list = []\n self.ageSort.setChecked(False)\n self.sexSort.setChecked(False)\n self.speciesSort.setChecked(False)\n self.genotypeSort.setChecked(False)\n self.subjectIDSort.setChecked(False)\n self.weightSort.setChecked(False)\n self.birthSort.setChecked(False)\n self.fluorescenceSort.setChecked(False)\n self.imagesegSort.setChecked(False)\n self.rasterSort.setChecked(False)", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def reorder( self ):\n self.sorted.sort(self.compareFunction)", "def sort_list(user_input):\n user_input.sort()\n return user_input # added a return statement for a cleaner looking main function", "def _sortDialog(self) -> Tuple[Any, Any, Any]:\n dialog = QDialog()\n dialog.setFixedSize(300, 150)\n dialog.setWindowTitle('Sort')\n dialog.setWindowIcon(QIcon(':sort.png'))\n\n # layouts\n dialogLayout = QVBoxLayout()\n formLayout = QFormLayout()\n orderRadioLayout = QVBoxLayout()\n buttonBoxLayout = QHBoxLayout()\n\n # sort by column combobox\n sortLabel = QLabel('Sort by:')\n sortComboBox = QComboBox()\n sortComboBox.addItems(self._modelHeaders[:4])\n\n # order choosing radio buttons\n orderLabel = QLabel('Order:')\n orderRadioAsc = QRadioButton('ascending')\n orderRadioAsc.setChecked(True)\n orderRadioDesc = QRadioButton('descending')\n orderRadioLayout.addWidget(orderRadioAsc)\n orderRadioLayout.addWidget(orderRadioDesc)\n\n formLayout.addRow(sortLabel, sortComboBox)\n formLayout.addRow(orderLabel, orderRadioLayout)\n\n # Ok and Cancel buttons\n buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n buttonBox.accepted.connect(dialog.accept)\n buttonBox.rejected.connect(dialog.reject)\n buttonBoxLayout.addStretch()\n buttonBoxLayout.addWidget(buttonBox)\n buttonBoxLayout.addStretch()\n\n dialogLayout.addLayout(formLayout)\n dialogLayout.addLayout(buttonBoxLayout)\n dialogLayout.setAlignment(Qt.AlignCenter)\n\n dialog.setLayout(dialogLayout)\n\n dialogResponse = dialog.exec()\n\n return dialogResponse, sortComboBox.currentIndex(), orderRadioAsc.isChecked()", "def _sortHandler(self) -> None:\n response, columnIndex, ascending = self._sortDialog()\n order = 
Qt.AscendingOrder if ascending else Qt.DescendingOrder\n if response:\n self._mainFileView.sortByColumn(columnIndex, order)", "def sort(self):\r\n return self.sort_targets([self])", "def sort(self):\n self.model_list.sort()\n for model in self.model_list:\n model.sort()", "def selection_sort(self, data):\n for i in range(len(data)-1, 0, -1):\n i_max = 0\n for j in range(1, i+1):\n if data[j] > data[i_max]:\n i_max = j\n tmp = data[i]\n data[i] = data[i_max]\n data[i_max] = tmp\n print \"pass\", i, data", "def selection_sort(cls, student_list):\n end_index = len(student_list) - 1\n for i in range(len(student_list) - 1):\n large_index = cls.get_largest_index(student_list, end_index + 1)\n student_list[large_index], student_list[end_index] = \\\n student_list[end_index], student_list[large_index]\n end_index -= 1", "def sort_results(self):\n pass", "def sort_plans(request):\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n select = request.GET['sortp']\n items = Product.objects.filter(category__icontains='P')\n if select == 'LtoH':\n results = items.order_by('price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'HtoL':\n results = items.order_by('-price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'AtoZ':\n results = items.order_by('name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'ZtoA':\n results = items.order_by('-name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})", "def selection_sort(items):\n # Repeat until all items are in sorted order\n # Find minimum item in unsorted items\n # Swap it with first unsorted item\n current = 0\n minimum = 0\n first = 0\n while not is_sorted(items):\n if items[current] < items[minimum]:\n minimum = current\n\n elif current == len(items) - 1:\n items[minimum], items[first] = items[first], items[minimum]\n first += 1\n current = first\n minimum = first\n \n else:\n current += 1", "def sort_list(self,list_):\r\n list_.sort()", "def start_sort():\n global data\n if algo_box.get() == \"Bubble Sort\":\n bubble_sort(data, display_data, speed_scale.get())\n elif algo_box.get() == \"Merge Sort\":\n merge_sort(data, display_data, speed_scale.get())", "def sort(self, col, order):\r\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\r\n self.mylist = sorted(self.mylist,\r\n key=operator.itemgetter(col))\r\n if order == QtCore.Qt.DescendingOrder:\r\n self.mylist.reverse()\r\n self.emit(SIGNAL(\"layoutChanged()\"))", "def as_sorted_list(options):\n if len(options) > 0:\n options.sort(None, key=lambda o: o.number)\n return options", "def on_sort(self, param, state):\n if state > 0: # From unchecked to checked\n self.grouped = False\n self.uncheck_group()\n if param not in self.param_list:\n self.param_list.append(param)\n else: # From checked to unchecked\n if param in self.param_list:\n if len(self.param_list) == 1:\n self.param_list = []\n else:\n self.param_list.remove(param)\n self.sorted_labels = utils.sort_by_param(self.nwb_path_list.values(), self.param_list)\n if self.param_list:\n self.sorted = True\n else:\n self.sorted = False\n self.musketeers_widget.session_widget.update_text_filter()\n self.musketeers_widget.session_widget.populate(self.sorted_labels)", "def sorted(self): \n pass", "def select_sort_by_name_ascendant(self):\n msg = \"The new order of the items is by ascendant name\"\n with 
self.allure.step(msg):\n self.__product_sort.select_by_text('Name (A to Z)')\n self.allure.attach_image(self.driver, msg)", "def selection_sort(L):\n for i in range(len(L)):\n # Find the index of the smellest item in L[i:] and swap that item with the item at index i.\n\n index_of_smallest = get_index_of_smallest(L,i)\n L[index_of_smallest], L[i] = L[i], L[index_of_smallest]", "def reverseSelectionSort(l):\r\n for k in range(len(l) - 1):\r\n\r\n min_pos = k\r\n for j in range(k + 1, len(l)):\r\n if l[j] > l[min_pos]:\r\n min_pos = j\r\n\r\n l[min_pos], l[k] = l[k], l[min_pos]", "def menu_items():\r\n menu_name = \"Coloring\"\r\n algorithm_list = [[\"Brute Force\", board_brute_force_coloring],\r\n [\"separator\", \"separator\"],\r\n [\"Greedy Coloring\", board_greedy_coloring],\r\n [\"Tabu Coloring Search\", board_tabu_coloring],\r\n [\"Tabu Pre-Coloring Search\", board_tabu_precoloring]]\r\n \r\n return [menu_name, algorithm_list]", "def sort(self, col, order):\r\n self.emit(SIGNAL(\"layoutAboutToBeChanged()\"))\r\n self.mylist = sorted(self.mylist,\r\n key=operator.itemgetter(col))\r\n if order == Qt.DescendingOrder:\r\n self.mylist.reverse()\r\n self.emit(SIGNAL(\"layoutChanged()\"))", "def sort_results(self, sort_option):\r\n self.model.sort_data(sort_option)", "def selection_sort(lst):\n l = len(lst)\n for i in range(l - 1):\n pos = i\n for j in range(i + 1, l):\n if lst[j] < lst[pos]:\n pos = j\n\n if pos > i:\n lst[i], lst[pos] = lst[pos], lst[i]\n\n return lst", "def sorting(list_object): # Takes in a ListItem object and returns the\r\n # priority value - from w3schools.com\r\n return list_object.priority", "def SelectionSort(ulist):\n for i in range(len(ulist)):\n mini = ulist[i]\n mpos = i;\n for j in range(i, len(ulist)):\n if mini < ulist[j]:\n mini = ulist[j]\n mpos = j\n ulist[i], ulist[mpos] = ulist[mpos], ulist[j]", "def selection_sort(a_list):\n for slot in reversed(range(len(a_list))):\n pos_of_max = 0\n for location in range(1, slot + 1):\n if a_list[location] > a_list[pos_of_max]:\n pos_of_max = location\n a_list[slot], a_list[pos_of_max] = a_list[pos_of_max], a_list[slot]\n return a_list", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def selection_sort(items):\n\n n = len(items)\n for j in range(n):\n # Find the index of the smallest item in the range(j,n)\n i_min = j\n for i in range(j + 1, n):\n if (items[i] < items[i_min]):\n i_min = i\n\n # Swap the items at j and i_min if needed.\n if i_min != j:\n items[j], items[i_min] = items[i_min], items[j]\n\n return items", "def sort_according_to_choice(x, choices):\n x_sorted = np.zeros_like(x) * np.nan\n # Get values of chosen entries, and put them into first column\n x_sorted[:, 0] = x[np.arange(x.shape[0]), choices.astype(int)]\n # and everything else into the next columns\n others = np.vstack([x[i, np.where(np.arange(x.shape[1]) != c)]\n for i, c in enumerate(choices)])\n x_sorted[:, 1:] = others\n return x_sorted", "def sort_according_to_choice(x, choices):\n x_sorted = np.zeros_like(x) * np.nan\n # Get values of chosen entries, and put them into first column\n x_sorted[:, 0] = x[np.arange(x.shape[0]), choices.astype(int)]\n # and everything else into the next columns\n others = np.vstack([x[i, np.where(np.arange(x.shape[1]) != c)]\n for i, c in enumerate(choices)])\n x_sorted[:, 1:] = others\n return x_sorted", "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def OnReorder( self, event ):\n column = self.columns[event.GetColumn()]\n if column.sortOn:\n 
# multiple sorts for the click...\n columns = [ self.columnByAttribute( attr ) for attr in column.sortOn ]\n diff = [ (a,b) for a,b in zip( self.sortOrder, columns ) if b is not a[1]]\n if not diff:\n self.sortOrder[0] = (not self.sortOrder[0][0], column)\n else:\n self.sortOrder = [\n (c.defaultOrder,c) for c in columns \n ] + [ (a,b) for (a,b) in self.sortOrder if b not in columns]\n else:\n if column is self.sortOrder[0][1]:\n # reverse current major order\n self.sortOrder[0] = (not self.sortOrder[0][0], column)\n else:\n self.sortOrder = [(column.defaultOrder,column)] + [\n (a,b) \n for (a,b) in self.sortOrder if b is not column \n ]\n # TODO: store current selection and re-select after sorting...\n self.reorder()\n self.Refresh()", "def click_timed_sorting_button(self):\n self.my_sorted_list = self.sorting.sorting_alg(self.my_list)\n self.label_2[\"text\"] = self.set_my_sorted_list_label()", "def do_sort(request, item_container, app_name, my_title, app_types=[]):\n\n class dms_itemForm ( forms.Form ) :\n sections = forms.CharField(required=False,\n widget=forms.Textarea( attrs={'rows':5, 'cols':40,\n 'style':'width:50%;'}) )\n\n my_item = item_container.item\n objs = []\n has_user_folder = False\n user_perms = UserEditPerms(request.user.username,request.path)\n\n # wurden Zwischentitel geloescht, ergaenzt, umgeordnet?\n if request.POST.has_key('sections_form'):\n s = request.POST['sections']\n if s != item_container.container.sections:\n item_container.container.sections = s\n item_container.container.save()\n change_values = request.POST.has_key('drag_item_form')\n if change_values:\n items, sections, d_sections = get_folder_content(item_container, False, app_types)\n order_by_ids, new_sections_str = do_resort(request.POST['var_order_by_0'])\n n = 0\n c = []\n for i in items:\n if app_types==[] or i.item.app.name in app_types:\n has_changed = False\n if order_by_ids[i.item.id][0] != i.order_by:\n i.order_by = order_by_ids[i.item.id][0]\n has_changed = True\n sec = order_by_ids[i.item.id][1]\n if sec != i.section:\n i.section = sec\n has_changed = True\n if has_changed:\n i.save()\n # --- wurde die Reihenfolge der Zwischentitel geaendert?\n n = new_sections_str\n if item_container.container.sections != n:\n item_container.container.sections = n\n item_container.container.save()\n items, sections, d_sections = get_folder_content(item_container, False, app_types)\n js_head, drag_list, input_str, n_drag_titles = get_drag_list(sections, d_sections)\n max_items = len(items)\n # --- Zwischentitel\n data_init = {'sections' : decode_html(item_container.container.sections,) }\n f = dms_itemForm(data_init)\n tabs = [('tab_sections' , ['sections',]), ]\n sec_content = get_tabbed_form(tabs, help_form, 'lecture' , f, False)\n\n vars, user_perms = get_base_vars(request, item_container, 'frame-main-manage')\n v = { 'objs' : objs,\n 'js_head' : js_head,\n 'drag_list' : drag_list,\n 'input_str' : input_str,\n 'max_items' : n_drag_titles,\n 'id' : my_item.id,\n 'title' : my_title,\n 'sub_title' : my_item.title,\n 'name' : my_item.name,\n 'action' : get_folderish_actions(request, user_perms, item_container, app_name,\n item_container.item.has_comments,\n {'browseable_mode': False,\n 'navigation_mode': False}),\n 'sec_content': sec_content\n }\n vars.update(v)\n vars['image_url'] = ''\n vars['text'] = ''\n vars['text_more'] = ''\n return render_to_response ( 'app/base_sort.html', vars )", "def sort_results_by(driver, sorting_criteria):\n if sorting_criteria.lower() == 'relevance':\n return\n button = 
'//select[@id=\"jserp-sort-select\"]'\n option_path = '//option[@value=\"DD\"]'\n time.sleep(3)\n try:\n driver.find_element_by_xpath(button).click()\n except Exception as e:\n print(e)\n print(\" Could not sort results by '{}'\".format(sorting_criteria))\n else:\n time.sleep(3)\n try:\n driver.find_element_by_xpath(option_path).click()\n except Exception as e:\n print(\" Could not select 'sort by' option\")\n else:\n time.sleep(3)", "def sort(self):\n self.fragment_list.sort()", "def selection_sort(L):\r\n n = len(L)\r\n for i in range(n-1):\r\n min_index = i\r\n for j in range(i + 1, n):\r\n if L[j] < L[min_index]:\r\n min_index = j\r\n if i != min_index:\r\n swap(L, i, min_index)\r\n return L", "def _reorder_collected(self, data):\n priority = {\n 'post': 1,\n 'get': 2,\n 'put': 2,\n 'patch': 2,\n 'head': 2,\n 'options': 2,\n 'delete': 3,\n }\n data = sorted(\n data,\n key=lambda x: priority.get(getattr(x, 'name', ''), 4))\n return data", "def selection_sort(unsorted):\n n = len(unsorted)\n _sorted = []\n for _ in range(0, n):\n val = min(unsorted)\n _sorted.append(val)\n unsorted.remove(val)\n del unsorted", "def sort(self):\n self.list.sort(key=lambda x: ''.join)", "def sort_list(self, key_):\n options = {\n 'index': 0,\n 'name' : 1,\n 'surname': 2,\n 'email': 3,\n 'phone': 4,\n }\n if key_ in options.keys():\n key_ = options.get(key_)\n\n return(sorted(self.contacts, key = lambda x: x[key_]))", "def reversesort(self):\n ...", "def get_menu_items(self) -> typing.List[typing.Tuple[str, typing.List[typing.Tuple[str, typing.Callable[[], None]]]]]: #this method is to be queried by the root frame when it is creating the menu bar at the top of the screen and needs options to put in it\n return []", "def sort(request):\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n select = request.GET['sort']\n if select == 'LtoH':\n results = Product.objects.order_by('price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'HtoL':\n results = Product.objects.order_by('-price')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'AtoZ':\n results = Product.objects.order_by('name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})\n elif select == 'ZtoA':\n results = Product.objects.order_by('-name')\n return render(request, \"products.html\",\n {\"products\": results, 'stars': stars})", "def __roughSort ( self ):\n\n #-- 1 --\n # [ optionString := string for the 2nd argument of\n # getopt.getopt, based on self.switchSpecs ]\n optionString = self.__buildOptionString ( )\n \n #-- 2 --\n # [ if sys.argv is valid according to getopt.getopt ->\n # return (optList, argList) as getopt.getopt does\n # else ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution ]\n try:\n result = getopt.getopt ( sys.argv[1:], \n optionString )\n return result\n except getopt.GetoptError, detail:\n usage ( self.switchSpecs, self.posSpecs, str(detail) )", "def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)", "def display_choices(index, entries):\n p = \"[P] - Previous Entry\"\n n = \"[N] - Next Entry\"\n e = \"[E] - Edit Entry\"\n d = \"[D] - Delete an entry\" \n m = \"[M] - Go back to Main Menu\"\n\n menu = [p,n,e,d,m]\n\n if index == 0:\n menu.remove(p)\n if index == len(entries) - 1:\n menu.remove(n)\n\n for menu in menu:\n print(menu)", "def selection_sort(my_list):\n if len(my_list) < 2:\n return my_list\n for index in range(0, 
len(my_list)-1, +1):\n index_of_min = index\n for location in range(index, len(my_list)):\n if my_list[location] < my_list[index_of_min]:\n index_of_min = location\n\n temp = my_list[index]\n my_list[index] = my_list[index_of_min]\n my_list[index_of_min] = temp\n\n return my_list", "def get_queryset(self):\n qs = super(SortForm, self).get_queryset()\n\n qs = self.pre_sort(qs)\n\n # Ensure that the form is valid\n if not self.is_valid():\n return qs\n\n # Do Sorting\n sorts = self.cleaned_data.get('sort', [])\n order_by = []\n for sort in sorts:\n param = self.HEADERS[abs(sort) - 1]['column']\n if sort < 0:\n param = '-' + param\n order_by.append(param)\n\n if order_by:\n qs = qs.order_by(*order_by)\n\n qs = self.post_sort(qs)\n\n return qs", "def sort(self, *args, **kargs):\n list.sort(self, *args, **kargs)\n self.emit('modified')", "def selection_sort(l):\n # Raise value\n if not isinstance(l, list):\n raise TypeError(\"Not a list\")\n \n # Initialize variables to count\n r = c = w = 0\n\n for i in range(len(l)):\n # Assign the smallest to the first item of the unsorted segment\n index_temp_min_value = i \n # Loop iterates over the unsorted items\n for j in range(i + 1, len(l)):\n c += 1 \n r += 2 \n if l[j] < l[index_temp_min_value]:\n index_temp_min_value = j \n \n c += 1\n if index_temp_min_value != i:\n # swap values of the lowest unsorted ele with the first unsorted ele \n l[i], l[index_temp_min_value] = l[index_temp_min_value], l[i]\n w += 2\n r += 2\n\n return c, r, w", "def sort(self):\n # sort the contents of the container alphabetically\n # this is done automatically whenever an item is added/removed from the Container\n self.items.sort(key=lambda item: item.name)", "def sort(self, col, order):\n self.layoutAboutToBeChanged.emit()\n self.mylist = sorted(self.mylist,\n key=operator.itemgetter(col))\n if order == Qt.DescendingOrder:\n self.mylist.reverse()\n self.layoutChanged.emit()", "def main():\n extension_choices = {}\n os.chdir(\"FilesToSort.old\")\n for file_name in os.listdir('.'):\n if os.path.isdir(file_name):\n continue\n\n file_extension = file_name.split('.')[-1]\n if file_extension not in extension_choices:\n choice = input(\"What file type would you like to sort {} files into? \".format(file_extension))\n extension_choices[file_extension] = choice\n try:\n os.mkdir(choice)\n except FileExistsError:\n pass\n\n os.rename(file_name, \"{}/{}\".format(extension_choices[file_extension], file_name))", "def sort_1(l):\n pass", "def sort_entries(self):\n if not len(self.student_list):\n print('There is no contents to sort')\n return\n\n opt = self.input_options(['n', 'a', 'g'], 1, 'Sort by name(n) or average(a) or grade(g)')\n if opt.upper() == 'N':\n self.print_dataframe(self.student_list.sort_values(by=['name', 'average'], ascending=[True,False]))\n elif opt.upper() == 'A' or opt.upper() == 'G':\n self.print_dataframe(self.student_list.sort_values(by=['average', 'name'], ascending=[False,True]))", "def _choose_best_option(self):", "def pre_sort(self, qs):\n return qs", "def sort(self):\r\n self.candidates.sort(key=self.sortFitness)\r\n return", "def sort_my_hands(self):\n self.hands_list.sort(reverse=True)", "def sorted_options(sort_options):\n return [\n dict(\n title=v['title'],\n value=('-{0}'.format(k)\n if v.get('default_order', 'asc') == 'desc' else k),\n )\n for k, v in\n sorted(sort_options.items(), key=lambda x: x[1].get('order', 0))\n ]" ]
[ "0.6233976", "0.6216146", "0.6190673", "0.598364", "0.59764874", "0.5956966", "0.59059286", "0.5865083", "0.58410734", "0.58394575", "0.5783629", "0.5779133", "0.5769487", "0.57629895", "0.5747787", "0.57295066", "0.57123274", "0.5697192", "0.5693883", "0.567952", "0.56586474", "0.5655346", "0.5652303", "0.5623778", "0.5623775", "0.5617385", "0.56035554", "0.56031", "0.5595827", "0.55749387", "0.55714166", "0.5563576", "0.5557752", "0.55563396", "0.55479735", "0.55420405", "0.5537565", "0.5537032", "0.55322117", "0.552344", "0.55215013", "0.5512228", "0.55085874", "0.5495249", "0.546591", "0.5461679", "0.5459923", "0.5458141", "0.5453572", "0.54527736", "0.54426736", "0.5435866", "0.5434547", "0.54276747", "0.54241467", "0.54236794", "0.54180545", "0.5412435", "0.54123664", "0.5407363", "0.5406927", "0.5404948", "0.540453", "0.5402241", "0.5385556", "0.53854823", "0.5383349", "0.5383333", "0.5383333", "0.53727335", "0.53676426", "0.53614444", "0.5357062", "0.5353569", "0.5352455", "0.5350882", "0.5346744", "0.53456855", "0.5342039", "0.53332895", "0.5330249", "0.5322918", "0.53225327", "0.53191566", "0.53188604", "0.530259", "0.5302119", "0.53016263", "0.5300189", "0.5285734", "0.5284568", "0.52814835", "0.5277829", "0.5275635", "0.52731085", "0.5263272", "0.5244007", "0.52386945", "0.52359205", "0.5232977" ]
0.77374196
0
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False): return TextFormat.formatOutput(self, storedText, titleMode, internal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def format_title(self, data):\n return data", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def get_text(downgrade_titles=False):", "def PROPER(text):\n return text.title()", "def title(self, string):\n return self.bold(string)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def print_with_title(title, content, before='', after='', hl='='):\n cont_maxlen = max(len(s) for s in content.split('\\n'))\n hl_len = max(cont_maxlen, len(title))\n print('{}{}\\n{}\\n{}{}'.format(before, title, hl * hl_len, content, after))", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title 
====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def helptext(self):\n return \"\"", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def get_title():", "def __str__(self):\n date_str = self.date.strftime(self.journal.config['timeformat'])\n title = date_str + \" \" + self.title\n body = self.body.strip()\n\n return \"{title}{sep}{body}\\n\".format(\n title=title,\n sep=\"\\n\" if self.body else \"\",\n body=body\n )", "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def output_sep_title(title):\n print(f\"{sep_mark}\\t{title}{sep_mark}\")", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def title(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"=\")))", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', '&quot;')\n return title\n else:\n return u''", "def escape_if_needed(text, options):\n if hasattr(text, '__html__'):\n # Text has escape itself:\n 
return to_string(text.__html__())\n if need_to_escape(options):\n return escape(to_string(text))\n return to_string(text)", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_rst_title_char(level):\n chars = (u'=', u'-', u'`', u\"'\", u'.', u'~', u'*', u'+', u'^')\n if level < len(chars):\n return chars[level]\n return chars[-1]", "def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def escape_single_quotes(custom_data):\n # https://stackoverflow.com/questions/10569438/how-to-print-unicode-character-in-python\n # https://regex101.com/r/nM4bXf/1\n if re.search(\"(?<!u)'(?!:|}|,)\", custom_data.get('title_name', '')):\n z = re.sub(r\"(?<!u)'(?!:|}|,)\", '\\\\\\'', custom_data.get('title_name', None))\n\n custom_data['title_name'] = z\n return custom_data\n return custom_data", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def format_title(self, ticket_id, subject):\n # TODO: strip block tags?\n title = \"#%i %s\" % (ticket_id, subject)\n return title.strip()", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def title(self, value):\n if len(value):\n self._title = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._title.append('')", "def emphasize(text: str, tablefmt: str | TableFormat, strong: bool = False) -> str:\n # formats a title for a table produced using tabulate,\n # in the formats tabulate understands\n if tablefmt in [\"html\", \"unsafehtml\", html_with_borders_tablefmt]: # type: ignore\n if strong:\n emph_text = f\"<strong>{text}</strong>\"\n else:\n emph_text = f\"<em>{text}</em>\"\n elif tablefmt in [\"latex\", \"latex_raw\", \"latex_booktabs\", \"latex_longtable\"]:\n if 
strong:\n emph_text = r\"\\textbf{\" + text + r\"}\"\n else:\n emph_text = r\"\\emph{\" + text + r\"}\"\n else: # use the emphasis for tablefmt == \"pipe\" (Markdown)\n star = \"**\" if strong else \"*\"\n emph_text = f\"{star}{text}{star}\"\n return emph_text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def textual(title, ordering_field=None):\n def decorator(func):\n def wraps(self, obj):\n result = func(self, obj)\n return result if result else u'---'\n\n wraps.short_description = title\n wraps.allow_tags = True\n\n if ordering_field:\n wraps.admin_order_field = ordering_field\n\n return wraps\n return decorator", "def outputText(self, item, titleMode, internal=False):\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def group_title(self, group):\n group_title = group.getProperty('title')\n if self.short:\n splitted = group_title.split('(')\n if len(splitted) > 1:\n group_title = group_title.split('(')[-1][:-1]\n return html.escape(group_title)", "def outputText(self, item, titleMode, internal=False):\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)", "def format_heading(self, level, text):\n underlining = ['=', '-', '~', ][level-1] * len(text)\n return '%s\\n%s\\n\\n' % (text, underlining)", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def formatted(self) -> str:\r\n ...", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def gen_title_rst(txt):\n # Just add a few useful directives\n txt = \".. 
highlight:: cmake\\n\\n\" + txt\n return txt", "def _prettyfilename(self):\n return self.title", "def wrap_title(title, mpl_layout):\n fig = mpl_layout.canvas.figure\n ax = fig.axes[0]\n ext_pixels = ax.get_window_extent()\n ext_inches = ext_pixels.transformed(fig.dpi_scale_trans.inverted())\n magic_number = 10\n letters_per_line = int(ext_inches.width * magic_number)\n title_wrapped = '\\n'.join(textwrap.wrap(title, letters_per_line))\n return title_wrapped", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''", "def transform(text: str) -> str:\n return text.title()", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)", "def complete_alt_title(self, obj):\n return str(obj)", "def clean_title(\r\n title: str,\r\n mode: Literal[\"soft\", \"hard\", \"safe\"],\r\n allow_dot: bool = False,\r\n n: Optional[int] = None,\r\n) -> str:\r\n ...", "def text(self) -> str:", "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and 
':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "def print_title(title):\n\n print(\"\\n\" + title)\n print(\"=\" * len(title))", "def format_rich_quote(rich_text_quote):\n rich_text = format_rich_text(rich_text_quote)\n return \"> \" + \"\\n> \".join(rich_text.split(\"\\n\")) + \"\\n\"", "def SearchableText(self):\n ctool = getToolByName(self, 'portal_cpscalendar')\n if getattr(ctool, 'event_fulltext_index', False):\n return '%s %s' % (self.title, self.description)\n return ''", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def visit_title_reference(self, node):\n self.body.append('\\\\emph{\\\\textbf{')", "def render(resolve_unicode,\n title_force_uppercase,\n msdos_eol_style,\n output_encoding,\n omit_fields=[]):", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def format_screen(self,str):\n # Paragraph continue\n par_re = re.compile(r'\\\\$',re.MULTILINE)\n str = par_re.sub('',str)\n return str", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def title_content(label=\"A title\"):\n return {'label':label}", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title_p(self):\n self.run_command('title_p')", "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def dumps(self):\n\n if not self.numbering:\n num = '*'\n else:\n num = ''\n\n string = Command(self.latex_name + num, self.title).dumps()\n string += '%\\n' + self.dumps_content()\n\n return string", "def processTitle(title):\n\n cleaned = re.sub(r'[@#\\\"]+', '', title.lower().strip())\n cleaned = re.sub(r'\\(\\d{4}.*\\)', '', cleaned)\n cleaned = re.sub(r':.+', '', cleaned).strip()\n return cleaned", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if 
self.font else self.text" ]
[ "0.67517006", "0.6623557", "0.64947814", "0.6347113", "0.621596", "0.6210496", "0.60684896", "0.60674477", "0.60663515", "0.60421175", "0.6019259", "0.59935653", "0.59802073", "0.59790826", "0.595393", "0.5948588", "0.5939195", "0.590317", "0.5872387", "0.58521676", "0.5838757", "0.5835408", "0.5834278", "0.5832544", "0.58303535", "0.58232164", "0.58196765", "0.5818879", "0.581837", "0.58134586", "0.58123326", "0.57893336", "0.5777435", "0.5773666", "0.5759935", "0.57562524", "0.57514244", "0.5736761", "0.5721786", "0.57156", "0.5693657", "0.56579095", "0.56524575", "0.56516933", "0.56416726", "0.5639766", "0.5630319", "0.56235963", "0.5607828", "0.55989367", "0.5597865", "0.5593643", "0.55868447", "0.5576239", "0.55753696", "0.5570099", "0.556155", "0.55568874", "0.55474097", "0.5539662", "0.5532411", "0.5531814", "0.5512975", "0.5479672", "0.54774815", "0.54768354", "0.5473451", "0.54682344", "0.5464578", "0.54521894", "0.5445922", "0.5437787", "0.54369724", "0.5422958", "0.5415149", "0.5415149", "0.5399354", "0.539413", "0.53890395", "0.5382889", "0.5382856", "0.53564143", "0.535306", "0.53529805", "0.5352455", "0.5347083", "0.5333787", "0.5333257", "0.5332394", "0.5331696", "0.53306514", "0.53304696", "0.53293514", "0.5327383", "0.53269297", "0.53269297", "0.53238297", "0.53169096", "0.5314785", "0.5314103" ]
0.6307539
4
Return tuple of text in edit format and bool validity, using edit format option
def formatEditText(self, storedText): return (storedText, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def reformat(ctx):\n pass", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n 
return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def test_format_permissions_and_docstring(self):\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\"],\n {\"some\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"permission formatted string\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\"\n ),\n )\n\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\", \"another permission\"],\n {\"some\": \"docstring\", \"another\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"- permission formatted string\\n\"\n \"- another permission\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\\n\"\n \"- **another** : docstring\"\n ),\n )", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def text(value):\n return True", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and 
not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def _check_style(file_path, clang_format_bin):\n with open(file_path, 'r') as f:\n is_valid_header = f.read().startswith(CppFormatter.standard_header)\n\n cmd = [\n clang_format_bin,\n \"-style=file\",\n \"-output-replacements-xml\",\n file_path,\n ]\n result = subprocess.check_output(cmd).decode(\"utf-8\")\n if \"<replacement \" in result:\n is_valid_style = False\n else:\n is_valid_style = True\n return (is_valid_style, is_valid_header)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = 
_(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. 
Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def hints(s):\n if s == 'hello':\n # string, color, bold\n return (' World', 35, False)\n return None", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_data_from_nonformat_text():\n pass", "def FormatYesNo(value):\n if value:\n return u'Yes'\n else:\n return u'No'", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def rich(text):\n return full(text, False)", "def edit(self, text):\n return self._edit_engine(text, 
break_on_success=False)", "def change_prompt_format(self, arg, **_):\n if not arg:\n message = 'Missing required argument, format.'\n return [(None, None, None, message)]\n\n self.prompt_format = self.get_prompt(arg)\n return [(None, None, None, \"Changed prompt format to %s\" % arg)]", "def test_command_edit_info_boolean_flags():\n def f(inputfile):\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(inputfile, tmp.name)\n\n for flag in (\"write_protected\", \"synchronized\", \"cleaned\"):\n for true_value, false_value in ((\"1\", \"0\"),\n (\"yes\", \"no\"),\n (\"YES\", \"No\"),\n (\"true\", \"false\"),\n (\"tRuE\", \"FaLsE\")):\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, true_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == True\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, false_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == False\n f(kValid1)\n f(kValid2)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def get_help_text(self):\n requirements = \"Your password must contain at least: {min_length} \" \\\n \"character(s), {min_uppercase} uppercase letter(s), \" \\\n \"{min_lowercase} lowercase letter(s) and \" \\\n \"{min_digits} digit(s). \".format(\n min_length=self.min_length,\n min_uppercase=self.min_uppercase,\n min_lowercase=self.min_lowercase,\n min_digits=self.min_digits)\n return requirements", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def help_for(self, flag: str) -> Tuple[str, str]:\n # Obtain arg obj\n if flag not in self.flags:\n err = \"{!r} is not a valid flag for this context! Valid flags are: {!r}\" # noqa\n raise ValueError(err.format(flag, self.flags.keys()))\n arg = self.flags[flag]\n # Determine expected value type, if any\n value = {str: \"STRING\", int: \"INT\"}.get(arg.kind)\n # Format & go\n full_names = []\n for name in self.names_for(flag):\n if value:\n # Short flags are -f VAL, long are --foo=VAL\n # When optional, also, -f [VAL] and --foo[=VAL]\n if len(name.strip(\"-\")) == 1:\n value_ = (\"[{}]\".format(value)) if arg.optional else value\n valuestr = \" {}\".format(value_)\n else:\n valuestr = \"={}\".format(value)\n if arg.optional:\n valuestr = \"[{}]\".format(valuestr)\n else:\n # no value => boolean\n # check for inverse\n if name in self.inverse_flags.values():\n name = \"--[no-]{}\".format(name[2:])\n\n valuestr = \"\"\n # Tack together\n full_names.append(name + valuestr)\n namestr = \", \".join(sorted(full_names, key=len))\n helpstr = arg.help or \"\"\n return namestr, helpstr", "def __check_format(node, lint_ctx, profile: str, allow_ext=False):\n if \"format_source\" in node.attrib and (\"ext\" in node.attrib or \"format\" in node.attrib):\n lint_ctx.warn(\n f\"Tool {node.tag} output '{node.attrib.get('name', 'with missing name')}' should use either format_source or format/ext\",\n node=node,\n )\n if \"format_source\" in node.attrib:\n return True\n if node.find(\".//action[@type='format']\") is not None:\n return True\n # if allowed (e.g. 
for discover_datasets), ext takes precedence over format\n fmt = None\n if allow_ext:\n fmt = node.attrib.get(\"ext\")\n if fmt is None:\n fmt = node.attrib.get(\"format\")\n if fmt == \"input\":\n message = f\"Using format='input' on {node.tag} is deprecated. Use the format_source attribute.\"\n if Version(str(profile)) <= Version(\"16.01\"):\n lint_ctx.warn(message, node=node)\n else:\n lint_ctx.error(message, node=node)\n\n return fmt is not None", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def verify_diagnostics_and_usage_text():\r\n msg, status = \"\", True\r\n try:\r\n sleep(10)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n 'verify end user license agreement label'\r\n flag2,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n \r\n flag = False if not (flag1 and flag2) else True\r\n else:\r\n\r\n \r\n 'Verify diagnostics usage text'\r\n flag1 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_lbl'))\r\n 'Verify diagnostics usage text'\r\n flag2 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_txt'))\r\n \r\n sleep(4) \r\n \r\n flag = False if not (flag1 and flag2) else True\r\n if not flag:\r\n return False, msg\r\n else:\r\n print \"License agreement screen name is displayed properly\"\r\n \r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def _format_action(self, action):\n parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)\n if action.nargs == argparse.PARSER:\n parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n return parts", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def _engine_option_string_and_comment(option: engine.Option, value: engine.ConfigValue) -> Tuple[str, str]:\n if value is None:\n value = ''\n name_equals_val = f'{option.name}={value}'\n if option.type == 'check' or option.type == 'string' or option.type == 'button':\n return (name_equals_val, f'type={option.type}')\n if option.type == 'spin':\n return (name_equals_val, f'type=spin, min={option.min}, max={option.max}')\n if option.type == 'combo':\n return (name_equals_val, f'type=combo, var={option.var}')\n return (name_equals_val, 'type=unknown')", "def TEXT(number, format_type):\n raise NotImplementedError()", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n 
self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def reformat():\n toolkit.reformat()", "def __verify_plot_options(self, options_str):\n default_line = '-'\n default_marker = ''\n default_colour = 'k'\n\n # Split str into chars list\n options_split = list(options_str)\n\n # If 0, set defaults and return early\n if len(options_split) == 0:\n return [default_line, default_marker, default_colour]\n\n # If line_style given, join the first two options if applicable\n # (some types have 2 characters)\n for char in range(0, len(options_split) - 1):\n # If char is '-' (only leading character in double length option)\n if options_split[char] == '-' and len(options_split) > 1:\n # If one of the leading characters is valid\n if options_split[char + 1] == '-' or \\\n options_split[char + 1] == '.':\n # Join the two into the first\n options_split[char] = options_split[char] \\\n + options_split[char + 1]\n # Shuffle down the rest\n for idx in range(char + 2, len(options_split)):\n options_split[idx - 1] = options_split[idx]\n # Remove duplicate extra\n options_split.pop()\n\n # If any unknown, throw error\n for option in options_split:\n if option not in self.__line_styles and \\\n option not in self.__marker_styles and \\\n option not in self.__colour_styles:\n error_string = \"Unknown character entered: '{0}'\"\n raise ValueError(error_string.format(option))\n\n ##############################\n # Verify Line Style\n ##############################\n line_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n line_style_index = 0\n for option in options_split:\n if option in self.__line_styles:\n line_style_count = line_style_count + 1\n line_style_index = self.__line_styles.index(option)\n\n # If more than one, throw error\n if line_style_count > 1:\n raise ValueError(\n \"Too many line style arguments given. Only one allowed\")\n # If none, set as solid\n elif line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = default_line\n # If one, set as given\n else:\n output_line = self.__line_styles[line_style_index]\n ##############################\n\n ##############################\n # Verify Marker Style\n ##############################\n marker_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n marker_style_index = 0\n for option in options_split:\n if option in self.__marker_styles:\n marker_style_count = marker_style_count + 1\n marker_style_index = self.__marker_styles.index(option)\n\n # If more than one, throw error\n if marker_style_count > 1:\n raise ValueError(\n \"Too many marker style arguments given. 
Only one allowed\")\n # If none, set as no-marker\n elif marker_style_count == 0 or not any(\n item in options_split for item in self.__marker_styles):\n output_marker = default_marker\n # If one, set as given\n else:\n output_marker = self.__marker_styles[marker_style_index]\n # If marker set and no line given, turn line to no-line\n if line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = ''\n ##############################\n\n ##############################\n # Verify Colour Style\n ##############################\n colour_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n colour_style_index = 0\n for option in options_split:\n if option in self.__colour_styles:\n colour_style_count = colour_style_count + 1\n colour_style_index = self.__colour_styles.index(option)\n\n # If more than one, throw error\n if colour_style_count > 1:\n raise ValueError(\n \"Too many colour style arguments given. Only one allowed\")\n # If none, set as black\n elif colour_style_count == 0 or not any(\n item in options_split for item in self.__colour_styles):\n output_colour = default_colour\n # If one, set as given\n else:\n output_colour = self.__colour_styles[colour_style_index]\n ##############################\n\n return [output_line, output_marker, output_colour]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def validate_format(self):\n raise NotImplementedError()", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def _validate_performatives(performative: str) -> Tuple[bool, str]:\n # check performative is not a reserved name\n if _is_reserved_name(performative):\n return (\n False,\n \"Invalid name for performative '{}'. This name is reserved.\".format(\n performative,\n ),\n )\n\n # check performative's format\n if not _is_valid_regex(PERFORMATIVE_REGEX_PATTERN, performative):\n return (\n False,\n \"Invalid name for performative '{}'. 
Performative names must match the following regular expression: {} \".format(\n performative, PERFORMATIVE_REGEX_PATTERN\n ),\n )\n\n return True, \"Performative '{}' is valid.\".format(performative)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def edit_form_entry_help_text_extra(cls):\n return \"\"\"\n <ul class=\"{container_class}\">\n {edit_option_html}\n <li><a href=\"{delete_url}\">\n <span class=\"{delete_option_class}\"></span> {delete_text}</a>\n </li>\n </ul>\n <input type=\"hidden\" value=\"{form_element_position}\"\n name=\"form-{counter}-position\"\n id=\"id_form-{counter}-position\"\n class=\"form-element-position\">\n <input type=\"hidden\" value=\"{form_element_pk}\"\n name=\"form-{counter}-id\" id=\"id_form-{counter}-id\">\n \"\"\".format(\n container_class=cls.form_list_container_class,\n edit_option_html=\"{edit_option_html}\",\n delete_url=\"{delete_url}\",\n delete_option_class=cls.form_delete_form_entry_option_class,\n delete_text=\"{delete_text}\",\n form_element_position=\"{form_element_position}\",\n counter=\"{counter}\",\n form_element_pk=\"{form_element_pk}\",\n )", "def extension (formatStr):\n assert False, \"TODO:\"", "def _generateReadOnly(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'readonly'\n if self._script.utilities.isReadOnlyTextArea(obj):\n result.append(self._script.formatting.getString(**args))\n return result", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def is_text_editable(path):\n return False", "def _format_answer(self, text):\n text = str(text).replace('\\n', ' ')\n answer_width = 70\n pretty_text = '\\n\\t'.join(textwrap.wrap(text, answer_width))\n\n return pretty_text", "def flags(self, f):\n if f.is_inlined:\n return \" (inlined)\"\n return \"\"", "def text_to_display(level):\n if level == \"html\":\n return html_answers, html_text\n elif level == \"css\":\n return css_answers, css_text\n elif level == \"python\":\n return python_answers, python_text", "def editorForTyp(typ):\n\n if typ == \"quint32\":\n return (\"QSpinBox\", \"setValue\", \"value\")\n elif typ == \"QString\":\n return (\"QLineEdit\", \"setText\", \"text\")\n elif typ == \"bool\":\n return (\"QCheckBox\", \"setChecked\", \"isChecked\")\n return (None, None, None)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def show_fields(*fields):\n\n fields = filter( lambda x: x, fields )\n target_len = max( len(name) for name, value in fields ) + 2\n for name, value in fields:\n line = name + ':' + \" \" * (target_len - len(name))\n if type(value) == bool:\n line += color_text(\"Yes\", 'green') if value else color_text(\"No\", 'red')\n else:\n line += str(value)\n print line", "def testCheckRequiredFormat(self):\n plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()\n\n file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()\n file_system_builder.AddFile('/file.txt', (\n b'2018-01-24 18:25:08,454 -0800 INFO pid=2376 7780:MainThread '\n b'logging_config.py:295 OS: Windows/6.1-SP1\\n'))\n\n file_entry = file_system_builder.file_system.GetFileEntryByPath('/file.txt')\n\n parser_mediator = self._CreateParserMediator(None, file_entry=file_entry)\n\n file_object = file_entry.GetFileObject()\n text_reader = 
text_parser.EncodedTextReader(file_object)\n text_reader.ReadLines()\n\n result = plugin.CheckRequiredFormat(parser_mediator, text_reader)\n self.assertTrue(result)", "def formatted(self) -> str:\r\n ...", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def text_editor():\n return True", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def validate(self, txt, pos):\n state, rpos = qt.QDoubleValidator.validate(self, txt, pos)\n if txt.length() == 0:\n state = qt.QValidator.Acceptable\n return state, rpos" ]
[ "0.74416876", "0.7370665", "0.71592844", "0.70363986", "0.7028924", "0.68846506", "0.6550997", "0.64535177", "0.6392951", "0.63109875", "0.6300976", "0.6188802", "0.6026202", "0.5835225", "0.5771935", "0.5735613", "0.5613498", "0.5609731", "0.56050605", "0.5547816", "0.5309345", "0.5299432", "0.5209862", "0.5204991", "0.5204991", "0.5204991", "0.5204991", "0.5204991", "0.52003527", "0.51723677", "0.51647925", "0.5160046", "0.5119939", "0.51153874", "0.5094579", "0.5093168", "0.50772464", "0.5077213", "0.5065839", "0.5057548", "0.50492096", "0.501106", "0.50011784", "0.49889597", "0.49738917", "0.49718243", "0.49464372", "0.49350786", "0.4930196", "0.49300286", "0.492046", "0.4900359", "0.4896047", "0.4889429", "0.48845533", "0.4872616", "0.48598924", "0.48541707", "0.4853287", "0.48496547", "0.48475158", "0.48429024", "0.48423597", "0.4839126", "0.48285526", "0.48253557", "0.48237336", "0.48226026", "0.48198703", "0.48191673", "0.48190477", "0.4813445", "0.48076433", "0.4807577", "0.48015848", "0.48007897", "0.47960508", "0.47933918", "0.47912502", "0.47794497", "0.47762585", "0.47564918", "0.47562456", "0.4751227", "0.47500658", "0.47482246", "0.47376072", "0.47373587", "0.47357956", "0.47351882", "0.4733689", "0.4732709", "0.47288054", "0.47235525", "0.47193804", "0.4715197", "0.4714657", "0.4711986", "0.47118586" ]
0.71025413
4
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText):
    if editText:
        return (editText, True)
    return (editText, not self.isRequired)
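A minimal usage sketch of the method above (an assumption for illustration only — the `TextField` class and its `isRequired` flag are stand-ins for the field object this method belongs to, and are not part of the dataset record):

class TextField:
    def __init__(self, isRequired=False):
        self.isRequired = isRequired

    def storedText(self, editText):
        # Non-empty edited text is always stored and reported valid.
        if editText:
            return (editText, True)
        # Empty text is only valid when the field is optional.
        return (editText, not self.isRequired)

field = TextField(isRequired=True)
print(field.storedText("hello"))  # ('hello', True)
print(field.storedText(""))       # ('', False) - empty text fails a required field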
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def get_data_from_nonformat_text():\n pass", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def on_edit(self, event, text):\n return None", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def reformat(ctx):\n pass", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if check_text:\n check(text)\n \n 
print(text)\n \n ### managing placeholders\n text = parsers['v'].main(text)\n \n ### saving names\n if index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise 
PlansError(\"Couldn't get edit text, are we logged in?\")\n else:\n plan = u'' + plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_edits(text):\n edit_p = re.compile(\"(?P<open><edit.*?>)(?P<inner>.*?)(?P<close></edit>)\")\n corr_p = re.compile(\"<corrections>.*?</corrections>\")\n edits = []\n\n offset = 0\n\n for m in re.finditer(edit_p, text):\n # Make an edit object\n edit_text = \"\".join(m.groups())\n edit = ET.XML(m.group(0))\n\n # Set the bounds of the original text and adjust offset\n inner_string = m.group('inner') \n start = m.start() - offset\n corr_m = re.search(corr_p, inner_string)\n \n if corr_m: # Replacement/insertion have a correction\n offset += len(corr_m.group(0)) \n \n if not 
inner_string.startswith(\"<empty/>\"):\n end = start + corr_m.start()\n else:\n offset += len(\"<empty/>\") # It is \"\" in plain text\n end = start\n else:\n # Deletions may not have a correction\n if not inner_string.startswith(\"<empty/>\"):\n end = start + len(inner_string)\n else: # Unspecified error <empty/> is \"\" in plain text\n end = start\n offset += len(inner_string)\n\n\n edit.set(\"start\", \"%d\" % start) \n edit.set(\"end\", \"%d\" % end)\n\n offset += len(m.group('open')) + len(m.group('close'))\n \n\n # Make the original text a subelement of <edit>\n # Original text may be a string or <empty/> element.\n original = ET.SubElement(edit, \"original\")\n \n if edit.text:\n original.text = edit.text\n edit.text = \"\"\n else:\n empty = edit.find('empty')\n \n try:\n edit.remove(empty)\n original.append(empty)\n except Exception as e:\n pass\n \n edits.append(edit)\n\n return edits", "def refang(self, text: str):", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def get_mark(text, short):\n\n line = text.readline()\n\n # check that the line begins with a valid entry type\n if not short and not re.match(r'^\\s*(text|mark) = \"', line):\n raise ValueError('Bad entry: ' + line)\n\n # read until the number of double-quotes is even\n while line.count('\"') % 2:\n next_line = text.readline()\n\n if not next_line:\n raise EOFError('Bad entry: ' + line[:20] + '...')\n\n line += next_line\n if short:\n pattern = r'^\"(.*?)\"\\s*$'\n else:\n pattern = r'^\\s*(text|mark) = \"(.*?)\"\\s*$'\n entry = re.match(pattern, line, re.DOTALL)\n\n return entry.groups()[-1].replace('\"\"', '\"')", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def getText(self):", "def get_text(text_input):\r\n return text_input", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def rich(text):\n return full(text, False)", "def text(value):\n return True", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": 
\"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def edit():", "def get_text_editor_input(initial_msg):\n EDITOR = os.environ.get('EDITOR', 'vi')\n CROP_MARK = ('\\n\\nAnything above this line will be ignored:\\n' +\n ('-' * 34) + '>8' + ('-' * 34) + '\\n')\n\n wrapper = TextWrapper(replace_whitespace=False, drop_whitespace=False)\n initial_msg = '\\n'.join(wrapper.wrap(initial_msg))\n initial_msg += CROP_MARK\n\n with tempfile.NamedTemporaryFile(suffix='.md') as temp:\n temp.write(initial_msg.encode('utf-8'))\n temp.flush() # Write buffer to the file\n subprocess.call([EDITOR, temp.name])\n\n # The pointer was already after the initial message, but we return to\n # the beginning just in case the user added content before the mark\n temp.seek(0)\n return temp.read().decode('utf-8').split(CROP_MARK, 1)[1].strip()", "def text_example():\n \n text_store = \"01000001011000010010000001000010011000100000110100001010001100010011001000110011\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"You should be able to save this file and open it in a text editor like Notepad or Nano to read it. If you edit the values you may find it does not display properly as text. 
Unchanged, it should be interpreted by a text editor as:\\n\\nAa Bb\\n123\\n\\nAs the file was made on a Windows machines you may find other systems display the line breaks differently.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "def is_text_editable(path):\n return False", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def edit_once(self, text):\n return self._edit_engine(text, break_on_success=True)", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def process_text(self, text, language):", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. 
If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def get_text_from_editor():\n with tempfile.NamedTemporaryFile(suffix='.tmp', mode='w+t') as f:\n # Create a temporary file with instructions on describing bug\n f.write(message + '\\n\\n')\n f.flush()\n # Open the editor and allow the user to type\n editor = os.environ.get('EDITOR', 'vim')\n subprocess.call([editor, f.name])\n # Read and clean the file\n f.seek(0)\n text = ''.join([line.lstrip() for line in f.readlines()\n if line and not line.lstrip().startswith('#')])\n return '\\n'.join(textwrap.wrap(text, width=100))", "def storeTextEditValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.sender().toPlainText()\n\t\tself.storeValue(category, attr, value)", "def _editorText(self):\n if self.__lineEditKind:\n return self._editor.text()\n else:\n return self._editor.currentText()", "def _hidden_in_unicode(self, txt):", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def edit_type(self, candidate, word):\n edit = [False] * 4\n correct = \"\"\n error = \"\"\n replaced = ''\n replacer = ''\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]: # inconsistency in the first (i + 1) characters of the two strings\n if candidate[i:] == word[i - 1:]:\n edit[1] = True # deletion\n correct = candidate[i - 1] # candidate[i - 1] is deleted and we get word\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n edit[0] = True # insertion\n correct = ''\n error = word[i] # word[i] is redundant\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = 
word[i - 1] + error\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True # substitution\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True # transposition\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n # string inversion\n candidate = candidate[::-1]\n word = word[::-1]\n\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]:\n if candidate[i:] == word[i - 1:]:\n edit[1] = True\n correct = candidate[i - 1]\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n correct = ''\n error = word[i]\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = word[i - 1] + error\n edit[0] = True\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n if word == candidate:\n return \"None\", '', '', '', ''\n if edit[0]:\n return EDIT_TYPE_INSERTION, correct, error, replaced, replacer\n elif edit[1]:\n return EDIT_TYPE_DELETION, correct, error, replaced, replacer\n elif edit[2]:\n return EDIT_TYPE_SUBSTITUTION, correct, error, replaced, replacer\n elif edit[3]:\n return EDIT_TYPE_TRANSPOSITION, correct, error, replaced, replacer", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def text_editor():\n return True", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def stepText2Changed(build, step, text2):", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def on_idEdit_textChanged(self, txt):\n self.__generateDefaultCommitMessage()\n 
self.__updateOK()", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def element_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier))\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def reformat():\n toolkit.reformat()", "def process_raw_text(self, file_name, column_side):\n self.mvc_check()\n\n model_txt = None\n if column_side == LEFT_TEXT:\n model_txt = self.model.txt1\n elif column_side == RIGHT_TEXT:\n model_txt = self.model.txt2\n\n model_txt.open_raw(file_name)\n model_txt.process_raw()\n self.opened_txt[column_side] = True\n self.can_align = self.opened_txt[LEFT_TEXT] and self.opened_txt[RIGHT_TEXT]\n\n # Goldsmith\n model_txt.make_trie(column_side)\n model_txt.apply_goldsmith(1.1, 20, column_side)\n\n # Associate word for alignment if both text were opened\n if self.can_align:\n for view in self.views:\n view.end_task()\n view.change_task(\"Associating words\")\n self.model.associate_words(1.5)\n for view in self.views:\n view.end_task()\n\n # TODO : coherent saving to database using model.save_data\n\n return model_txt.str", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def read_plain_txt(input_fn: str) -> Tuple[List[str], List[str]]:\n\n with open(input_fn, 'r') as f:\n migrations = []\n queries = []\n mode = 'none'\n for line in f:\n stripped = line.strip()\n if len(stripped) == 0:\n continue\n if stripped.lower() == '== migrations':\n if mode != 'none':\n raise ValueError(f'Invalid {input_fn}: The migrations section 
should appear first.')\n mode = 'migrations'\n elif stripped.lower() == '== queries':\n if mode != 'migrations':\n raise ValueError(f'Invalid {input_fn}: The queries section should appear after the migrations section.')\n mode = 'queries'\n elif stripped[0] == '#':\n pass\n else:\n if mode == 'migrations':\n migrations.append(stripped)\n elif mode == 'queries':\n queries.append(stripped)\n else:\n pass\n return migrations, queries", "def on_lineEdit_textChanged(self, p0):\n # str_me = \"我爱我的祖国\"\n # self.lineEdit.setText(str_me) # 设置单行文本内容\n input_text = self.lineEdit.text()\n self.textEdit.setPlainText(input_text)\n # self.textEdit.setHtml(input_text) # 显示Html,如 <font color='red' size='20'>HELLO!</font>\n a = self.textEdit.toPlainText()\n print(a)", "def post_process_text(self, text):\n\t\treturn text", "def text(self) -> str:", "def editText(self, text, jumpIndex=None, highlight=None):\n try:\n import gui\n except ImportError, e:\n print 'Could not load GUI modules: %s' % e\n return text\n editor = gui.EditBoxWindow()\n return editor.edit(text, jumpIndex=jumpIndex, highlight=highlight)", "def alter_text_format(self):\n service = self.slides_service\n requests = [\n {\n 'updateParagraphStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'alignment': 'CENTER'\n },\n 'fields': 'alignment'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.TITLE_FONT_SIZE, # numbers slightly larger than lyrics\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.left_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.right_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n }\n ]\n body = {\n 'requests': requests\n }\n response = service.presentations().batchUpdate(presentationId=self.presentation_id, body=body).execute()\n print(f'Updated the text style for shape with ID: {self.left_box_id}')\n return response", "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def edit(self,edits):\n\t\tself.alphanumeric=edits['alphanumeric'] if 'alphanumeric' in edits else None\n\t\tself.alphanumeric_color = edits['alphanumeric_color'] if 'alphanumeric_color' in edits else None\n\t\tif self.alphanumeric_color ==\"grey\":\n\t\t\tself.alphanumeric_color = 
\"gray\"\n\t\tself.background_color = edits['background_color'] if 'background_color' in edits else None\n\t\tif self.background_color == \"grey\":\n\t\t\tself.background_color = \"gray\";\n\t\tshapeChoices = dict((x,y) for x,y in Target.SHAPE_CHOICES)\n\t\tself.shape = str(shapeChoices[edits['shape']]) if 'shape' in edits else None\n\t\tself.orientation = edits['orientation'] if 'orientation' in edits else None\n\t\tself.ptype = edits['ptype']\n\t\tself.description = edits['description'] if 'description' in edits else None\n\t\tself.save()", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def displayText():\n global entryWidget,entryWidget1,entryWidget2,entryWidget3,entryWidget4 ,entryWidget5,entryWidget6\n global thefilename,itrial,do_stim, delaylen,ntest_arms,stop_if_error,timeout_arm_sec\n thefilename=entryWidget.get().strip()\n itrial=entryWidget1.get().strip()\n do_stim=entryWidget2.get().strip()\n delaylen=entryWidget3.get().strip()\n ntest_arms=entryWidget4.get().strip()\n stop_if_error=int(entryWidget5.get().strip())==1 # convert to logical\n print 'stop_if_error is ', stop_if_error\n\n\n timeout_arm_sec=entryWidget6.get().strip()\n root.destroy()\n return thefilename,itrial,do_stim,delaylen,ntest_arms,stop_if_error,timeout_arm_sec" ]
[ "0.78716373", "0.76830506", "0.75691116", "0.75691116", "0.7379154", "0.73117137", "0.7152062", "0.7089976", "0.6903923", "0.6863199", "0.68065554", "0.6748621", "0.6604557", "0.62711895", "0.61224514", "0.6009547", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5534457", "0.5529326", "0.55119324", "0.54897064", "0.54593766", "0.53941077", "0.53884834", "0.53541094", "0.5348279", "0.5336523", "0.53298044", "0.53044033", "0.53017735", "0.5284678", "0.52548796", "0.5231703", "0.52075195", "0.51657903", "0.5139631", "0.51269805", "0.51183087", "0.50954133", "0.5086037", "0.50556576", "0.50475675", "0.50413114", "0.5033974", "0.50320536", "0.50238174", "0.50172436", "0.501209", "0.5011348", "0.50095177", "0.499828", "0.49958882", "0.49862808", "0.49802482", "0.49685866", "0.49656975", "0.49588487", "0.4951691", "0.49488887", "0.49448055", "0.49138415", "0.49082175", "0.48921612", "0.48836753", "0.48688877", "0.48642147", "0.48558703", "0.48427588", "0.48402458", "0.48379573", "0.48347312", "0.4829869", "0.48117617", "0.48040468", "0.48027003", "0.47989967", "0.47953638", "0.47919485", "0.47787616", "0.47736892", "0.47728088", "0.47708187", "0.4769437", "0.4768398", "0.47677627", "0.47633177", "0.47631097", "0.4755773", "0.47515184", "0.4750719", "0.47494507", "0.47457764", "0.47452554", "0.4735827", "0.47239852", "0.47187877" ]
0.7183602
6
Any format, prefix, suffix, html info in attrs dict
def __init__(self, name, attrs={}): TextFormat.__init__(self, name, attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = attr['value']\r\n\r\n return format", "def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):\n\n # Merge the attr dict and kw dict into a single attributes\n # dictionary (rewriting any attribute names, extracting\n # namespaces, and merging some values like css classes).\n attributes = {} # dict of key=(namespace,name): value=attribute_value\n if attr:\n for a, v in attr.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n if kw:\n for a, v in kw.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n\n # Add title attribute if missing, but it has an alt.\n if ('html', 'alt') in attributes and ('html', 'title') not in attributes:\n attributes[('html', 'title')] = attributes[('html', 'alt')]\n\n # Force both lang and xml:lang to be present and identical if\n # either exists. The lang takes precedence over xml:lang if\n # both exist.\n #if ('html', 'lang') in attributes:\n # attributes[('xml', 'lang')] = attributes[('html', 'lang')]\n #elif ('xml', 'lang') in attributes:\n # attributes[('html', 'lang')] = attributes[('xml', 'lang')]\n\n # Check all the HTML attributes to see if they are known and\n # allowed. Ignore attributes if in non-HTML namespaces.\n if allowed_attrs:\n for name in [key[1] for key in attributes if key[0] == 'html']:\n if name in _common_attributes or name in allowed_attrs:\n pass\n elif name.startswith('on'):\n pass # Too many event handlers to enumerate, just let them all pass.\n else:\n # Unknown or unallowed attribute.\n err = 'Illegal HTML attribute \"%s\" passed to formatter' % name\n raise ValueError(err)\n\n # Finally, format them all as a single string.\n if attributes:\n # Construct a formatted string containing all attributes\n # with their values escaped. Any html:* namespace\n # attributes drop the namespace prefix. 
We build this by\n # separating the attributes into three categories:\n #\n # * Those without any namespace (should only be xmlns attributes)\n # * Those in the HTML namespace (we drop the html: prefix for these)\n # * Those in any other non-HTML namespace, including xml:\n\n xmlnslist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if not k[0]]\n htmllist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] == 'html']\n otherlist = ['%s:%s=\"%s\"' % (k[0], k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] and k[0] != 'html']\n\n # Join all these lists together in a space-separated string. Also\n # prefix the whole thing with a space too.\n htmllist.sort()\n otherlist.sort()\n all = [''] + xmlnslist + htmllist + otherlist\n return ' '.join(all)\n return ''", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def back_to_tag(tag, attrs):\n sol = '<' + tag\n for (prop, val) in attrs:\n sol += ' ' + prop + '=\"' + val + '\"'\n sol += '>'\n return sol", "def add_attrs(value, arg):\n try:\n # Split list on comma\n kv_pairs = arg.split(\",\")\n except ValueError:\n raise template.TemplateSyntaxError(\n \"add_attrs requires as an argument a string in the format 'key:value, key1:value1, key2:value2...'\"\n )\n\n\n # Create dictionary\n html_attrs = dict()\n\n # Clean items and add attribute pairs to dictionary\n for item in kv_pairs:\n item = item.strip()\n k, v = item.split(\":\")\n html_attrs.update({k.strip():v.strip()})\n\n return value.as_widget(attrs=html_attrs)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def 
string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def attrs(**kwds):\n\n def decorate(f):\n for k in kwds:\n setattr(f, k, kwds[k])\n return f\n\n return decorate", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc = '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def _attrs(self, element, attrs):\n for attr, val in list(attrs.items()):\n element.setAttribute(attr, val)\n return element", "def date_attrs(name):\n attrs = battrs(name)\n attrs.update({'class': 'form-control datepicker'})\n return attrs", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def extend_attribute_dictionary(attributedict, ns, name, value):\n\n key = ns, name\n if 
value is None:\n if key in attributedict:\n del attributedict[key]\n else:\n if ns == 'html' and key in attributedict:\n if name == 'class':\n # CSS classes are appended by space-separated list\n value = attributedict[key] + ' ' + value\n elif name == 'style':\n # CSS styles are appended by semicolon-separated rules list\n value = attributedict[key] + '; ' + value\n elif name in _html_attribute_boolflags:\n # All attributes must have a value. According to XHTML those\n # traditionally used as flags should have their value set to\n # the same as the attribute name.\n value = name\n attributedict[key] = value", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def parse_tag_attrs(tag_str, options_d=None, font_d=None, case=\"\", **kwargs):\n attr_b = kwargs.pop(\"attr\", \"\")\n auto_b = kwargs.pop(\"auto\", False)\n font_d = kwargs.pop(\"font_d\", font_d or {})\n options_d = kwargs.pop(\"options_d\", options_d or {})\n case = kwargs.pop(\"case\", case)\n widget = kwargs.pop(\"widget\", None)\n text_w = kwargs.pop(text_s, None)\n bad_opts = []\n # INTs: height repeatdelay repeatinterval underline width; size fun fov\n for keyval in split_attrs(tag_str):\n if \"=\" in keyval:\n key, val = keyval.split(\"=\")\n val = unquote(val)\n elif keyval:\n key, val = keyval, None\n else:\n continue\n key = key.lower()\n key2, key3, key4 = key[:2], key[:3], key[:4]\n lowval = val.lower() if val else val\n key = unalias(key)\n kalias = alias(key)\n if val == \"None\": # in ('False', 'None') #\n pass\n elif key3 in (\n bg_s,\n background_s[:3],\n fg_s,\n foreground_s[:3],\n ) or kalias in (bg_s, fg_s):\n options_d.update(**{key: val})\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n options_d.update(**{key: val})\n if auto_b and compound_s not in options_d:\n options_d.update(compound=tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],) or kalias == bd_s:\n options_d.update(borderwidth=val)\n elif key4 in (command_s[:4], compound_s[:4],) or kalias in (\n command_as,\n compound_as,\n ):\n options_d.update(**{key: val})\n elif (\n key2 in (height_s[:2], width_s[:2])\n or key3 in (repeatdelay_s[:3], repeatinterval_s[:3])\n or kalias\n in (height_as, width_as, repeatdelay_as, repeatinterval_as)\n ):\n options_d.update(**{key: int(val)})\n elif (\n key2 in (cursor_s[:2],)\n or key3 == font_s[:3]\n or kalias in (cursor_as, font_as)\n ):\n options_d.update(**{key: val})\n elif key2 in (\"r\", relief_s[:2],) or kalias == relief_as:\n options_d.update(relief=val)\n if auto_b and borderwidth_s not in options_d and val != tk.FLAT:\n options_d.update(borderwidth=str(1))\n elif key2 == underline_s[:2] or kalias == underline_as:\n options_d.update(underline=-1 if val is None else int(val))\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ) or kalias in (selectbackground_as, selectforeground_as):\n options_d.update(**{key: val})\n # special for fonts\n elif key2 in (family_s[:2],) or kalias == family_as:\n font_d[family_s] = val\n elif key2 in (size_s[:2],) 
or kalias == size_as:\n try:\n font_d[size_s] = int(val)\n except ValueError:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: ERROR Setting Font Size to %r\" % val,\n Raise=True,\n )\n elif key3 in (bold_as, tk_font.BOLD[:3]) or kalias == bold_as:\n font_d[weight_s] = (\n tk_font.BOLD\n if str(val) not in (\"0\", \"False\",)\n else tk_font.NORMAL\n )\n elif key2 in (weight_s[:2],) or kalias == weight_as:\n font_d[weight_s] = val\n elif key2 in (italic_as, tk_font.ITALIC[:2]) or kalias == italic_as:\n font_d[slant_s] = (\n tk_font.ITALIC\n if str(val) not in (\"0\", \"False\",)\n else tk_font.ROMAN\n )\n elif key2 in (slant_s[:2],) or kalias == slant_as:\n font_d[slant_s] = val\n elif (\n key3 in (funderline_as, funderline_s[:3])\n or kalias == funderline_as\n ):\n font_d[underline_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n elif (\n key3 in (foverstrike_as, foverstrike_s[:3])\n or kalias == foverstrike_as\n ):\n font_d[overstrike_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n # special \"case\" implementation\n elif key3 in (case_s[:3],) or kalias == case_as:\n for s in (upper_s, capitalize_s, lower_s, title_s, swapcase_s):\n if s.startswith(lowval):\n case = s if s != capitalize_s else upper_s\n break\n elif (\n key2 == upper_s[:2]\n or key3 in (capitalize_s[:3],)\n or kalias in (upper_as, capitalize_as)\n ):\n if str(val) not in (\"0\", \"False\",):\n case = upper_s\n elif key2 in (lower_s[:2],) or kalias == lower_as:\n if str(val) not in (\"0\", \"False\",):\n case = lower_s\n elif key2 == title_s[:2] or kalias == title_as:\n if str(val) not in (\"0\", \"False\",):\n case = title_s\n elif key2 == swapcase_s[:2] or kalias == swapcase_as:\n if str(val) not in (\"0\", \"False\",):\n case = swapcase_s\n elif key in ():\n bad_opts.append((key, val))\n else:\n options_d.update(**{key: val})\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n if attr_b:\n return (\n case\n if attr_b == case_s\n else options_d.get(attr_b, font_d.get(attr_b))\n )\n return options_d, font_d, case", "def gen_tag_attrs(widget=None, options_d=None, font=None, case=None, **kwargs):\n auto_b = kwargs.get(\"auto\", False)\n case = kwargs.get(case_s, case)\n extend_b = kwargs.get(\"extend\", False)\n font = kwargs.pop(\"font\", font or {})\n index_i = kwargs.pop(\"index\", None)\n kmode_s = kwargs.get(\"kmode\", \"\") # a=alias, o=options, ''=unchanged\n options_d = kwargs.pop(\"options\", options_d or {})\n pare_b = kwargs.get(\"pare\", True)\n widget = kwargs.pop(\"widget\", widget)\n text_w = kwargs.get(text_s, None)\n recurse_b = kwargs.pop(\"recurse\", widget and isinstance(widget, TTWidget))\n fmt_s = \"\"\n font_d = {}\n w_font_d, w_options_d = {}, {}\n if index_i is not None and widget is None:\n raise Exception(\"Cannot set 'index' when 'widget' is None\")\n if widget: # and isinstance(widget, TTWidget): #\n excludes_t = () if widget.emulation_b else ()\n w_options_d = {\n k: v[-1]\n for k, v in widget.config().items()\n if len(v) == 5 and str(v[-1]) != str(v[-2]) and k not in excludes_t\n }\n try:\n w_options_d[case_s] = widget.case\n except AttributeError:\n pass\n w_font = widget.cget(font_s) # w_options_d.pop(font_s, None)\n w_font_d = get_font_dict(w_font) if w_font else {}\n if pare_b and w_font_d:\n def_w_font = widget.config(font_s)[-2]\n def_w_font_d = get_font_dict(def_w_font)\n w_font_d = pare_dict(w_font_d, def_w_font_d)\n if font:\n if isinstance(font, str):\n try:\n font = tk_font.nametofont(font)\n except 
tk.TclError:\n pass\n elif type(font) in (list, tuple):\n font = tk_font.Font(font=font)\n if isinstance(font, tk_font.Font):\n font = font.actual()\n if isinstance(font, dict):\n font_d = font\n if case: # is not None:\n options_d = _merge_dicts(options_d, dict(case=case))\n d = _merge_dicts(\n w_options_d,\n convert_font_dict_to_ttoptions_dict(w_font_d),\n options_d,\n convert_font_dict_to_ttoptions_dict(font_d),\n kwargs,\n )\n bad_opts = []\n for key, val in d.items():\n key = key.lower()\n if key in (\"auto\", \"extend\", \"kmode\", \"pare\",): # text_s, ): #\n continue\n key2, key3, key4 = key[:2], key[:3], key[:4]\n kalias = alias(key)\n koption = unalias(key)\n if kmode_s:\n if kmode_s[0] == \"a\": # alias\n keyout = kalias\n kfunc = alias\n auto_cpd, auto_bd = compound_as, bd_s\n elif kmode_s[0] == \"o\": # option\n keyout = koption\n kfunc = unalias\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n else:\n keyout = key\n kfunc = str\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n if val:\n val = quote(val)\n if (\n key3 in (bg_s, background_s[:3], fg_s, foreground_s[:3])\n or key2 == underline_s[:2]\n or kalias in (bg_s, fg_s, underline_as)\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_cpd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_cpd, tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],):\n if \"%s=%s \" % (auto_bd, 1) in fmt_s:\n if val != 1:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_bd, 1), \"%s=%s \" % (keyout, val)\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key4 in (compound_s[:4],) or kalias == compound_as:\n if \"%s=%s \" % (auto_cpd, tk.CENTER) in fmt_s:\n if val != tk.CENTER:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_cpd, tk.CENTER),\n \"%s=%s \" % (keyout, val),\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == cursor_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == font_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, get_named_font(val))\n elif key2 in (relief_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_bd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_bd, 1)\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sbd_s,\n selectborderwidth_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n # special for fonts\n elif key2 in (family_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (size_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (weight_s[:2],):\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.BOLD),\n 1\n if isinstance(val, str) and val.lower() == tk_font.BOLD\n else 0,\n )\n elif key2 == slant_s[:2]:\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.ITALIC),\n 1\n if isinstance(val, str) and val.lower() == tk_font.ITALIC\n else 0,\n )\n elif key3 in (funderline_as, funderline_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(funderline_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n elif key3 in (foverstrike_as, foverstrike_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(foverstrike_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n # special \"case\" implementation\n elif key3 == case_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(case_s), val)\n elif key2 == upper_s[:2] or key3 == capitalize_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(upper_s), val)\n elif key2 in (lower_s[:2], title_s[:2], swapcase_s[:2]):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key in 
():\n bad_opts.append((key, val))\n elif key in (text_s, text_as):\n if extend_b or widget:\n fmt_s += \"%s=%s \" % (keyout, val)\n else:\n # bad_opts.append((key, val))\n fmt_s += \"%s=%s \" % (keyout, val)\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n fmt = fmt_s.strip()\n if widget and isinstance(widget, TTWidget) and recurse_b:\n fmt = [\n fmt,\n ]\n for _, gathering in widget._get_kids(items=True):\n child = gathering[\"label\"]\n case = gathering.get(case_s, \"\")\n kid_options = {\n k: v[-1]\n for k, v in child.config().items()\n if len(v) == 5\n and str(v[-1]) != str(v[-2])\n and (k, v[-1]) not in w_options_d.items()\n and not (k in label_override_d and str(v[-1]) == \"0\")\n } #\n cf = kid_options.pop(font_s, None)\n cdf = child.config(font_s)[-2]\n if cf != cdf:\n c_font_d = pare_dict(get_font_dict(cf), get_font_dict(cdf))\n else:\n c_font_d = {}\n if case:\n kid_options.update(case=case)\n fmt.append(\n gen_tag_attrs(options=kid_options, font=c_font_d, **kwargs)\n )\n return fmt if index_i is None else fmt[index_i]", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def prepare_node_attrs(self):", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs", "def attkey_to_SVG_attribs(self,k):\n atts= k.split('@')\n o= ''\n acodes= {'C':'stroke','W':'stroke-width','S':'stroke-dasharray','O':'stroke-opacity'}\n for a in atts:\n if a[0] in acodes:\n o+= '%s=\"%s\" ' % (acodes[a[0]],a[1:])\n# elif a[0] == 'S': # Maybe do something special like this.\n# o+= 'stroke-dasharray=\"%\" ' % a[1:]\n return o", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def fix_attributes(string):\n defs = re.compile('<dl class=\"attribute\">(?P<descrip>.*?)</dl>',flags=re.DOTALL)\n name = re.compile('<code class=\"descclassname\">(?P<name>[^<]*)</code>')\n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefsub = ''\n remnsub = remain[match.start(1):match.end(1)]\n descrip = name.search(remnsub)\n if descrip:\n prefix += remnsub[:descrip.start()]\n prefix += remnsub[descrip.end():]\n prefix += remain[match.end(1):match.end(0)]\n else:\n prefix += remain[match.start(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def read_attribs(self):\n\n attribs = {}\n while 
self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def extensible_attributes():\n return 'extensibleattributedef?'", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def 
parse_tag_attrs(\n self, tags_str, options_d=None, font_d=None, case=\"\", **kwargs\n ):\n return parse_tag_attrs(\n tags_str,\n options_d,\n font_d,\n case,\n widget=self,\n text=getattr(self, \"debug_text\", None),\n **kwargs\n )", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def get_attributes(self) -> Dict[str, str]:\n pass", "def transform(attrs: dict) -> dict:\n\n pass", "def get_html_element_attributes(self):\n html_element_attributes = {\n 'class': self.css_classes or False, # Fall back to false to avoid class=\"\"\n }\n if self.should_render_as_link():\n html_element_attributes['href'] = self.url\n return html_element_attributes", "def create_descr(self, attr_name):", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def set_attrs(self, username, attrs):\n pass", "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,\r\n prettyPrint=False, indentLevel=0):\r\n\r\n encodedName = self.toEncoding(self.name, encoding)\r\n\r\n attrs = []\r\n if self.attrs:\r\n for key, val in self.attrs:\r\n fmt = '%s=\"%s\"'\r\n if isString(val):\r\n if self.containsSubstitutions and '%SOUP-ENCODING%' in val:\r\n val = self.substituteEncoding(val, encoding)\r\n\r\n # The attribute value either:\r\n #\r\n # * Contains no embedded double quotes or single quotes.\r\n # No problem: we enclose it in double quotes.\r\n # * Contains embedded single quotes. No problem:\r\n # double quotes work here too.\r\n # * Contains embedded double quotes. No problem:\r\n # we enclose it in single quotes.\r\n # * Embeds both single _and_ double quotes. This\r\n # can't happen naturally, but it can happen if\r\n # you modify an attribute value after parsing\r\n # the document. Now we have a bit of a\r\n # problem. We solve it by enclosing the\r\n # attribute in single quotes, and escaping any\r\n # embedded single quotes to XML entities.\r\n if '\"' in val:\r\n fmt = \"%s='%s'\"\r\n if \"'\" in val:\r\n # TODO: replace with apos when\r\n # appropriate.\r\n val = val.replace(\"'\", \"&squot;\")\r\n\r\n # Now we're okay w/r/t quotes. 
But the attribute\r\n # value might also contain angle brackets, or\r\n # ampersands that aren't part of entities. We need\r\n # to escape those to XML entities too.\r\n val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)\r\n\r\n attrs.append(fmt % (self.toEncoding(key, encoding),\r\n self.toEncoding(val, encoding)))\r\n close = ''\r\n closeTag = ''\r\n if self.isSelfClosing:\r\n close = ' /'\r\n else:\r\n closeTag = '</%s>' % encodedName\r\n\r\n indentTag, indentContents = 0, 0\r\n if prettyPrint:\r\n indentTag = indentLevel\r\n space = (' ' * (indentTag-1))\r\n indentContents = indentTag + 1\r\n contents = self.renderContents(encoding, prettyPrint, indentContents)\r\n if self.hidden:\r\n s = contents\r\n else:\r\n s = []\r\n attributeString = ''\r\n if attrs:\r\n attributeString = ' ' + ' '.join(attrs)\r\n if prettyPrint:\r\n s.append(space)\r\n s.append('<%s%s%s>' % (encodedName, attributeString, close))\r\n if prettyPrint:\r\n s.append(\"\\n\")\r\n s.append(contents)\r\n if prettyPrint and contents and contents[-1] != \"\\n\":\r\n s.append(\"\\n\")\r\n if prettyPrint and closeTag:\r\n s.append(space)\r\n s.append(closeTag)\r\n if prettyPrint and closeTag and self.nextSibling:\r\n s.append(\"\\n\")\r\n s = ''.join(s)\r\n return s", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def convert_attributes(cls, attrs):\n return {}", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def widget_attrs(self, widget):\n\n attrs = super(RelateField, self).widget_attrs(widget)\n\n attrs.update({'content_type': self.content_types})\n\n return attrs", "def attributes(doc, header, renderer=Attribute, item_class=DefinitionItem):\n items = doc.extract_items(item_class)\n lines = []\n renderer = renderer()\n for item in items:\n renderer.item = item\n lines += renderer.to_rst()\n lines.append('')\n return lines", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n info[ATTR_NAME] = info[ATTR_PROPERTIES]['Name'].replace('\\xa0', ' ')\n return info", "def img(self, **kwargs):\n attrs = ''\n for item in 
kwargs.items():\n if not item[0] in IMGATTRS:\n raise AttributeError, 'Invalid img tag attribute: %s'%item[0]\n attrs += '%s=\"%s\" '%item\n return '<img src=\"%s\" %s>'%(str(self),attrs)", "def gen_tag_attrs(self, *a, **kw):\n if kw.get(\"widget\", sentinel) is not None:\n raise Exception(\n \"TTToolTip.gen_tag_attrs(): 'widget' keyword must be set\"\n \" to None\"\n )\n return gen_tag_attrs(None, *a, **kw)", "def init_attrs(self):\n raise NotImplementedError", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def _style_to_basic_html_attributes(self, element, style_content,\n force=False):\n if style_content.count('}') and \\\n style_content.count('{') == style_content.count('{'):\n style_content = style_content.split('}')[0][1:]\n\n attributes = {}\n for rule in style_content.split(';'):\n split = rule.split(':')\n if len(split) != 2:\n continue\n key = split[0].strip()\n value = split[1]\n\n if key == 'text-align':\n attributes['align'] = value.strip()\n elif key == 'background-color':\n attributes['bgcolor'] = value.strip()\n elif key == 'width' or key == 'height':\n value = value.strip()\n if value.endswith('px'):\n value = value[:-2]\n attributes[key] = value\n\n for key, value in list(attributes.items()):\n if key in element.attrib and not force or key in self.disable_basic_attributes:\n # already set, don't dare to overwrite\n continue\n element.attrib[key] = value", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def domAttributesToString( node ):\n strOut = \"node has %d attribute(s):\\n\" % node.attributes.length;\n for i in range(node.attributes.length):\n attr = node.attributes.item(i);\n strOut += \"- %s:'%s'\\n\" % (attr.name, attr.value );\n return strOut;", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_", "def replace_tag_attributes(code_attrs, tag, tag_attrs):\n\n new_attrs = code_attrs.copy()\n for key, value in tag_attrs.items():\n if key in new_attrs:\n new_attrs[key] = new_attrs[key].replace(tag, value)\n\n return new_attrs", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def as_html(self):\n return mark_safe(\" 
\".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def format_link(attrs: Dict[tuple, str], new: bool = False):\n try:\n p = urlparse(attrs[(None, 'href')])\n except KeyError:\n # no href, probably an anchor\n return attrs\n\n if not any([p.scheme, p.netloc, p.path]) and p.fragment:\n # the link isn't going anywhere, probably a fragment link\n return attrs\n\n c = urlparse(settings.SITE_URL)\n if p.netloc != c.netloc:\n # link is external - secure and mark\n attrs[(None, 'target')] = '_blank'\n attrs[(None, 'class')] = attrs.get((None, 'class'), '') + ' external'\n attrs[(None, 'rel')] = 'nofollow noopener noreferrer'\n\n return attrs", "def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)", "def parseAttrs(self,attrs,date_type):\n\tattrs=copy.copy(attrs) #make sure we don't change user/group attributes\n \tattr_holders=self.getAttrHolders(attrs)\n\tmap(lambda x:x.setDateType(date_type),attr_holders)\n\tmap(lambda x:attrs.update(x.getParsedDic()),attr_holders)\n\treturn attrs", "def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs" ]
[ "0.735201", "0.6754294", "0.67166066", "0.67071074", "0.66780305", "0.65807486", "0.6522693", "0.6522693", "0.65187657", "0.6471306", "0.6269984", "0.62653935", "0.6153201", "0.6090701", "0.60323846", "0.60278016", "0.6011661", "0.60042846", "0.59841794", "0.5941162", "0.59205276", "0.5918955", "0.59121054", "0.5903962", "0.5884743", "0.5876164", "0.5857109", "0.5851559", "0.583173", "0.58274394", "0.5816038", "0.58061635", "0.5784312", "0.5755998", "0.5755998", "0.57360405", "0.57051307", "0.5701552", "0.5687975", "0.5650812", "0.5618766", "0.561154", "0.5605911", "0.56030387", "0.5602799", "0.55926436", "0.5587559", "0.5571399", "0.5567558", "0.55631375", "0.555545", "0.5550559", "0.55490625", "0.55470836", "0.55410224", "0.5519966", "0.55098814", "0.5492064", "0.547102", "0.5470936", "0.54692423", "0.5467515", "0.54661024", "0.54518676", "0.54405665", "0.5438651", "0.54003173", "0.5388153", "0.5382598", "0.5375904", "0.5375076", "0.53706104", "0.5359634", "0.5354708", "0.5354708", "0.5331472", "0.5324531", "0.53227526", "0.5316361", "0.5309617", "0.5308968", "0.53067", "0.5306182", "0.5299369", "0.52990687", "0.5287107", "0.52791494", "0.5277907", "0.5276578", "0.52742803", "0.5270845", "0.52608305", "0.52524847", "0.5244876", "0.5239417", "0.5234171", "0.5224983", "0.5215326", "0.521457", "0.5212088", "0.5203955" ]
0.0
-1
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False): try: text = GenDate(storedText).dateStr(self.format) except GenDateError: text = _errorStr return TextFormat.formatOutput(self, text, titleMode, internal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def format_title(self, data):\n return data", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_text(downgrade_titles=False):", "def PROPER(text):\n return text.title()", "def title(self, string):\n return self.bold(string)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def format_text(self):\n\n return "{}{}{}".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def print_with_title(title, content, before='', after='', hl='='):\n cont_maxlen = max(len(s) for s in content.split('\\n'))\n hl_len = max(cont_maxlen, len(title))\n print('{}{}\\n{}\\n{}{}'.format(before, title, hl * hl_len, content, after))", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def helptext(self):\n return \"\"", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def get_title():", "def __str__(self):\n date_str = self.date.strftime(self.journal.config['timeformat'])\n title = date_str + \" \" + self.title\n body = self.body.strip()\n\n return \"{title}{sep}{body}\\n\".format(\n title=title,\n sep=\"\\n\" if self.body else \"\",\n body=body\n )", "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def output_sep_title(title):\n print(f\"{sep_mark}\\t{title}{sep_mark}\")", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def title(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"=\")))", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', '&quot;')\n return title\n else:\n return u''", "def escape_if_needed(text, options):\n if hasattr(text, '__html__'):\n # Text has escape itself:\n return to_string(text.__html__())\n if need_to_escape(options):\n return escape(to_string(text))\n return to_string(text)", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_rst_title_char(level):\n chars = (u'=', u'-', u'`', u\"'\", u'.', u'~', u'*', u'+', u'^')\n if level < len(chars):\n return chars[level]\n return chars[-1]", "def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def escape_single_quotes(custom_data):\n # https://stackoverflow.com/questions/10569438/how-to-print-unicode-character-in-python\n # https://regex101.com/r/nM4bXf/1\n if re.search(\"(?<!u)'(?!:|}|,)\", custom_data.get('title_name', '')):\n z = re.sub(r\"(?<!u)'(?!:|}|,)\", '\\\\\\'', custom_data.get('title_name', None))\n\n custom_data['title_name'] = z\n return custom_data\n return custom_data", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def format_title(self, ticket_id, subject):\n # TODO: strip block tags?\n title = \"#%i %s\" % (ticket_id, subject)\n return title.strip()", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def title(self, value):\n if len(value):\n self._title = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._title.append('')", "def emphasize(text: str, tablefmt: str | TableFormat, strong: bool = False) -> str:\n # formats a title for a table produced using tabulate,\n # in the formats tabulate understands\n if tablefmt in [\"html\", \"unsafehtml\", html_with_borders_tablefmt]: # type: ignore\n if strong:\n emph_text = f\"<strong>{text}</strong>\"\n else:\n emph_text = f\"<em>{text}</em>\"\n elif tablefmt in [\"latex\", \"latex_raw\", \"latex_booktabs\", \"latex_longtable\"]:\n if strong:\n emph_text = r\"\\textbf{\" + text + r\"}\"\n else:\n emph_text = r\"\\emph{\" + text + r\"}\"\n else: # use the emphasis for tablefmt == \"pipe\" (Markdown)\n star = \"**\" if strong else \"*\"\n emph_text = f\"{star}{text}{star}\"\n return emph_text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def textual(title, ordering_field=None):\n def decorator(func):\n def wraps(self, obj):\n result = func(self, obj)\n return result if result else u'---'\n\n wraps.short_description = title\n wraps.allow_tags = True\n\n if ordering_field:\n wraps.admin_order_field = ordering_field\n\n return wraps\n return decorator", "def outputText(self, item, titleMode, internal=False):\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def group_title(self, group):\n group_title = group.getProperty('title')\n if self.short:\n splitted = group_title.split('(')\n if len(splitted) > 1:\n group_title = group_title.split('(')[-1][:-1]\n return html.escape(group_title)", "def outputText(self, item, titleMode, internal=False):\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)", "def format_heading(self, level, text):\n underlining = ['=', '-', '~', ][level-1] * len(text)\n return '%s\\n%s\\n\\n' % (text, underlining)", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def formatted(self) -> str:\r\n ...", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def gen_title_rst(txt):\n # Just add a few useful directives\n txt = \".. highlight:: cmake\\n\\n\" + txt\n return txt", "def _prettyfilename(self):\n return self.title", "def wrap_title(title, mpl_layout):\n fig = mpl_layout.canvas.figure\n ax = fig.axes[0]\n ext_pixels = ax.get_window_extent()\n ext_inches = ext_pixels.transformed(fig.dpi_scale_trans.inverted())\n magic_number = 10\n letters_per_line = int(ext_inches.width * magic_number)\n title_wrapped = '\\n'.join(textwrap.wrap(title, letters_per_line))\n return title_wrapped", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''", "def transform(text: str) -> str:\n return text.title()", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)", "def complete_alt_title(self, obj):\n return str(obj)", "def clean_title(\r\n title: str,\r\n mode: Literal[\"soft\", \"hard\", \"safe\"],\r\n allow_dot: bool = False,\r\n n: Optional[int] = None,\r\n) -> str:\r\n ...", "def text(self) -> str:", "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and ':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "def print_title(title):\n\n print(\"\\n\" + title)\n print(\"=\" * len(title))", "def format_rich_quote(rich_text_quote):\n rich_text = format_rich_text(rich_text_quote)\n return \"> \" + \"\\n> \".join(rich_text.split(\"\\n\")) + \"\\n\"", "def SearchableText(self):\n ctool = getToolByName(self, 'portal_cpscalendar')\n if getattr(ctool, 'event_fulltext_index', False):\n return '%s %s' % (self.title, self.description)\n return ''", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def visit_title_reference(self, node):\n self.body.append('\\\\emph{\\\\textbf{')", "def render(resolve_unicode,\n title_force_uppercase,\n msdos_eol_style,\n output_encoding,\n omit_fields=[]):", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def format_screen(self,str):\n # Paragraph continue\n par_re = re.compile(r'\\\\$',re.MULTILINE)\n str = par_re.sub('',str)\n return str", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def title_content(label=\"A title\"):\n return {'label':label}", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title_p(self):\n self.run_command('title_p')", "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def dumps(self):\n\n if not self.numbering:\n num = '*'\n else:\n num = ''\n\n string = Command(self.latex_name + num, self.title).dumps()\n string += '%\\n' + self.dumps_content()\n\n return string", "def processTitle(title):\n\n cleaned = re.sub(r'[@#\\\"]+', '', title.lower().strip())\n cleaned = re.sub(r'\\(\\d{4}.*\\)', '', cleaned)\n cleaned = re.sub(r':.+', '', cleaned).strip()\n return cleaned", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if self.font else self.text" ]
[ "0.67517006", "0.6623557", "0.64947814", "0.6347113", "0.6307539", "0.621596", "0.6210496", "0.60684896", "0.60674477", "0.60663515", "0.60421175", "0.6019259", "0.59935653", "0.59802073", "0.59790826", "0.595393", "0.5948588", "0.5939195", "0.590317", "0.5872387", "0.58521676", "0.5838757", "0.5835408", "0.5834278", "0.5832544", "0.58303535", "0.58232164", "0.58196765", "0.5818879", "0.581837", "0.58134586", "0.58123326", "0.57893336", "0.5777435", "0.5759935", "0.57562524", "0.57514244", "0.5736761", "0.5721786", "0.57156", "0.5693657", "0.56579095", "0.56524575", "0.56516933", "0.56416726", "0.5639766", "0.5630319", "0.56235963", "0.5607828", "0.55989367", "0.5597865", "0.5593643", "0.55868447", "0.5576239", "0.55753696", "0.5570099", "0.556155", "0.55568874", "0.55474097", "0.5539662", "0.5532411", "0.5531814", "0.5512975", "0.5479672", "0.54774815", "0.54768354", "0.5473451", "0.54682344", "0.5464578", "0.54521894", "0.5445922", "0.5437787", "0.54369724", "0.5422958", "0.5415149", "0.5415149", "0.5399354", "0.539413", "0.53890395", "0.5382889", "0.5382856", "0.53564143", "0.535306", "0.53529805", "0.5352455", "0.5347083", "0.5333787", "0.5333257", "0.5332394", "0.5331696", "0.53306514", "0.53304696", "0.53293514", "0.5327383", "0.53269297", "0.53269297", "0.53238297", "0.53169096", "0.5314785", "0.5314103" ]
0.5773666
34
Return tuple of text in edit format and bool validity, using edit format option
def formatEditText(self, storedText): format = globalref.options.strData('EditDateFormat', True) try: return (GenDate(storedText).dateStr(format), True) except GenDateError: return (storedText, not storedText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def reformat(ctx):\n pass", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def test_format_permissions_and_docstring(self):\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\"],\n {\"some\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"permission formatted string\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\"\n ),\n )\n\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\", \"another permission\"],\n {\"some\": \"docstring\", \"another\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"- permission formatted string\\n\"\n \"- another permission\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\\n\"\n \"- **another** : docstring\"\n ),\n )", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def text(value):\n return True", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def _check_style(file_path, clang_format_bin):\n with open(file_path, 'r') as f:\n is_valid_header = f.read().startswith(CppFormatter.standard_header)\n\n cmd = [\n clang_format_bin,\n \"-style=file\",\n \"-output-replacements-xml\",\n file_path,\n ]\n result = subprocess.check_output(cmd).decode(\"utf-8\")\n if \"<replacement \" in result:\n is_valid_style = False\n else:\n is_valid_style = True\n return (is_valid_style, is_valid_header)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = _(\"Only basic text tags are available in this input field.\")\n elif level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def hints(s):\n if s == 'hello':\n # string, color, bold\n return (' World', 35, False)\n return None", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_data_from_nonformat_text():\n pass", "def FormatYesNo(value):\n if value:\n return u'Yes'\n else:\n return u'No'", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def rich(text):\n return full(text, False)", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def change_prompt_format(self, arg, **_):\n if not arg:\n message = 'Missing required argument, format.'\n return [(None, None, None, message)]\n\n self.prompt_format = self.get_prompt(arg)\n return [(None, None, None, \"Changed prompt format to %s\" % arg)]", "def test_command_edit_info_boolean_flags():\n def f(inputfile):\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(inputfile, tmp.name)\n\n for flag in (\"write_protected\", \"synchronized\", \"cleaned\"):\n for true_value, false_value in ((\"1\", \"0\"),\n (\"yes\", \"no\"),\n (\"YES\", \"No\"),\n (\"true\", \"false\"),\n (\"tRuE\", \"FaLsE\")):\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, true_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == True\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, false_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == False\n f(kValid1)\n f(kValid2)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def get_help_text(self):\n requirements = \"Your password must contain at least: {min_length} \" \\\n \"character(s), {min_uppercase} uppercase letter(s), \" \\\n \"{min_lowercase} lowercase letter(s) and \" \\\n \"{min_digits} digit(s). \".format(\n min_length=self.min_length,\n min_uppercase=self.min_uppercase,\n min_lowercase=self.min_lowercase,\n min_digits=self.min_digits)\n return requirements", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def help_for(self, flag: str) -> Tuple[str, str]:\n # Obtain arg obj\n if flag not in self.flags:\n err = \"{!r} is not a valid flag for this context! Valid flags are: {!r}\" # noqa\n raise ValueError(err.format(flag, self.flags.keys()))\n arg = self.flags[flag]\n # Determine expected value type, if any\n value = {str: \"STRING\", int: \"INT\"}.get(arg.kind)\n # Format & go\n full_names = []\n for name in self.names_for(flag):\n if value:\n # Short flags are -f VAL, long are --foo=VAL\n # When optional, also, -f [VAL] and --foo[=VAL]\n if len(name.strip(\"-\")) == 1:\n value_ = (\"[{}]\".format(value)) if arg.optional else value\n valuestr = \" {}\".format(value_)\n else:\n valuestr = \"={}\".format(value)\n if arg.optional:\n valuestr = \"[{}]\".format(valuestr)\n else:\n # no value => boolean\n # check for inverse\n if name in self.inverse_flags.values():\n name = \"--[no-]{}\".format(name[2:])\n\n valuestr = \"\"\n # Tack together\n full_names.append(name + valuestr)\n namestr = \", \".join(sorted(full_names, key=len))\n helpstr = arg.help or \"\"\n return namestr, helpstr", "def __check_format(node, lint_ctx, profile: str, allow_ext=False):\n if \"format_source\" in node.attrib and (\"ext\" in node.attrib or \"format\" in node.attrib):\n lint_ctx.warn(\n f\"Tool {node.tag} output '{node.attrib.get('name', 'with missing name')}' should use either format_source or format/ext\",\n node=node,\n )\n if \"format_source\" in node.attrib:\n return True\n if node.find(\".//action[@type='format']\") is not None:\n return True\n # if allowed (e.g. for discover_datasets), ext takes precedence over format\n fmt = None\n if allow_ext:\n fmt = node.attrib.get(\"ext\")\n if fmt is None:\n fmt = node.attrib.get(\"format\")\n if fmt == \"input\":\n message = f\"Using format='input' on {node.tag} is deprecated. Use the format_source attribute.\"\n if Version(str(profile)) <= Version(\"16.01\"):\n lint_ctx.warn(message, node=node)\n else:\n lint_ctx.error(message, node=node)\n\n return fmt is not None", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def verify_diagnostics_and_usage_text():\r\n msg, status = \"\", True\r\n try:\r\n sleep(10)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n 'verify end user license agreement label'\r\n flag2,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n \r\n flag = False if not (flag1 and flag2) else True\r\n else:\r\n\r\n \r\n 'Verify diagnostics usage text'\r\n flag1 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_lbl'))\r\n 'Verify diagnostics usage text'\r\n flag2 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_txt'))\r\n \r\n sleep(4) \r\n \r\n flag = False if not (flag1 and flag2) else True\r\n if not flag:\r\n return False, msg\r\n else:\r\n print \"License agreement screen name is displayed properly\"\r\n \r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def _format_action(self, action):\n parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)\n if action.nargs == argparse.PARSER:\n parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n return parts", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def _engine_option_string_and_comment(option: engine.Option, value: engine.ConfigValue) -> Tuple[str, str]:\n if value is None:\n value = ''\n name_equals_val = f'{option.name}={value}'\n if option.type == 'check' or option.type == 'string' or option.type == 'button':\n return (name_equals_val, f'type={option.type}')\n if option.type == 'spin':\n return (name_equals_val, f'type=spin, min={option.min}, max={option.max}')\n if option.type == 'combo':\n return (name_equals_val, f'type=combo, var={option.var}')\n return (name_equals_val, 'type=unknown')", "def TEXT(number, format_type):\n raise NotImplementedError()", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def reformat():\n toolkit.reformat()", "def __verify_plot_options(self, options_str):\n default_line = '-'\n default_marker = ''\n default_colour = 'k'\n\n # Split str into chars list\n options_split = list(options_str)\n\n # If 0, set defaults and return early\n if len(options_split) == 0:\n return [default_line, default_marker, default_colour]\n\n # If line_style given, join the first two options if applicable\n # (some types have 2 characters)\n for char in range(0, len(options_split) - 1):\n # If char is '-' (only leading character in double length option)\n if options_split[char] == '-' and len(options_split) > 1:\n # If one of the leading characters is valid\n if options_split[char + 1] == '-' or \\\n options_split[char + 1] == '.':\n # Join the two into the first\n options_split[char] = options_split[char] \\\n + options_split[char + 1]\n # Shuffle down the rest\n for idx in range(char + 2, len(options_split)):\n options_split[idx - 1] = options_split[idx]\n # Remove duplicate extra\n options_split.pop()\n\n # If any unknown, throw error\n for option in options_split:\n if option not in self.__line_styles and \\\n option not in self.__marker_styles and \\\n option not in self.__colour_styles:\n error_string = \"Unknown character entered: '{0}'\"\n raise ValueError(error_string.format(option))\n\n ##############################\n # Verify Line Style\n ##############################\n line_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n line_style_index = 0\n for option in options_split:\n if option in self.__line_styles:\n line_style_count = line_style_count + 1\n line_style_index = self.__line_styles.index(option)\n\n # If more than one, throw error\n if line_style_count > 1:\n raise ValueError(\n \"Too many line style arguments given. Only one allowed\")\n # If none, set as solid\n elif line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = default_line\n # If one, set as given\n else:\n output_line = self.__line_styles[line_style_index]\n ##############################\n\n ##############################\n # Verify Marker Style\n ##############################\n marker_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n marker_style_index = 0\n for option in options_split:\n if option in self.__marker_styles:\n marker_style_count = marker_style_count + 1\n marker_style_index = self.__marker_styles.index(option)\n\n # If more than one, throw error\n if marker_style_count > 1:\n raise ValueError(\n \"Too many marker style arguments given. Only one allowed\")\n # If none, set as no-marker\n elif marker_style_count == 0 or not any(\n item in options_split for item in self.__marker_styles):\n output_marker = default_marker\n # If one, set as given\n else:\n output_marker = self.__marker_styles[marker_style_index]\n # If marker set and no line given, turn line to no-line\n if line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = ''\n ##############################\n\n ##############################\n # Verify Colour Style\n ##############################\n colour_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n colour_style_index = 0\n for option in options_split:\n if option in self.__colour_styles:\n colour_style_count = colour_style_count + 1\n colour_style_index = self.__colour_styles.index(option)\n\n # If more than one, throw error\n if colour_style_count > 1:\n raise ValueError(\n \"Too many colour style arguments given. Only one allowed\")\n # If none, set as black\n elif colour_style_count == 0 or not any(\n item in options_split for item in self.__colour_styles):\n output_colour = default_colour\n # If one, set as given\n else:\n output_colour = self.__colour_styles[colour_style_index]\n ##############################\n\n return [output_line, output_marker, output_colour]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def validate_format(self):\n raise NotImplementedError()", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def _validate_performatives(performative: str) -> Tuple[bool, str]:\n # check performative is not a reserved name\n if _is_reserved_name(performative):\n return (\n False,\n \"Invalid name for performative '{}'. This name is reserved.\".format(\n performative,\n ),\n )\n\n # check performative's format\n if not _is_valid_regex(PERFORMATIVE_REGEX_PATTERN, performative):\n return (\n False,\n \"Invalid name for performative '{}'. Performative names must match the following regular expression: {} \".format(\n performative, PERFORMATIVE_REGEX_PATTERN\n ),\n )\n\n return True, \"Performative '{}' is valid.\".format(performative)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def edit_form_entry_help_text_extra(cls):\n return \"\"\"\n <ul class=\"{container_class}\">\n {edit_option_html}\n <li><a href=\"{delete_url}\">\n <span class=\"{delete_option_class}\"></span> {delete_text}</a>\n </li>\n </ul>\n <input type=\"hidden\" value=\"{form_element_position}\"\n name=\"form-{counter}-position\"\n id=\"id_form-{counter}-position\"\n class=\"form-element-position\">\n <input type=\"hidden\" value=\"{form_element_pk}\"\n name=\"form-{counter}-id\" id=\"id_form-{counter}-id\">\n \"\"\".format(\n container_class=cls.form_list_container_class,\n edit_option_html=\"{edit_option_html}\",\n delete_url=\"{delete_url}\",\n delete_option_class=cls.form_delete_form_entry_option_class,\n delete_text=\"{delete_text}\",\n form_element_position=\"{form_element_position}\",\n counter=\"{counter}\",\n form_element_pk=\"{form_element_pk}\",\n )", "def extension (formatStr):\n assert False, \"TODO:\"", "def _generateReadOnly(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'readonly'\n if self._script.utilities.isReadOnlyTextArea(obj):\n result.append(self._script.formatting.getString(**args))\n return result", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def is_text_editable(path):\n return False", "def _format_answer(self, text):\n text = str(text).replace('\\n', ' ')\n answer_width = 70\n pretty_text = '\\n\\t'.join(textwrap.wrap(text, answer_width))\n\n return pretty_text", "def flags(self, f):\n if f.is_inlined:\n return \" (inlined)\"\n return \"\"", "def text_to_display(level):\n if level == \"html\":\n return html_answers, html_text\n elif level == \"css\":\n return css_answers, css_text\n elif level == \"python\":\n return python_answers, python_text", "def editorForTyp(typ):\n\n if typ == \"quint32\":\n return (\"QSpinBox\", \"setValue\", \"value\")\n elif typ == \"QString\":\n return (\"QLineEdit\", \"setText\", \"text\")\n elif typ == \"bool\":\n return (\"QCheckBox\", \"setChecked\", \"isChecked\")\n return (None, None, None)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def show_fields(*fields):\n\n fields = filter( lambda x: x, fields )\n target_len = max( len(name) for name, value in fields ) + 2\n for name, value in fields:\n line = name + ':' + \" \" * (target_len - len(name))\n if type(value) == bool:\n line += color_text(\"Yes\", 'green') if value else color_text(\"No\", 'red')\n else:\n line += str(value)\n print line", "def testCheckRequiredFormat(self):\n plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()\n\n file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()\n file_system_builder.AddFile('/file.txt', (\n b'2018-01-24 18:25:08,454 -0800 INFO pid=2376 7780:MainThread '\n b'logging_config.py:295 OS: Windows/6.1-SP1\\n'))\n\n file_entry = file_system_builder.file_system.GetFileEntryByPath('/file.txt')\n\n parser_mediator = self._CreateParserMediator(None, file_entry=file_entry)\n\n file_object = file_entry.GetFileObject()\n text_reader = text_parser.EncodedTextReader(file_object)\n text_reader.ReadLines()\n\n result = plugin.CheckRequiredFormat(parser_mediator, text_reader)\n self.assertTrue(result)", "def formatted(self) -> str:\r\n ...", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def text_editor():\n return True", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def validate(self, txt, pos):\n state, rpos = qt.QDoubleValidator.validate(self, txt, pos)\n if txt.length() == 0:\n state = qt.QValidator.Acceptable\n return state, rpos" ]
[ "0.74416876", "0.7370665", "0.71592844", "0.71025413", "0.71025413", "0.70363986", "0.7028924", "0.68846506", "0.6550997", "0.64535177", "0.6392951", "0.63109875", "0.6300976", "0.6188802", "0.5835225", "0.5771935", "0.5735613", "0.5613498", "0.5609731", "0.56050605", "0.5547816", "0.5309345", "0.5299432", "0.5209862", "0.5204991", "0.5204991", "0.5204991", "0.5204991", "0.5204991", "0.52003527", "0.51723677", "0.51647925", "0.5160046", "0.5119939", "0.51153874", "0.5094579", "0.5093168", "0.50772464", "0.5077213", "0.5065839", "0.5057548", "0.50492096", "0.501106", "0.50011784", "0.49889597", "0.49738917", "0.49718243", "0.49464372", "0.49350786", "0.4930196", "0.49300286", "0.492046", "0.4900359", "0.4896047", "0.4889429", "0.48845533", "0.4872616", "0.48598924", "0.48541707", "0.4853287", "0.48496547", "0.48475158", "0.48429024", "0.48423597", "0.4839126", "0.48285526", "0.48253557", "0.48237336", "0.48226026", "0.48198703", "0.48191673", "0.48190477", "0.4813445", "0.48076433", "0.4807577", "0.48015848", "0.48007897", "0.47960508", "0.47933918", "0.47912502", "0.47794497", "0.47762585", "0.47564918", "0.47562456", "0.4751227", "0.47500658", "0.47482246", "0.47376072", "0.47373587", "0.47357956", "0.47351882", "0.4733689", "0.4732709", "0.47288054", "0.47235525", "0.47193804", "0.4715197", "0.4714657", "0.4711986", "0.47118586" ]
0.6026202
14
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText): format = globalref.options.strData('EditDateFormat', True) try: return (repr(GenDate().setFromStr(editText, format)), True) except GenDateError: return (editText, not editText and not self.isRequired)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def get_data_from_nonformat_text():\n pass", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def on_edit(self, event, text):\n return None", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def reformat(ctx):\n pass", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if check_text:\n check(text)\n \n print(text)\n \n ### managing placeholders\n text = parsers['v'].main(text)\n \n ### saving names\n if index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise PlansError(\"Couldn't get edit text, are we logged in?\")\n else:\n plan = u'' + plan.contents[0]\n # prepending the empty string 
somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_edits(text):\n edit_p = re.compile(\"(?P<open><edit.*?>)(?P<inner>.*?)(?P<close></edit>)\")\n corr_p = re.compile(\"<corrections>.*?</corrections>\")\n edits = []\n\n offset = 0\n\n for m in re.finditer(edit_p, text):\n # Make an edit object\n edit_text = \"\".join(m.groups())\n edit = ET.XML(m.group(0))\n\n # Set the bounds of the original text and adjust offset\n inner_string = m.group('inner') \n start = m.start() - offset\n corr_m = re.search(corr_p, inner_string)\n \n if corr_m: # Replacement/insertion have a correction\n offset += len(corr_m.group(0)) \n \n if not inner_string.startswith(\"<empty/>\"):\n end = start + corr_m.start()\n else:\n offset += len(\"<empty/>\") # It is \"\" in plain text\n end = start\n 
else:\n # Deletions may not have a correction\n if not inner_string.startswith(\"<empty/>\"):\n end = start + len(inner_string)\n else: # Unspecified error <empty/> is \"\" in plain text\n end = start\n offset += len(inner_string)\n\n\n edit.set(\"start\", \"%d\" % start) \n edit.set(\"end\", \"%d\" % end)\n\n offset += len(m.group('open')) + len(m.group('close'))\n \n\n # Make the original text a subelement of <edit>\n # Original text may be a string or <empty/> element.\n original = ET.SubElement(edit, \"original\")\n \n if edit.text:\n original.text = edit.text\n edit.text = \"\"\n else:\n empty = edit.find('empty')\n \n try:\n edit.remove(empty)\n original.append(empty)\n except Exception as e:\n pass\n \n edits.append(edit)\n\n return edits", "def refang(self, text: str):", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def get_mark(text, short):\n\n line = text.readline()\n\n # check that the line begins with a valid entry type\n if not short and not re.match(r'^\\s*(text|mark) = \"', line):\n raise ValueError('Bad entry: ' + line)\n\n # read until the number of double-quotes is even\n while line.count('\"') % 2:\n next_line = text.readline()\n\n if not next_line:\n raise EOFError('Bad entry: ' + line[:20] + '...')\n\n line += next_line\n if short:\n pattern = r'^\"(.*?)\"\\s*$'\n else:\n pattern = r'^\\s*(text|mark) = \"(.*?)\"\\s*$'\n entry = re.match(pattern, line, re.DOTALL)\n\n return entry.groups()[-1].replace('\"\"', '\"')", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def getText(self):", "def get_text(text_input):\r\n return text_input", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def rich(text):\n return full(text, False)", "def text(value):\n return True", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": 
\"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def edit():", "def get_text_editor_input(initial_msg):\n EDITOR = os.environ.get('EDITOR', 'vi')\n CROP_MARK = ('\\n\\nAnything above this line will be ignored:\\n' +\n ('-' * 34) + '>8' + ('-' * 34) + '\\n')\n\n wrapper = TextWrapper(replace_whitespace=False, drop_whitespace=False)\n initial_msg = '\\n'.join(wrapper.wrap(initial_msg))\n initial_msg += CROP_MARK\n\n with tempfile.NamedTemporaryFile(suffix='.md') as temp:\n temp.write(initial_msg.encode('utf-8'))\n temp.flush() # Write buffer to the file\n subprocess.call([EDITOR, temp.name])\n\n # The pointer was already after the initial message, but we return to\n # the beginning just in case the user added content before the mark\n temp.seek(0)\n return temp.read().decode('utf-8').split(CROP_MARK, 1)[1].strip()", "def text_example():\n \n text_store = \"01000001011000010010000001000010011000100000110100001010001100010011001000110011\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"You should be able to save this file and open it in a text editor like Notepad or Nano to read it. If you edit the values you may find it does not display properly as text. 
Unchanged, it should be interpreted by a text editor as:\\n\\nAa Bb\\n123\\n\\nAs the file was made on a Windows machines you may find other systems display the line breaks differently.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "def is_text_editable(path):\n return False", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def edit_once(self, text):\n return self._edit_engine(text, break_on_success=True)", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def process_text(self, text, language):", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. 
If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def get_text_from_editor():\n with tempfile.NamedTemporaryFile(suffix='.tmp', mode='w+t') as f:\n # Create a temporary file with instructions on describing bug\n f.write(message + '\\n\\n')\n f.flush()\n # Open the editor and allow the user to type\n editor = os.environ.get('EDITOR', 'vim')\n subprocess.call([editor, f.name])\n # Read and clean the file\n f.seek(0)\n text = ''.join([line.lstrip() for line in f.readlines()\n if line and not line.lstrip().startswith('#')])\n return '\\n'.join(textwrap.wrap(text, width=100))", "def storeTextEditValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.sender().toPlainText()\n\t\tself.storeValue(category, attr, value)", "def _editorText(self):\n if self.__lineEditKind:\n return self._editor.text()\n else:\n return self._editor.currentText()", "def _hidden_in_unicode(self, txt):", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def edit_type(self, candidate, word):\n edit = [False] * 4\n correct = \"\"\n error = \"\"\n replaced = ''\n replacer = ''\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]: # inconsistency in the first (i + 1) characters of the two strings\n if candidate[i:] == word[i - 1:]:\n edit[1] = True # deletion\n correct = candidate[i - 1] # candidate[i - 1] is deleted and we get word\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n edit[0] = True # insertion\n correct = ''\n error = word[i] # word[i] is redundant\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = 
word[i - 1] + error\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True # substitution\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True # transposition\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n # string inversion\n candidate = candidate[::-1]\n word = word[::-1]\n\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]:\n if candidate[i:] == word[i - 1:]:\n edit[1] = True\n correct = candidate[i - 1]\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n correct = ''\n error = word[i]\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = word[i - 1] + error\n edit[0] = True\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n if word == candidate:\n return \"None\", '', '', '', ''\n if edit[0]:\n return EDIT_TYPE_INSERTION, correct, error, replaced, replacer\n elif edit[1]:\n return EDIT_TYPE_DELETION, correct, error, replaced, replacer\n elif edit[2]:\n return EDIT_TYPE_SUBSTITUTION, correct, error, replaced, replacer\n elif edit[3]:\n return EDIT_TYPE_TRANSPOSITION, correct, error, replaced, replacer", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def text_editor():\n return True", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def stepText2Changed(build, step, text2):", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def on_idEdit_textChanged(self, txt):\n self.__generateDefaultCommitMessage()\n 
self.__updateOK()", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def element_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier))\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def reformat():\n toolkit.reformat()", "def process_raw_text(self, file_name, column_side):\n self.mvc_check()\n\n model_txt = None\n if column_side == LEFT_TEXT:\n model_txt = self.model.txt1\n elif column_side == RIGHT_TEXT:\n model_txt = self.model.txt2\n\n model_txt.open_raw(file_name)\n model_txt.process_raw()\n self.opened_txt[column_side] = True\n self.can_align = self.opened_txt[LEFT_TEXT] and self.opened_txt[RIGHT_TEXT]\n\n # Goldsmith\n model_txt.make_trie(column_side)\n model_txt.apply_goldsmith(1.1, 20, column_side)\n\n # Associate word for alignment if both text were opened\n if self.can_align:\n for view in self.views:\n view.end_task()\n view.change_task(\"Associating words\")\n self.model.associate_words(1.5)\n for view in self.views:\n view.end_task()\n\n # TODO : coherent saving to database using model.save_data\n\n return model_txt.str", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def read_plain_txt(input_fn: str) -> Tuple[List[str], List[str]]:\n\n with open(input_fn, 'r') as f:\n migrations = []\n queries = []\n mode = 'none'\n for line in f:\n stripped = line.strip()\n if len(stripped) == 0:\n continue\n if stripped.lower() == '== migrations':\n if mode != 'none':\n raise ValueError(f'Invalid {input_fn}: The migrations section 
should appear first.')\n mode = 'migrations'\n elif stripped.lower() == '== queries':\n if mode != 'migrations':\n raise ValueError(f'Invalid {input_fn}: The queries section should appear after the migrations section.')\n mode = 'queries'\n elif stripped[0] == '#':\n pass\n else:\n if mode == 'migrations':\n migrations.append(stripped)\n elif mode == 'queries':\n queries.append(stripped)\n else:\n pass\n return migrations, queries", "def on_lineEdit_textChanged(self, p0):\n # str_me = \"我爱我的祖国\"\n # self.lineEdit.setText(str_me) # 设置单行文本内容\n input_text = self.lineEdit.text()\n self.textEdit.setPlainText(input_text)\n # self.textEdit.setHtml(input_text) # 显示Html,如 <font color='red' size='20'>HELLO!</font>\n a = self.textEdit.toPlainText()\n print(a)", "def post_process_text(self, text):\n\t\treturn text", "def text(self) -> str:", "def editText(self, text, jumpIndex=None, highlight=None):\n try:\n import gui\n except ImportError, e:\n print 'Could not load GUI modules: %s' % e\n return text\n editor = gui.EditBoxWindow()\n return editor.edit(text, jumpIndex=jumpIndex, highlight=highlight)", "def alter_text_format(self):\n service = self.slides_service\n requests = [\n {\n 'updateParagraphStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'alignment': 'CENTER'\n },\n 'fields': 'alignment'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.TITLE_FONT_SIZE, # numbers slightly larger than lyrics\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.left_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.right_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n }\n ]\n body = {\n 'requests': requests\n }\n response = service.presentations().batchUpdate(presentationId=self.presentation_id, body=body).execute()\n print(f'Updated the text style for shape with ID: {self.left_box_id}')\n return response", "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def edit(self,edits):\n\t\tself.alphanumeric=edits['alphanumeric'] if 'alphanumeric' in edits else None\n\t\tself.alphanumeric_color = edits['alphanumeric_color'] if 'alphanumeric_color' in edits else None\n\t\tif self.alphanumeric_color ==\"grey\":\n\t\t\tself.alphanumeric_color = 
\"gray\"\n\t\tself.background_color = edits['background_color'] if 'background_color' in edits else None\n\t\tif self.background_color == \"grey\":\n\t\t\tself.background_color = \"gray\";\n\t\tshapeChoices = dict((x,y) for x,y in Target.SHAPE_CHOICES)\n\t\tself.shape = str(shapeChoices[edits['shape']]) if 'shape' in edits else None\n\t\tself.orientation = edits['orientation'] if 'orientation' in edits else None\n\t\tself.ptype = edits['ptype']\n\t\tself.description = edits['description'] if 'description' in edits else None\n\t\tself.save()", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def displayText():\n global entryWidget,entryWidget1,entryWidget2,entryWidget3,entryWidget4 ,entryWidget5,entryWidget6\n global thefilename,itrial,do_stim, delaylen,ntest_arms,stop_if_error,timeout_arm_sec\n thefilename=entryWidget.get().strip()\n itrial=entryWidget1.get().strip()\n do_stim=entryWidget2.get().strip()\n delaylen=entryWidget3.get().strip()\n ntest_arms=entryWidget4.get().strip()\n stop_if_error=int(entryWidget5.get().strip())==1 # convert to logical\n print 'stop_if_error is ', stop_if_error\n\n\n timeout_arm_sec=entryWidget6.get().strip()\n root.destroy()\n return thefilename,itrial,do_stim,delaylen,ntest_arms,stop_if_error,timeout_arm_sec" ]
[ "0.78716373", "0.76830506", "0.75691116", "0.75691116", "0.7379154", "0.73117137", "0.7183602", "0.7152062", "0.7089976", "0.6863199", "0.68065554", "0.6748621", "0.6604557", "0.62711895", "0.61224514", "0.6009547", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5534457", "0.5529326", "0.55119324", "0.54897064", "0.54593766", "0.53941077", "0.53884834", "0.53541094", "0.5348279", "0.5336523", "0.53298044", "0.53044033", "0.53017735", "0.5284678", "0.52548796", "0.5231703", "0.52075195", "0.51657903", "0.5139631", "0.51269805", "0.51183087", "0.50954133", "0.5086037", "0.50556576", "0.50475675", "0.50413114", "0.5033974", "0.50320536", "0.50238174", "0.50172436", "0.501209", "0.5011348", "0.50095177", "0.499828", "0.49958882", "0.49862808", "0.49802482", "0.49685866", "0.49656975", "0.49588487", "0.4951691", "0.49488887", "0.49448055", "0.49138415", "0.49082175", "0.48921612", "0.48836753", "0.48688877", "0.48642147", "0.48558703", "0.48427588", "0.48402458", "0.48379573", "0.48347312", "0.4829869", "0.48117617", "0.48040468", "0.48027003", "0.47989967", "0.47953638", "0.47919485", "0.47787616", "0.47736892", "0.47728088", "0.47708187", "0.4769437", "0.4768398", "0.47677627", "0.47633177", "0.47631097", "0.4755773", "0.47515184", "0.4750719", "0.47494507", "0.47457764", "0.47452554", "0.4735827", "0.47239852", "0.47187877" ]
0.6903923
9
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''):
    format = globalref.options.strData('EditDateFormat', True)
    today = GenDate().dateStr(format)
    yesterday = (GenDate() - 1).dateStr(format)
    tomorrow = (GenDate() + 1).dateStr(format)
    return [(today, '(%s)' % _('today')),
            (yesterday, '(%s)' % _('yesterday')),
            (tomorrow, '(%s)' % _('tomorrow'))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def choices(self):\n return tuple(self._choices)", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def fill_combobox_attributes(self):\n\n list_char = [\"\"]\n list_num = [\"\"]\n if self.ui.radioButton_file.isChecked():\n for a in self.attributes:\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n else:\n for a in self.attributes:\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.clear()\n self.ui.comboBox_char_attributes.clear()\n self.ui.comboBox_char_attributes.addItems(list_char)\n self.ui.comboBox_num_attributes.addItems(list_num)\n self.ui.comboBox_num_attributes.blockSignals(False)\n self.ui.comboBox_char_attributes.blockSignals(False)", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], 
str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def get_choices(cls):\n return cls.values.items()", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def create_combo(self) -> typing.Iterable[Combo]:\n raise NotImplementedError()", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice>\" +\r\n \"tag; got {0} instead\".format(choice.tag)\r\n )\r\n\r\n components = []\r\n choice_text = ''\r\n if choice.text is not None:\r\n choice_text += choice.text\r\n # Initialize our dict for the next content\r\n adder = {\r\n 'type': 'text',\r\n 'contents': choice_text,\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n components.append(adder)\r\n\r\n for elt in choice:\r\n # for elements in the choice e.g. 
<text> <numtolerance_input>\r\n adder = {\r\n 'type': 'text',\r\n 'contents': '',\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n tag_type = elt.tag\r\n # If the current `elt` is a <numtolerance_input> set the\r\n # `adder`type to 'numtolerance_input', and 'contents' to\r\n # the `elt`'s name.\r\n # Treat decoy_inputs and numtolerance_inputs the same in order\r\n # to prevent students from reading the Html and figuring out\r\n # which inputs are valid\r\n if tag_type in ('numtolerance_input', 'decoy_input'):\r\n # We set this to textinput, so that we get a textinput html\r\n # element.\r\n adder['type'] = 'textinput'\r\n adder['contents'] = elt.get('name')\r\n else:\r\n adder['contents'] = elt.text\r\n\r\n # Add any tail text(\"is the mean\" in the example)\r\n adder['tail_text'] = elt.tail if elt.tail else ''\r\n components.append(adder)\r\n\r\n # Add the tuple for the current choice to the list of choices\r\n choices.append((choice.get(\"name\"), components))\r\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def combo_callback(self, eventobj):\n print(self.combo_input.get()) # print name\n print(self.combo_input.current()) # print index", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def set_choices(self, index, choices):\n if len(choices) == 1:\n self._label(index)\n self._widgets[index][\"text\"] = str(choices[0])\n else:\n self._combo(index)\n self._widgets[index][\"values\"] = [str(t) for t in choices]\n width = max(len(str(t)) for t in choices)\n width = max(5, width)\n self._widgets[index][\"width\"] = width", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice> tag; got %s instead\"\r\n % choice.tag)\r\n choices.append((choice.get(\"name\"), stringify_children(choice)))\r\n return choices", "def __init__(self, \n num_fld=1, \n lab_txt=[\"1\"], \n txt_fld=[\"1\"], \n title_txt=\"test\", \n comb_txt=[],\n comb_lab_txt=[], \n comb_num=0, \n root_x=50, \n root_y=50):\n super().__init__()\n self.geometry(f'+{root_x}+{root_y}') #head=y+20px\n self.str_in=[]\n self.title(title_txt)\n if comb_txt:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n self.comb=[]\n self.act=[]\n lab=[0]*num_fld\n lab_comb=[0]*comb_num\n else:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n lab=[0]*num_fld\n self.comb=[]\n self.act=[]\n for i in range(num_fld):\n self.name[i]=tk.StringVar()\n ent[i]=tk.Entry(self,textvariable=self.name[i])\n ent[i].insert(0, txt_fld[i])\n lab[i] = tk.Label(self,width=15, text=lab_txt[i])\n lab[i].pack()\n ent[i].pack()\n for i in range(comb_num):\n lab_comb[i]=tk.Label(self,width=35, 
text=comb_lab_txt[i])\n self.comb.append(ttk.Combobox(self, values=comb_txt))\n lab_comb[i].pack()\n self.comb[i].pack()\n self.comb[i].current(1)\n\n but_ac=tk.Button(self, text=\"Accept\", command=self.ins)\n but_ac.pack()\n self.mainloop", "def input_choices_from_list(choices, text):\n no_courses_text = \"\"\"\n init will only list the courses you are enrolled in\n and there seem to be none.\n Either enrol in a course or add the course id as command line argument.\n \"\"\"\n if choices is None or len(choices) == 0:\n print(no_courses_text)\n raise SystemExit(1)\n\n digits = str(math.ceil(math.log10(len(choices))))\n format_str = '{:' + digits + 'd} {}'\n for n, c in enumerate(choices):\n print(format_str.format(n, c))\n try:\n return [int(c) for c in input(text).split()]\n except EOFError:\n return []", "def combobox(self):\n return self._combo", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def widgets_from_abbreviations(self, seq):\n result = []\n for name, abbrev, default in seq:\n widget = self.widget_from_abbrev(abbrev, default)\n if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):\n if widget is None:\n raise ValueError(\"{!r} cannot be transformed to a widget\".format(abbrev))\n else:\n raise TypeError(\"{!r} is not a ValueWidget\".format(widget))\n if not widget.description:\n widget.description = name\n widget._kwarg = name\n result.append(widget)\n return result", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def build_comboboxes(activities, events):\n global comboboxes\n # For each activity set up a selector for an event\n\n for activity in activities:\n\n # Setup frame for better display in gui\n frame = Frame(main_window)\n frame.configure(background=\"gray30\")\n\n # Label the left column as activity in a model + \"beautify gui\"\n text = \"Activity name (model):\"\n Label(frame, text=text, bg=\"gray30\", fg=\"white\", padx=5).grid(column=0, row=0)\n Label(frame, text=activity, bg=\"gray30\", fg=\"white\").grid(column=0, row=1)\n\n # Set up the combobox for an event\n combo = Combobox(frame)\n combo['values'] = events\n\n # If activity is in events preselect the current one\n if activity in events:\n combo.current(events.index(activity))\n\n # Label the combobox and place label and box in frame\n Label(frame, text=\"Event name (log):\", bg=\"gray30\", fg=\"white\", padx=5).grid(column=1, row=0)\n combo.grid(column=1, row=1)\n\n # If the last activity in the graph is handled then do not write a separator\n if activity != activities[-1]:\n Separator(frame, orient=\"horizontal\").grid(row=2, columnspan=2, sticky=\"ew\", pady=10)\n\n comboboxes[activity] = combo\n # place the frame in the main_window\n frame.grid(column=0)", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = 
int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def widgets(parameter: Parameter):\n widgets = []\n for key in parameter.keys():\n textEdit = QTextEdit()\n textEdit.setText(key)\n widgets.append(textEdit)\n if isinstance(parameter[key], Enum):\n comboBox = MyQtEnumComboBox()\n comboBox.fillValues(type(parameter[key]))\n widgets.append(comboBox)\n elif isinstance(parameter[key], bool):\n comboBox = QComboBox()\n comboBox.addItems((\"False\", \"True\"))\n widgets.append(comboBox)\n else:\n textEdit = QTextEdit()\n textEdit.setText(str(parameter[key]))\n widgets.append(textEdit)\n for widget in widgets:\n widget.setFixedHeight(30)\n return widgets", "def _getBrailleRegionsForComboBox(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForComboBox\", obj)\n\n regions = []\n\n focusedRegionIndex = 0\n label = self._script.getDisplayedLabel(obj)\n if label and (len(label) > 0):\n regions.append(braille.Region(label + \" \"))\n focusedRegionIndex = 1\n\n # Check to see if the text is editable. If so, then we want\n # to show the text attributes (such as selection -- see bug\n # 496846 for more details).\n #\n textObj = None\n for child in obj:\n if child and child.getRole() == pyatspi.ROLE_TEXT:\n textObj = child\n if textObj and textObj.getState().contains(pyatspi.STATE_EDITABLE):\n textRegion = braille.Text(textObj)\n regions.append(textRegion)\n else:\n displayedText = self._script.getDisplayedText(obj)\n if displayedText:\n regions.append(braille.Region(displayedText))\n\n regions.append(braille.Region(\n \" \" + rolenames.getBrailleForRoleName(obj)))\n\n # Things may not have gone as expected above, so we'll do some\n # defensive programming to make sure we don't get an index out\n # of bounds.\n #\n if focusedRegionIndex >= len(regions):\n focusedRegionIndex = 0\n if len(regions) == 0:\n focusedRegion = None\n else:\n focusedRegion = regions[focusedRegionIndex]\n\n # [[[TODO: WDW - perhaps if a text area was created, we should\n # give focus to it.]]]\n #\n return [regions, focusedRegion]", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n 
h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def test_rendering_combobox(qtbot):\n layer = Image(np.random.rand(8, 8))\n qtctrl = QtImageControls(layer)\n qtbot.addWidget(qtctrl)\n combo = qtctrl.renderComboBox\n opts = {combo.itemText(i) for i in range(combo.count())}\n rendering_options = {\n 'translucent',\n 'additive',\n 'iso',\n 'mip',\n 'minip',\n 'attenuated_mip',\n 'average',\n }\n assert opts == rendering_options\n # programmatically updating rendering mode updates the combobox\n layer.rendering = 'iso'\n assert combo.findText('iso') == combo.currentIndex()", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def get_poll_choices(self, games: [Game]) -> [dict]:\n answer_texts = []\n for g in games:\n answer_texts.append(g.name + \" - \" + g.genre)\n answer_texts = sorted(answer_texts, key=str.lower)\n poll_choices = []\n for at in answer_texts:\n poll_choices.append({\"text\": at})\n return poll_choices", "def __str__(self):\n return \"choice_text: \" + self.choice_text", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def comboBox(args: list, slot) -> QComboBox:\n comboBox = QComboBox()\n comboBox.addItems(args[0])\n comboBox.currentTextChanged.connect(slot)\n return comboBox", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def make_ddls( self, parent ):\n # ---- 0\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n 
textvariable = \"self.arg_2_var\" )\n\n a_widget.bind( \"<<ComboboxSelected>>\", self.sync_ddl_0 )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_0_widget = a_widget\n\n # ---- 1\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n self.ddl_1_widget = a_widget\n\n # ---- 2\n a_widget = ttk.Combobox( parent,\n width = self.arg_width + 5,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_2_widget = a_widget\n\n # self.ddl_widgets = [ self.ddl_0_widget, self.ddl_1_widget, self.ddl_2_widget, ]\n # print( self.ddl_widgets[ 0 ] == self.ddl_widgets[ 1 ])\n\n #self.load_ddl_0( )", "def __str__(self):\n return self.choice_text", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def comboBoxes(self):\r\n # Cities Combo Button\r\n self.comboCities = QComboBox()\r\n self.comboCities.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboCities.addItems(\r\n ['Girón', 'Piedecuesta', 'Floridablanca', 'Bucaramanga'])\r\n self.grid.addWidget(self.comboCities, 6, 1, 1, 2)\r\n self.comboCities.setCurrentText(\"Bucaramanga\")\r\n # Payment Combo Button\r\n self.comboPayment = QComboBox()\r\n self.comboPayment.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboPayment.addItems(['Efectivo', 'Nequi'])\r\n self.grid.addWidget(self.comboPayment, 7, 1, 1, 2)", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def list_selector_widget(members=None,\n preselect=None,\n entry=False,\n callback=None):\n store, i=generate_list_model(members,\n active_element=preselect)\n\n if entry:\n combobox=gtk.ComboBoxEntry(store, column=0)\n else:\n combobox=gtk.ComboBox(store)\n cell = gtk.CellRendererText()\n combobox.pack_start(cell, expand=True)\n combobox.add_attribute(cell, 'text', 0)\n combobox.add_attribute(cell, 'background', 2)\n\n combobox.set_active(-1)\n if i is None:\n i = store.get_iter_first()\n if i is not None:\n combobox.set_active_iter(i)\n\n if entry:\n def get_current_element(combo):\n try:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n except 
(TypeError, AttributeError):\n return unicode(combo.child.get_text())\n def set_current_element(combo, t):\n combo.child.set_text(t)\n else:\n def get_current_element(combo):\n if combo.get_active_iter() is not None:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n else:\n return None\n def set_current_element(combo, el):\n # Find the index of the element\n l=[ t[0] for t in enumerate(combo.get_model()) if t[1][1] == el ]\n if l:\n # The element is present.\n combo.set_active(l[0])\n else:\n combo.set_active_iter(combo.get_model().append( (unicode(el), el, None) ))\n\n # Bind the method to the combobox object\n combobox.get_current_element = get_current_element.__get__(combobox)\n combobox.set_current_element = set_current_element.__get__(combobox)\n\n if callback is not None:\n combobox.connect('changed', callback)\n\n return combobox", "def initDefaultChoices(self):\n return []", "def occurrence_choices():\n OCCURRENCE_CHOICES = [('one_off', 'One Off'), ('weekly', 'Weekly'), ('fortnightly', 'Fortnightly')]\n occurrence = forms.ChoiceField(choices=OCCURRENCE_CHOICES, widget=forms.RadioSelect())\n return occurrence", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def DrawComboBox(*args, **kwargs):\n return _gdi_.RendererNative_DrawComboBox(*args, **kwargs)", "def fill_combobox(self):\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 1 ORDER BY last_name ASC\"\n self.CB_employee.addItem(\"\")\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 0 ORDER BY last_name ASC\"\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))", "def get_text_type():\n opt = ['text', 'email', 'password']\n inp = option_menu(opt, 'Select text type:')\n\n # mark text type with option\n OPTIONS['text-type'] = opt[inp]\n\n # add option to collected list\n add_to_collected('text type', opt[inp])\n\n return", "def get_context(self, name, value, attrs=None, choices=()):\n context = super(TriStateCheckboxSelectMultiple, self).get_context(\n name, value, attrs\n )\n\n choices = dict(it.chain(self.choices, choices))\n if value is None:\n value = dict.fromkeys(choices, False)\n else:\n value = dict(dict.fromkeys(choices, False).items() +\n value.items())\n\n context['values'] = [\n (choice, label, value[choice])\n for choice, label in choices.iteritems()\n ]\n\n return context", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def set_dropdown_b_options(value):\n options_c = []\n if value=='C':\n options_c = [{'label': '1', 'value': '1'},\n {'label': '2', 'value': '2'}]\n if value == 'D':\n options_c = [{'label': '3', 'value': '3'},\n {'label': '4', 'value': '4'}]\n if value=='E':\n options_c = [{'label': '5', 'value': '5'},\n {'label': '6', 'value': '6'}]\n if value == 'F':\n options_c = 
[{'label': '7', 'value': '7'},\n {'label': '8', 'value': '8'}]\n return options_c", "def objects_to_choices(queryset):\n res = []\n for elm in queryset:\n res.append((elm.pk, unicode(elm)))\n return res", "def choice(text, choices, **kwargs):\n return click.prompt(click.style('> {}'.format(text), fg='blue', bold=True),\n type=click.Choice(choices),\n **kwargs)", "def on_comboBox_enceinte_activated(self, index):\n nom_enceinte = self.comboBox_enceinte.currentText()\n marque = [x[2] for x in self.enceintes if x[1] == nom_enceinte][0]\n n_serie = [x[4] for x in self.enceintes if x[1] == nom_enceinte][0]\n model =[x[3] for x in self.enceintes if x[1] == nom_enceinte][0]\n \n \n self.lineEdit_marque.setText(marque)\n self.lineEdit_n_serie.setText(n_serie)\n self.lineEdit_model.setText(model)", "def list_selector(title=None,\n text=None,\n members=None,\n controller=None,\n preselect=None,\n entry=False):\n combobox = list_selector_widget(members=members,\n preselect=preselect,\n entry=entry)\n\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n d.vbox.add(combobox)\n combobox.show_all()\n\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n res=d.run()\n retval=None\n if res == gtk.RESPONSE_OK:\n retval=combobox.get_current_element()\n d.destroy()\n return retval", "def display_choose(self, text, choices):\n cur_index = 0\n key = None\n while key != 'KEY_NEWLINE':\n if key == 'KEY_UP':\n cur_index = max(cur_index - 1, 0)\n elif key == 'KEY_DOWN':\n cur_index = min(cur_index + 1, len(choices) - 1)\n self.stdscr.erase()\n for line in text:\n self.stdscr.addstr(f'{PADCHAR}{line}\\n')\n for index, value in enumerate(choices):\n self.stdscr.addstr('\\n')\n self.stdscr.addstr(PADCHAR)\n self.stdscr.addstr(value, color_pair(7 if index == cur_index else 1))\n self.stdscr.addstr(f'\\n\\n{PADCHAR}') \n key = self.get_key() \n return cur_index", "def get_classes(self, code):\n \n select = v.Combobox(\n _metadata={'name':code}, \n items=self.items, \n v_model=None, \n dense=True,\n hide_details=True\n )\n \n select.observe(partial(self.store, code), 'v_model')\n \n return select", "def getOptionsNames(self) -> List[unicode]:\n ...", "def the_option_named(text: str) -> \"SelectByText\":\n return SelectByText(text)", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value 
error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def choice(choices=[], message=\"Pick something.\", title=None):\n return dialog(\n \"choice\",\n choices=choices,\n message=message,\n title=title,\n )", "def autocomplete_possibilities():\n try:\n # get data sent by client\n typed_input = request.args.get('q')\n print(' ')\n print('\\n------ getting autocomplete_possibilities ------')\n print(f\"recived: input:{typed_input}\")\n\n # call the google API\n results = gmaps.places_autocomplete(typed_input)\n data = [\n {'value': r['place_id'], 'text': r['description']}\n for r in results\n ]\n\n # Pass data to the front end\n print(f'returning: {data}')\n return jsonify(data)\n\n except Exception as e:\n print(\"AJAX excepted \" + str(e))\n return str(e)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def build_options(slot, snacks):\n \n if slot == 'Fast':\n return [\n {'text': 'Pizza', 'value': 'Pizza'},\n {'text': 'Fries', 'value': 'Fries'},\n {'text': 'Franky', 'value': 'Franky'},\n {'text': 'Burger', 'value': 'Burger'},\n {'text': 'Sandwich', 'value': 'Sandwich'}\n \n \n ]\n elif slot == 'drink':\n return [\n {'text': 'Coca-Cola', 'value': 'Coca-cola'},\n {'text': 'Appy', 'value': 'Appy'},\n \n {'text': 'Beer', 'value': 'Beer'},\n {'text': 'Frooti', 'value': 'Frooti'},\n {'text': 'Pepsi', 'value': 'Pepsi'}\n \n ]", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def _getChoices(self, acronym):\n # get matches from acronymDB\n matches = []\n if(acronym in self.acronymDB):\n matches += self.acronymDB[acronym]\n if(acronym[-1] == \"s\" and acronym[:-1] in self.acronymDB):\n matches += self.acronymDB[acronym]\n\n # create training data\n X_train, y_train = [], []\n for definition, articleID, ignored_var in matches:\n text = self.articleDB[articleID]\n X_train.append(\n ExpansionChoice(article_id=articleID, article_text=text))\n y_train.append(definition)\n\n # create y labels to group similar acronyms\n y_labels, labelToExpansion = self._processChoices(y_train)\n\n return X_train, y_labels, labelToExpansion", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def set_dropdown_b_options(value):\n options_b = []\n if value=='A':\n options_b = [{'label': 'C', 
'value': 'C'},\n {'label': 'D', 'value': 'D'}]\n if value == 'B':\n options_b = [{'label': 'E', 'value': 'E'},\n {'label': 'F', 'value': 'F'}]\n return options_b", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def before_choose_candidate_listener(self, session, task):\n choices = [PromptChoice('d', 'eDit', self.importer_edit)]\n if task.candidates:\n choices.append(PromptChoice('c', 'edit Candidates',\n self.importer_edit_candidate))\n\n return choices", "def _get_completion(self, original_text, remaining_text, options):\n if self.should_hide_completions(original_text=original_text,\n remaining_text=remaining_text,\n allowed_suffixes=(\" \", \".\")):\n return []\n\n return [(option, len(remaining_text)) for option in options\n if option.startswith(remaining_text) and not option.startswith(\"_\")]" ]
[ "0.6981832", "0.64423245", "0.64423245", "0.6404376", "0.63695693", "0.62808853", "0.615837", "0.61527145", "0.60969144", "0.60806596", "0.6074747", "0.59950155", "0.59769976", "0.59458613", "0.59082", "0.5853147", "0.5850785", "0.58412063", "0.58367765", "0.58151263", "0.58029526", "0.57948583", "0.5792428", "0.5791611", "0.57795537", "0.575053", "0.57465565", "0.5732782", "0.57096493", "0.57060355", "0.56966597", "0.5679937", "0.563918", "0.5615509", "0.5599425", "0.55810773", "0.557214", "0.55665624", "0.5565737", "0.55615264", "0.5538559", "0.55243224", "0.552361", "0.5519529", "0.5518102", "0.55137795", "0.5486652", "0.5486258", "0.5484074", "0.5468984", "0.54495764", "0.5447661", "0.5433353", "0.54303634", "0.5424338", "0.5411812", "0.5406592", "0.5394407", "0.5391574", "0.53862125", "0.53735393", "0.53548867", "0.5352198", "0.5345724", "0.5333625", "0.53295", "0.53213644", "0.5318512", "0.53181034", "0.5290123", "0.526435", "0.5263117", "0.5259166", "0.5246472", "0.5245967", "0.5245039", "0.5238816", "0.52362823", "0.52285147", "0.5223594", "0.5216064", "0.5216059", "0.5197635", "0.51916355", "0.5189832", "0.51882505", "0.5170863", "0.5163809", "0.5163533", "0.51623505", "0.51605934", "0.5157795", "0.5157584", "0.5154029", "0.5144694", "0.5143628", "0.5143539", "0.5141602", "0.5133782", "0.5127406" ]
0.5867416
15
Return initial stored value for new nodes
def getInitDefault(self):
    if self.initDefault in DateFormat.dateStampStrings:
        return GenDate().dateStr()
    return TextFormat.getInitDefault(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_new_value(self):\r\n if self.initial_value is None:\r\n return None\r\n\r\n return deepcopy(self.initial_value)", "def initial_value(self):\n return self._initial_value", "def initial(self):\n return zero", "def initial_value(self) -> float:\n return self._initial_value", "def get(self, node):\n if node in self.val:\n return self.val[node]\n else:\n return self.initial", "def _node_defaults(self):\n parent = super(QTree, self)._node_defaults()\n parent[\"state\"] = np.zeros([self.size, self.size])\n parent[\"network\"] = self\n return parent", "def lazy_value(self):\n\n if self.state == Node.State.VALID:\n return self.value\n else:\n return None", "def _set_default_node(self, key):\n if key not in self._key_to_node_index:\n self._key_to_node_index[key] = self._graph.add_node(NodeData(key=key, equivs=[]))\n return self._key_to_node_index[key]", "def __init__(self):\n self.root = self.get_new_node();", "def generate_initial_state(self, x):\n\n if self.initial_state is None:\n x[:] = 0\n return x\n else:\n x[:] = self.initial_state(size=(self._num_neurons, 1))\n return x", "def getInitialValue(self):\n return _libsbml.Trigger_getInitialValue(self)", "def value(self):\n\n return deepcopy(self._node_id)", "def initial_nodes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"initial_nodes\")", "def refresh(self):\n self._pre_action_check('refresh')\n if hasattr(self, '_id'):\n node = self.inflate(self.cypher(\"START n=node({self}) RETURN n\")[0][0][0])\n for key, val in node.__properties__.items():\n setattr(self, key, val)\n else:\n raise ValueError(\"Can't refresh unsaved node\")", "def initial_state(self):\n return 0", "def get_initial(self):\n\t\treturn self.initial", "def calculate_gn_value(self, current_path_length) :\r\n\r\n self.gn_value = (current_path_length) #The g(n) value is the distance of the path if the node is traversed\r", "def getValue(self):\n return self.initValue", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self):\n\n\t\tself.root = None\n\t\tself.numNodes = 0", "def get_initial(self):\n return self.initial[:]", "def value(self):\n return self.node_value", "def __init__(self, data: str):\n self.root = Node(data)\n self.node_count = 1\n self.node_of_last_computed_hash = 0", "def _default_value(self):\n raise NotImplementedError", "def __init__(self, value, parent = None):\n # initialize new node\n self.value = value\n self.parent = parent\n self.left = None\n self.right = None\n self.height = 1", "def initial_state(self):\n return None", "def __init__(self):\n self.node = None\n self.data = None", "def init_id(root: TreeNode):\n current_id = [0]\n init_id_helper(root, current_id)\n return current_id[0]", "def default_value(self):\n if self.default:\n return copy.deepcopy(self.default)\n else:\n return None", "def initialize(self):\n self._value = self.initializer.evaluate(self)\n return self.value()", "def __init__(self, root_value):\n self.root = self.TreeNode(value=root_value)", "def value(self):\n self.refresh_default_value()\n return self.default_value", "def get_initial_state(self):\n return self.get_state(self.get_initial_observation())", "def __init__(self):\n self.start = 
Node('-1')", "def _default_value(self):\n return None", "def get_default_value(self):\n pass", "def get_initial_value(\n self, rel_name):\n return self._np_initval[rel_name].transpose()", "def initial(self) -> np.ndarray:\n return self._dist['initial']", "def __init__(self, val=None):\r\n self.root = {}", "def get_starting_node(self, graph):\n return random.choice(list(graph.nodes))", "def default(self):\r\n return self.default_value()", "def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0", "def __init__(self):\n self.end_of_ngram = False #Flag marking whether this node is the end of an n-gram.\n self.value = None #Provided that the node marks the end of an n-gram, this refers to the value mapped by this n-gram.\n self.children = dict() #A dictionary which maps the next elements in the current path of the prefix tree to the respective node of the tree.", "def initialize_node(db, c):\n\n # have we already called this function?\n if saq.SAQ_NODE_ID is not None:\n return\n\n saq.SAQ_NODE_ID = None\n\n # we always default to a local node so that it doesn't get used by remote nodes automatically\n c.execute(\"SELECT id FROM nodes WHERE name = %s\", (saq.SAQ_NODE,))\n row = c.fetchone()\n if row is not None:\n saq.SAQ_NODE_ID = row[0]\n logging.debug(\"got existing node id {} for {}\".format(saq.SAQ_NODE_ID, saq.SAQ_NODE))\n\n if saq.SAQ_NODE_ID is None:\n execute_with_retry(db, c, \"\"\"INSERT INTO nodes ( name, location, company_id, is_local, last_update ) \n VALUES ( %s, %s, %s, %s, NOW() )\"\"\", \n (saq.SAQ_NODE, saq.API_PREFIX, saq.COMPANY_ID, True),\n commit=True)\n\n c.execute(\"SELECT id FROM nodes WHERE name = %s\", (saq.SAQ_NODE,))\n row = c.fetchone()\n if row is None:\n logging.critical(\"unable to allocate a node_id from the database\")\n sys.exit(1)\n else:\n saq.SAQ_NODE_ID = row[0]\n logging.info(\"allocated node id {} for {}\".format(saq.SAQ_NODE_ID, saq.SAQ_NODE))", "def get_first(self) -> object:\n if self.root is None: # If tree is empty\n return None\n\n return self.root.value # Returning root value", "def __init__(self, initial_state):\n self.initial_state = initial_state\n self.final_state = [1, 2, 3, 8, 0, 4, 7, 6, 5]\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)\n self.results = []", "def compute_default(self):\n if self.default is None and callable(self.compute_default_fn):\n self.default=self.compute_default_fn() \n if self.default not in self.objects:\n self.objects.append(self.default)", "def __init__(self):\n self._idx = Node.index\n Node.index += 1", "def initial_state(self, parameters = None):\n if parameters is None:\n parameters = self._get_static_parameters_or_die()\n return Value(\n state=ed.Categorical(logits=parameters.get('initial_dist_logits')))", "def initialstate(self):\n return self.problem.initialstate", "def zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0", "def __init__(self):\n self.root = None\n self.k = None", "def __init__(self):\n self.root = None\n self.k = None", "def _fill_root(self):\n if self.parent in filled_variables:\n return f\"{self.name} {st_persistent_perc}P {st_k}k\"\n return self.name", "def get_initial(self):\n return self.initial", "def _new_node(self):\n self._size += 1\n return self._node_factory()", "def initial_state(self):\n # Network details elided.\n initial_state = None\n\n return initial_state", "def __init__(self, value, prev=None, next=None):\n\n self.prev = prev # the node before this one — 
defaults to None\n self.value = value # the value to store\n self.next = next # the node after this one — defaults to None", "def initial(self):\n\n self.var.Kfrost = loadmap('Kfrost')\n self.var.Afrost = loadmap('Afrost')\n self.var.FrostIndexThreshold = loadmap('FrostIndexThreshold')\n self.var.SnowWaterEquivalent = loadmap('SnowWaterEquivalent')\n\n # FrostIndexInit=ifthen(defined(self.var.MaskMap),scalar(loadmap('FrostIndexInitValue')))\n # self.var.FrostIndex=FrostIndexInit\n self.var.FrostIndex = loadmap('FrostIndexInitValue')\n # self.var.AfrostIndex=-(1-self.var.Afrost)*self.var.FrostIndex\n # initial Frost Index value", "def getRandom(self) -> int:\n\n return random.choice(self.nodes).val", "def __init__(self):\n self.left = None\n self.right = None\n self.depth = 0\n self.val = None\n self.id = None", "def DefaultValue(self):\n return tf.zeros(self.shape, dtype=self.dtype)", "def DefaultValue(self):\n return tf.zeros(self.shape, dtype=self.dtype)", "def __init__(self, initial_node):\n self.__nodes = MinPriorityQueue({initial_node : initial_node.estimate})", "def __init__(self):\n self.sum_of_node_inputs = 0\n self.output = 0\n self.delta = 0\n self.dp = 0\n self.onehot_label = 0", "def _getDefaultValue(self):\n value = self._getDefaultValue()\n return value.getData() if value else None", "def test_find_highest_value_node_first(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [1.0, 1.0]\n nn.layers[3].nodes[1].weights = [0.0, 0.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '10')", "def get_value(self) -> T:\n return clone(self.default_value)", "def get_value(self):\n if not self.visited:\n # first visit at node\n self.visited = True\n\n # value calculation\n for node, weight in self.predecessors:\n self.value += (node.get_value() * weight)\n\n # applying activation function\n if self.activation is not None:\n self.activation()\n\n self.calculated = True\n\n return self.value\n else:\n # visited node\n if self.calculated:\n # calculated in this computation\n return self.value\n else:\n # recurrent connection\n return self.past_value", "def first_value(self):\n return 0", "def _assign_init(self, first_item):\r\n if hasattr(self.scalar_op, 'identity'):\r\n return str(self.scalar_op.identity)\r\n else:\r\n assert isinstance(self.scalar_op, (scal.Maximum,\r\n scal.Minimum))\r\n return first_item", "def _assign_init(self, first_item):\r\n if hasattr(self.scalar_op, 'identity'):\r\n return str(self.scalar_op.identity)\r\n else:\r\n assert isinstance(self.scalar_op, (scal.Maximum,\r\n scal.Minimum))\r\n return first_item", "def __call__(self):\n value = self._value\n if value is None:\n value = self._init()\n self._value = value\n return value", "def state_initial(self):\n return self.states_initial()[0]", "def __init__(self):\n self.idx = None\n self.val = None\n self.left = None\n self.right = None", "def get_value(self):\r\n return 0", "def mutate(self, node, _):\n new_node = ast.Num(n=node.n + 1)\n return new_node", "def root_value(self):\n return self.__root.get_value()", "def initial(self):\n return self.args[3]", "def __init__(self):\n self.number = None\n self.nodes = []\n self.type = None\n self.group = None\n self.material = None\n self.key = -1", "def __init__(self):\n\n self.nodes = {}", "def identity(self):\r\n self.piDD = {\"[1]\": None}\r\n 
self.top_node = \"[1]\"\r\n self.dim = 0", "def get_first(self) -> object:\n #binary search tree == empty\n if self.root is None:\n return None\n\n # return\n return self.root.value", "def starting_point(self, random=False):\n sqrt_C = sqrtm(self.covariance)\n sqrt_L = np.sqrt(self.mean_intensity)\n if random:\n random_matrix = np.random.rand(self.n_nodes, self.n_nodes)\n M, _ = qr(random_matrix)\n else:\n M = np.eye(self.n_nodes)\n initial = np.dot(np.dot(sqrt_C, M), np.diag(1. / sqrt_L))\n return initial", "def __init__(self):\n self.root = Node(None)", "def setInitialValue(self, *args):\n return _libsbml.Trigger_setInitialValue(self, *args)", "def __init__(self, abstract_value=None, representation=None, index=None):\n\n if len(list(filter(None, [abstract_value, representation, index]))) != 1:\n raise ValueError('Expected exactly one initial value')\n\n if index is not None:\n self.index = index\n self._abstract_value = None\n else:\n self.index = batch.add_rows('vals_'+node_type.id, tf.zeros([1, node_type.value_type.representation_shape]))[0]\n node_type.value_type.__init__(abstract_value=abstract_value, representation=representation)\n del self._representation", "def __init__(self):\n self.val = None", "def __init__(self):\n self.root = Node('')", "def rec_default(self):\n pass", "def __init__(self):\n super().__init__()\n self._value = 0", "def default_value(self) -> float:\n return pulumi.get(self, \"default_value\")", "def getDefault():", "def prepare_node(self, node):\n # Every change at the position of node will be recognized\n aexpr(lambda: node.position, globals(), locals())\\\n .on_change(lambda obs, oldv, newv: self.set_node_position(node, *newv))", "def value(self):\n\n if self.state == Node.State.VALID:\n return self._value\n else:\n with _NodeStackFrame(self):\n self.state = Node.State.PENDING\n self.value = self.compute_value(*self.args, **self.kwargs)\n return self._value", "def fillNode(node, grounding, db):\n gn = copy.deepcopy(node)\n gn.val = query(gn, grounding, db)\n return gn", "def _init_node_attributes(self):\n assert False", "def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None", "def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None", "def _update_min(self):\n tmp = self\n while tmp.left is not None:\n tmp = tmp.left\n return tmp.parent.key" ]
[ "0.6917573", "0.6599723", "0.64693916", "0.62434894", "0.6088791", "0.605029", "0.6040265", "0.5898664", "0.58754563", "0.5820543", "0.5805469", "0.5794411", "0.57890224", "0.57560384", "0.5747938", "0.57314616", "0.5730434", "0.5725593", "0.57140493", "0.57140493", "0.56994367", "0.56713814", "0.5666693", "0.56612456", "0.5652481", "0.5645944", "0.56458586", "0.5624305", "0.56189084", "0.5617808", "0.56137997", "0.5607398", "0.5596775", "0.55964184", "0.5596218", "0.55950594", "0.558086", "0.55720526", "0.5569308", "0.55512476", "0.55413604", "0.5540463", "0.5531693", "0.5495316", "0.5485417", "0.5478051", "0.54755235", "0.5470715", "0.5461766", "0.546034", "0.5433562", "0.54317236", "0.54287916", "0.54287916", "0.54279566", "0.54207116", "0.54133886", "0.5410908", "0.5410448", "0.54101855", "0.540541", "0.5404037", "0.5400453", "0.5400453", "0.5394933", "0.5392656", "0.53907937", "0.538711", "0.5385489", "0.5383486", "0.53627026", "0.5347435", "0.5347435", "0.5345404", "0.5340262", "0.532986", "0.5327101", "0.53193027", "0.5317672", "0.5315903", "0.5313174", "0.5310825", "0.53099066", "0.5309453", "0.53000796", "0.5295013", "0.52859294", "0.5282753", "0.52809393", "0.5268559", "0.5235831", "0.52314967", "0.5229555", "0.5224865", "0.52206385", "0.5219064", "0.521509", "0.5213956", "0.52090657", "0.52090657", "0.5209046" ]
0.0
-1
Set initial value from editor version using edit format option
def setInitDefault(self, editText):
    if editText in DateFormat.dateStampStrings:
        self.initDefault = DateFormat.dateStampStrings[0]
    else:
        TextFormat.setInitDefault(self, editText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def getEditInitDefault(self):\n return self.formatEditText(self.initDefault)[0]", "def setInitDefault(self, editText):\n self.initDefault = self.storedText(editText)[0]", "def getEditInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def on_editor_save(self):\n self.text = self.textWidget.get(\"1.0\", tk.END)", "def setModelData(self, editor, model, index):\n try:\n date = datetime.strptime(str(editor.text()), self.format)\n model.setData(index, date, Qt.EditRole)\n except:\n pass # If the text does not conform to the date format, do nothing.", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def initFormat(self):\n pass", "def update_editor ( self ):\n font = self.factory.to_wx_font( self )\n try:\n self._facename.SetStringSelection( font.GetFaceName() )\n except:\n self._facename.SetSelection( 0 )\n try:\n self._point_size.SetStringSelection( str( font.GetPointSize() ) )\n except:\n self._point_size.SetSelection( 0 )\n font.SetPointSize( min( 10, font.GetPointSize() ) )\n self._font.SetValue( self.str_value )\n self._font.SetFont( font )", "def set_format(cls,format):\n import __main__\n IP = __main__.__dict__['__IP']\n prompt = getattr(IP.outputcache,cls._prompt)\n prompt.p_template = format\n prompt.set_p_str()\n cls._format = format", "def _set_settings_version(c, settings_path, version_line):\n version_const = \"VERSION\"\n\n print(f\"Adjusting {version_const} in {settings_path} to {version_line}...\")\n c.run(f'sed -i .orig \\'s/^{version_const} =.*$/{version_const} = \"{version_line}\"/\\' \"{settings_path}\"')", "def readVersion(self):\n ds = self.root.findall(\"[@format]\")[0]\n raw_format = ds.attrib['format']\n try:\n self.documentFormatVersion = int(raw_format)\n except ValueError:\n # as of fontTools >= 3.27 'format' is formatted as a float \"4.0\"\n self.documentFormatVersion = float(raw_format)", "def setValue(self,val):\n if self._plain:\n self.input.setPlainText(str(val))\n else:\n updateText(self.input,str(val))", "def defaultLoad (self):\n self.srcEditor.setText( \"\" )\n self.srcEditor.setFocus()\n self.setReadOnly( readOnly=False )", "def createEditor(self, parent, option, index):\n editor = QLineEdit(parent)\n date = index.model().data(index, Qt.DisplayRole)\n editor.setText(date.strftime(self.format))\n return editor", "def testSetEditorValue(self):\r\n \r\n lineEdit = QtGui.QLineEdit()\r\n self._editorFactory.setEditorValue(lineEdit, u\"Test\")\r\n self.assertTrue(lineEdit.text() == u\"Test\" )\r\n \r\n spinBox = QtGui.QDoubleSpinBox()\r\n self._editorFactory.setEditorValue(spinBox, 2.05)\r\n self.assertTrue(spinBox.value() == 2.05)\r\n \r\n checkBox = QtGui.QCheckBox()\r\n self._editorFactory.setEditorValue(checkBox, True)\r\n self.assertTrue(checkBox.isChecked() == True)", "def edit():", "def setValue(self,val):\n val = str(val)\n if self._plain:\n self.input.setText(val)\n else:\n updateText(self.input,val)", "def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass", "def setContentData(self, content):\n original = content\n if 
IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMIEditForm, self).setContentData(content)", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def setValue(self,val):\n val = int(val)\n self.input.setText(str(val))", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def setEditorData(self, ledit, midx):\n cond = self._sel.give_cond(midx.row())\n val = cond[midx.column()]\n txt = \"\"\n if val is not None:\n txt = str(val)\n ledit.setText(txt)", "def setEditorData(self, ledit, midx):\n cond = self._sel.give_cond(midx.row())\n val = cond[midx.column()]\n txt = \"\"\n if val is not None:\n txt = str(val)\n ledit.setText(txt)", "def edition(self, key, value):\n return clean_val(\"a\", value, str).replace(\"ed.\", \"\")", "def on_widget_edited(self, value): # this is a slot\n # note this is exactly the same as @value.setter...\n self.value = value", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def update_format_string(self):\n if self._show_units:\n units = \" {}\".format(self._unit)\n else:\n units = \"\"\n\n if self._show_step_exponent:\n self.setSuffix(\"{0} Step: 1E{1}\".format(units, self.step_exponent))\n self.lineEdit().setToolTip(\"\")\n else:\n self.setSuffix(units)\n self.lineEdit().setToolTip('Step: 1E{0:+d}'.format(self.step_exponent))", "def __set__(self, instance, value):\n # make sure value follows \"major,minor,build\" convention\n if not is_version_valid(value):\n raise InvalidVersionFormat(\"Version: {0} is invalid\".format(value))\n\n super().__set__(instance, value)", "def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMISubEditForm, self).setContentData(content)", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def opt_format(self, fmt):\n key = get_enum_key(fmt, FORMATTERS)\n if key is not None:\n self.conf[\"format\"] = key\n print(\"Set format %r\" % key)\n else:\n print(\"Unknown format %r\" % fmt)", "def setValue(self,val):\n self.input.setText(str(val))", "def special_case(self):\n Input.clear_display(self, self.entries[4])\n self.entries[4].insert(INSERT, '1712/02/30 was a real date in Sweden')\n self.entries[4].configure(state='readonly')", "def _set_real_format(self, fmt):\n # try to use the _nomax variant if available\n if not self._max and fmt + '_nomax' in self.formats:\n self._format = self.formats[fmt + '_nomax']\n elif fmt in self.formats:\n self._format = self.formats[fmt]\n else:\n self._format = fmt\n\n self._format_line_count = self._format.count('\\n')", "def updateeng(self):\n self.enstr = self.enEdit.text()", "def format_cell_updated(self, cell, value=None):\n self.is_not_used()\n if value is not None:\n cell.value = value\n\n cell.fill = PatternFill(start_color='7fffd4', end_color='7fffd4', fill_type='solid')\n cell.font = Font(name='Ubuntu', size=11, color='555555', bold=False, italic=False)", "def dummy():\n\t\t\tself.edit = True", "def convert_format(self, 
new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format", "def setValue(self,val):\n val = float(val)\n self.input.setText(str(val))", "def entry_a_modified(self, content):\n if content.isdigit():\n self.model.number_a = int(content)\n self.show_calculations()", "def _init_edit(self):\n def edit(core, args):\n month = ' '.join(getattr(args, 'month', []))\n core.edit(month)\n\n usage = 'stl edit [month]'\n desc = (\n 'lets you vim the right file'\n )\n\n subp = self.subparsers.add_parser(\n 'edit', usage=usage, description=desc, help=desc)\n\n subp.add_argument(\n 'month', nargs=argparse.REMAINDER,\n help='the month you want to edit, e.g. oct 2016')\n\n subp.set_defaults(func=edit)", "def set_version(self, bundle, ctx, filename, version):", "def __init__(self, value: str):\n self.options = [\n \"v1.0\"\n ]", "def change_exteditor(self):\r\n path, valid = QInputDialog.getText(self, self.tr('External editor'),\r\n self.tr('External editor executable path:'),\r\n QLineEdit.Normal,\r\n CONF.get(self.ID, 'external_editor/path'))\r\n if valid:\r\n CONF.set(self.ID, 'external_editor/path', unicode(path))", "def set_version(v):\n old = get_version()\n sys.stderr.write('%s -> %s\\n' % (old, v))\n with open(INIT, 'r+') as f:\n text = f.read()\n text = pattern.sub(\"__version__ = %r\" % v, text)\n f.seek(0)\n f.truncate()\n f.write(text)", "def _init_obo_version(self, line):\n if line[0:14] == \"format-version\":\n self.format_version = line[16:-1]\n if line[0:12] == \"data-version\":\n self.data_version = line[14:-1]", "def reformat():\n toolkit.reformat()", "def _on_changed(self, entry, index):\r\n\r\n from re import sub\r\n from decimal import Decimal\r\n\r\n if index == 5:\r\n _text = entry.get_text()\r\n _text = Decimal(sub(r'[^\\d.]', '', _text))\r\n elif index in [16, 17]:\r\n _text = int(entry.get_text())\r\n else:\r\n _text = float(entry.get_text())\r\n\r\n self._modulebook.update(index, _text)\r\n\r\n return False", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def format(self):\n ...", "def _update_default(self, default_value):\n if self.type == \"uri_folder\" or self.type == \"uri_file\":\n self.default = default_value\n return\n else:\n if isinstance(default_value, float) and not math.isfinite(default_value):\n # Since nan/inf cannot be stored in the backend, just ignore them.\n # logger.warning(\"Float default value %r is not allowed, ignored.\" % default_value)\n return\n \"\"\"Update provided default values.\n Here we need to make sure the type of default value is allowed or it could be parsed..\n \"\"\"\n if default_value is not None and not isinstance(default_value, self._allowed_types):\n try:\n default_value = self._parse(default_value)\n except Exception as e:\n if self.name is None:\n msg = \"Default value of %s Input cannot be parsed, got '%s', type = %s.\" % (\n self.type,\n default_value,\n type(default_value),\n )\n else:\n msg = \"Default value of %s Input '%s' cannot be parsed, got '%s', type = %s.\" % (\n self.type,\n self.name,\n default_value,\n type(default_value),\n )\n raise MldesignerComponentDefiningError(cause=msg) from e\n self.default = default_value", "def edit_date(entry):\n entry.date = get_date()\n entry.save()\n input(\"Edit successful. 
\")\n return entry", "def setDefaultValue(self, value: int, extend_range: bool=False):\n self.setPlaceholderText(str(self.__validate(value, extend_range)))\n if self.getCurrentValue() is None:\n self.__commitValue()", "def set_field_value(index, value):\r\n elem = world.css_find('.metadata_edit div.wrapper-comp-setting input.setting-input')[index]\r\n elem.value = value\r\n elem.type(Keys.TAB)", "def _update_editor(self):\n root = self.model.data_list\n root.append(RowModel(name='', value=''))\n del root[-1]", "def edit(self, new_content: str) -> None:\n\n # YOUR CODE HERE\n self.content = new_content", "def setField(self, data):\n\t\tview = self.view\n\t\tview.sbAbstraccion.setValue(data['sbAbstraccion'])", "def update_column_format(self):\n pass", "def set_statement_default_value(self, value):\n self.set_value_into_input_field(self.statement_default_value_textbox_locator, value)\n self.click_element(self.statement_fields_bulk_edit_popup_title_locator)", "def rec_default(self):\n self.phase_triggers.setText('(0,1,320)')\n self.phase_min.setText('-1.57')\n self.phase_max.setText('1.57')", "def createEditor(self, parent, options, midx):\n ledit = qt.QLineEdit(parent)\n vmin, vmax = self._vrange\n dnb = self._decimals_nb\n ledit.setValidator(ValueValidator(vmin, vmax, dnb, ledit))\n return ledit", "def __init__(self, value: str):\n self.options = [\n \"m\",\n ]", "def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def with_default_format(self, other):\n return evolve(\n self,\n set_format=self.set_format or other,\n default_format=other\n )", "def _setEditorText(self, text):\n if self.__lineEditKind:\n self._editor.setText(text)\n else:\n self._editor.setEditText(text)\n if text and self._editor.findText(text) == -1:\n self._editor.insertItem(0, text)", "def set_default_mode(args):\n default_repr = parser_opts[args.inputFormat].default_representation\n if not args.representation:\n args.representation = default_repr\n if args.representation != default_repr:\n log.info(\"Will convert from %s -> %s representation\", default_repr, args.representation)\n else:\n log.info(\"Using default %s particle representation\", args.representation)", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. 
If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def set_edits(self):\n self._window.input_line.setPlaceholderText('Input item to import')\n self._window.output_text.setPlaceholderText('Import Item')", "def reset(self):\n self.setPlainText(self.label)\n self.setEditable(False)\n if (len(str(self.label)) > 0):\n self.setTextWidth(-1)\n else:\n self.setTextWidth(CurrentTheme.VERSION_LABEL_MARGIN[0])\n \n if self.isTag:\n self.setFont(CurrentTheme.VERSION_FONT)\n else:\n self.setFont(CurrentTheme.VERSION_DESCRIPTION_FONT) \n self.updatePos()\n self.parentItem().updateWidthFromLabel()", "def initFormat(self):\n self.formatList = []", "def update(self, instance: Snippet, validated_data: dict) -> Snippet:\n instance.title = validated_data.get('title', default=instance.title)\n instance.code = validated_data.get('code', default=instance.code)\n instance.language = validated_data.get('language', default=instance.language)\n instance.style = validated_data.get('style', default=instance.style)\n instance.save()\n return instance", "def entry_b_modified(self, content):\n if content.isdigit():\n self.model.number_b = int(content)\n self.show_calculations()", "def setCurrentValue(self, value: int, extend_range: bool=False):\n self.setText(str(self.__validate(value, extend_range)))\n self.__commitValue()", "def asformat(self, format):", "def __init__(self, value: str):\n self.options = [\n \"mg.min.m-3\",\n \"kg.s.m-3\"\n ]", "def str_entered(self, tf, name):\n section, option = name\n text = tf.text\n _stash.config.set(section, option, text)\n self.save()", "def getInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)", "def __init__(self, value: str):\n self.options = [\n \"m3.s-1\",\n ]", "def get_initial(self):\n\t\n\t#Getting the initial data and setting it\n initial = super(UpdateView, self).get_initial()\n\timage_ref = default_value.get_setting('compute', 'image_ref') \n flavor_ref = default_value.get_setting('compute', 'flavor_ref')\n initial.update({'test_id': self.kwargs['test_id'], 'image_ref': image_ref, 'flavor_ref': flavor_ref})\n return initial", "def __editorConfigChanged(self, editor):\n fn = editor.getFileName()\n line, pos = editor.getCursorPosition()\n enc = editor.getEncoding()\n lang = editor.getLanguage()\n eol = editor.getEolIndicator()\n zoom = editor.getZoom()\n self.__setSbFile(\n fn, line + 1, pos, encoding=enc, language=lang, eol=eol, zoom=zoom)\n self._checkActions(editor, False)", "def set_modified(self, value):\n self.modified = value\n self.save_button.setEnabled(value)", "def set_modified(self, value):\n self.modified = value\n self.save_button.setEnabled(value)", "def DoEdit(self,event):\r\n raise UncodedError", "def rec_default(self):\n self.new_func_triggers.setText('(0,5)')\n self.new_param.setText('1')", "def __init__(self, value: str):\n self.options = [\n \"kg.m-3\"\n ]", "def setValue(self, value):\n self.setText(str(value))", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in 
self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def edit(self):\n _, tmp = tempfile.mkstemp()\n with open(tmp, 'w') as f:\n f.write(\"\".join([x + self.newline for x in self.buffer]))\n cledit = os.getenv('EDITOR') or 'vi'\n p = subprocess.Popen([cledit, tmp])\n p.wait()\n buffer = editor.contents(tmp)\n if not buffer:\n return\n else:\n self.buffer = buffer", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def get_model_format_version(self):\n return None if self.model is None else self.model.get_format_version()", "def edit(self):\n\n pass", "def set_value_to_default(self):\n self.setValue(self.default_value)", "def testGetValueFromEditor(self):\r\n \r\n lineEdit = QtGui.QLineEdit()\r\n lineEdit.setText(QtCore.QString(u\"TestValue\"))\r\n self.assertEquals(self._editorFactory.getValueFromEditor(lineEdit), u\"TestValue\")\r\n \r\n lineEdit = QtGui.QLineEdit()\r\n lineEdit.setText(QtCore.QString(u\"\"))\r\n self.assertEquals(self._editorFactory.getValueFromEditor(lineEdit), None)\r\n \r\n spinBox = QtGui.QDoubleSpinBox()\r\n spinBox.setValue(23.04)\r\n self.assertEquals(self._editorFactory.getValueFromEditor(spinBox), 23.04)\r\n \r\n checkBox = QtGui.QCheckBox()\r\n checkBox.setChecked(True)\r\n self.assertTrue(self._editorFactory.getValueFromEditor(checkBox))\r\n \r\n comboBox = QtGui.QComboBox()\r\n comboBox.addItems([u\"test1\"])\r\n self.assertEquals(self._editorFactory.getValueFromEditor(comboBox), u\"test1\")\r\n \r\n listEditor = ListEditor(dict(), self._editorFactory, [\"test\"])\r\n self.assertEquals(self._editorFactory.getValueFromEditor(listEditor), [\"test\"])\r\n \r\n listEditor = ListEditor(dict(), self._editorFactory)\r\n self.assertEquals(self._editorFactory.getValueFromEditor(listEditor), list())", "def run(self, edit, text):\n\n self.view.replace(edit, sublime.Region(0, self.view.size()), text)", "def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )" ]
[ "0.62732863", "0.62497866", "0.62184155", "0.6095079", "0.6085302", "0.6077205", "0.6007599", "0.57824373", "0.5620094", "0.552909", "0.54974353", "0.5490214", "0.5446925", "0.54123217", "0.53948015", "0.5381172", "0.53762734", "0.53609794", "0.5341126", "0.5302158", "0.5288958", "0.5286798", "0.5286798", "0.5260249", "0.52585703", "0.5220923", "0.5220923", "0.5215406", "0.5184883", "0.5160404", "0.5160355", "0.5157669", "0.5153179", "0.5149135", "0.514768", "0.5142775", "0.51369023", "0.51288533", "0.5113015", "0.50921416", "0.5085255", "0.5083624", "0.5083354", "0.50803584", "0.50729734", "0.5071114", "0.5067409", "0.50628376", "0.50589025", "0.50547606", "0.50457704", "0.5044692", "0.5022302", "0.50209916", "0.50121856", "0.50043076", "0.50010085", "0.49887383", "0.4988685", "0.4977725", "0.4976572", "0.4964932", "0.49637115", "0.49615422", "0.49329138", "0.49287996", "0.4919903", "0.49131012", "0.49042293", "0.48960185", "0.4895653", "0.4894716", "0.48913637", "0.48819718", "0.4881316", "0.48727578", "0.4866343", "0.48659226", "0.48583668", "0.4853335", "0.484986", "0.48493484", "0.48425078", "0.48310372", "0.48302823", "0.48285818", "0.48285818", "0.4827216", "0.48237178", "0.4816396", "0.4815449", "0.4815419", "0.48102328", "0.4807332", "0.48071593", "0.48043004", "0.48006028", "0.47942585", "0.47926039", "0.4791219" ]
0.6298632
0
Return initial value in edit format, found in edit format option
def getEditInitDefault(self):
    if self.initDefault in DateFormat.dateStampStrings:
        return DateFormat.dateStampStrings[1]
    return TextFormat.getEditInitDefault(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditInitDefault(self):\n return self.formatEditText(self.initDefault)[0]", "def getEditInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def setInitDefault(self, editText):\n self.initDefault = self.storedText(editText)[0]", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def get_initial(self):\n\t\treturn self.initial", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_val_str(self):\n fmt_str = self.template.get_format_str()\n if self.val_obj is None:\n return \"\"\n elif fmt_str:\n return fmt_str % (self.val_obj.val)\n else:\n return str(self.val_obj.val)", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def get_initial(self):\n return self.initial", "def 
getInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)", "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def format(self):\n return self.getparam(\"FORMAT\")", "def format(self):\n return self.getparam(\"FORMAT\")", "def _getAlterToFormat(cls, alter):\n if alter == '':\n alter = ['', '']\n if isinstance(alter, str): # nothing to do if it is dict\n alter = ['', alter]\n return alter", "def value(self):\n return str(self.input.currentText())", "def presentation(self, value):\r\n return value", "def default_formatter(self, data):\n return data", "def get_format(self):\n return self._format[0]", "def initial_value(self):\n return self._initial_value", "def get_value_display(self):\r\n if self.display_as == 'percentage':\r\n return '{0}%'.format(self.latest_value)\r\n if self.display_as == 'boolean':\r\n return bool(self.latest_value)\r\n if self.display_as == 'byte':\r\n return defaultfilters.filesizeformat(self.latest_value)\r\n if self.display_as == 'second':\r\n return time.strftime('%H:%M:%S', time.gmtime(self.latest_value))\r\n return self.latest_value", "def edition(self, key, value):\n return clean_val(\"a\", value, str).replace(\"ed.\", \"\")", "def format_(self):\n return self.set_format or self.default_format or self.FALLBACK_FORMAT", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def get_format(self):\n pass", "def get_value ( self, object ):\n try:\n if self.format_func is not None:\n return self.format_func( self.get_raw_value( object ) )\n\n return self.format % ( self.get_raw_value( object ), )\n except:\n logger.exception( 'Error occurred trying to format a %s value' %\n self.__class__.__name__ )\n return 'Format!'", "def format(self) -> str:", "def int_format(self):\n ...", "def get_input_data(input_section: Dict) -> str:\n default_value = input_section.get(\"value\")\n if isinstance(default_value, str):\n return default_value\n\n if default_value:\n complex_field = default_value.get(\"complex\")\n if complex_field:\n if complex_field.get(\"accessor\"):\n return f\"{complex_field.get('root')}.{complex_field.get('accessor')}\"\n else:\n return f\"{complex_field.get('root')}\"\n return default_value.get(\"simple\")\n\n return \"\"", "def _get_field_edit_widget(self, row_index):\n field_row = self.field_rows[row_index]\n if not field_row.editable:\n raise TypeError(\"Cannot edit a boolean or dropdown field. 
(Internal error, tell the developer!)\")\n field_type = field_row.field_type\n field_value = self.get_field_dict(self.get_entry_id(self.active_row_index))[field_row.field_name]\n initial_text = repr(sorted(field_value)) if issubclass(field_type, list) else str(field_value)\n return self.Entry(\n field_row.value_box,\n initial_text=initial_text,\n integers_only=field_type == int,\n numbers_only=field_type == float,\n sticky=\"ew\",\n width=5,\n )", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def get_value ( self, object ):\n try:\n value = getattr( object, self.name )\n try:\n return self.format % ( value, )\n except:\n return 'Format!'\n except:\n return 'Undefined!'", "def value(self):\n self.refresh_default_value()\n return self.default_value", "def initial(self):\n return self.args[3]", "def asformat(self, format):", "def _format_default_value(self, default):\n return json.dumps(default)", "def getValue(self):\n return self.field.currentText()", "def format(self):\n return self._format", "def createEditor(self, parent, option, index):\n editor = QLineEdit(parent)\n date = index.model().data(index, Qt.DisplayRole)\n editor.setText(date.strftime(self.format))\n return editor", "def _getDefaultValue(self):\n value = self._getDefaultValue()\n return value.getData() if value else None", "def _getAlter(self):\n return self._getAlterToFormat(self.attr('alter'))", "def format( self ) :\n\n return( self.__format )", "def format(self):\n return self[\"format\"]", "def format(self):\n return self[\"format\"]", "def display_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"display_value\")", "def display_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"display_value\")", "def initFormat(self):\n pass", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def format(self) -> str:\n return pulumi.get(self, \"format\")", "def default_from(self):\n\n return \"\"", "def value_unformatted(self):\n return self._unformated_value", "def get_prep_value(self, value):\n\n try:\n return value.isoformat()\n except:\n pass\n\n # maybe value is a string containing a PartialDate?\n try:\n pd = string_to_partialdate(value)\n return pd.isoformat()\n except:\n return ''", "def default(self):\n\n return self._get_field(\"value\")", "def special_case(self):\n Input.clear_display(self, self.entries[4])\n self.entries[4].insert(INSERT, '1712/02/30 was a real date in Sweden')\n self.entries[4].configure(state='readonly')", "def format(self):\n ...", "def __str__(self):\n return '[{0}, {1}]'.format(self.timeValuePairs, self.defaultValue)", "def get_value( self, trans, grid, repository ):\n select_field = grids_util.build_changeset_revision_select_field( trans, repository, downloadable=False )\n if select_field.options:\n return select_field.options[ 0 ][ 0 ]\n return ''", "def format_field(model, name, value):\n if value is None: return value\n t = type( getattr(model,name) )\n if t == datetime:\n return value.replace('T',' ')\n return value", "def update_format_string(self):\n if self._show_units:\n units = \" {}\".format(self._unit)\n else:\n units = \"\"\n\n if self._show_step_exponent:\n self.setSuffix(\"{0} Step: 1E{1}\".format(units, self.step_exponent))\n self.lineEdit().setToolTip(\"\")\n else:\n self.setSuffix(units)\n self.lineEdit().setToolTip('Step: 
1E{0:+d}'.format(self.step_exponent))", "def _get_FIELD_display(self, field):\n value = getattr(self, field.attname)\n if value is None:\n return\n template = ''\n template += '{:d}' if field.decimals == 0 else '{:.%sf}' % field.decimals\n template += ' ' if field.spaced_display else ''\n template += '{!s:s}'\n return template.format(value, field.unit)", "def get_value(self, key, args, kwargs):\n if self.default is not None:\n try:\n return string.Formatter.get_value(self, key, args, kwargs)\n except KeyError:\n return self.default\n else:\n return string.Formatter.get_value(self, key, args, kwargs)", "def get_value( self, trans, grid, repository ):\n # A repository's metadata revisions may not all be installable, as some may contain only invalid tools.\n select_field = grids_util.build_changeset_revision_select_field( trans, repository, downloadable=False )\n if len( select_field.options ) > 1:\n return select_field.get_html()\n elif len( select_field.options ) == 1:\n option_items = select_field.options[ 0 ][ 0 ]\n rev_label, rev_date = option_items.split( ' ' )\n rev_date = '<i><font color=\"#666666\">%s</font></i>' % rev_date\n return '%s %s' % ( rev_label, rev_date )\n return select_field.options[ 0 ][ 0 ]\n return ''", "def edit():", "def get_value( self, trans, grid, repository ):\n select_field = grids_util.build_changeset_revision_select_field( trans, repository, downloadable=True )\n if len( select_field.options ) > 1:\n return select_field.get_html()\n elif len( select_field.options ) == 1:\n return select_field.options[ 0 ][ 0 ]\n return ''", "def get_new_value(self):\r\n if self.initial_value is None:\r\n return None\r\n\r\n return deepcopy(self.initial_value)", "def value(self):\n return str(self.input.text())", "def fmt_option_val(option):\n if option is None:\n return \"\"\n return str(option)", "def getInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)", "def get_default_value(self):\n pass", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def get_one(self, *args, **kw):\n #this would probably only be realized as a json stream\n tmpl_context.widget = self.edit_form\n pks = self.provider.get_primary_fields(self.model)\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n return dict(value=value,model=self.model.__name__)", "def getValue(self):\n return self.initValue", "def get_note_value(self):\n return f\"{self.first_name} {self.last_name}\"", "def _get_nullformat(self, newformat):\n if self._type == int:\n length = len(str(newformat % 1))\n return '%'+str(length)+'s'\n elif self._type == float:\n length = len(str(newformat % 1.0))\n return '%'+str(length)+'s'\n else:\n return newformat", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def set_format(cls,format):\n import __main__\n IP = __main__.__dict__['__IP']\n prompt = getattr(IP.outputcache,cls._prompt)\n prompt.p_template = format\n prompt.set_p_str()\n cls._format = format", "def value(self):\n s = str(self.input.toPlainText())\n if self._is_string_:\n return s\n else:\n return eval(s)", "def get_initial(self):\n return self.initial[:]", "def value(self):\n value = super(SpeciesListFilter, self).value()\n if value is None:\n if self.default_value is 
None:\n first_species = Book.objects.order_by('title').first()\n value = None if first_species is None else first_species.id\n self.default_value = value\n else:\n value = self.default_value\n return str(value)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def render_input(env_spec_entry):\n default_value = env_spec_entry[\"default_value\"]\n default_value_state = f'value=\"{default_value}\"' if default_value else \"\"\n\n env_spec_entry_input = (\n f'<input id=\"env_spec_{env_spec_entry[\"name\"].lower()}\" '\n f'name=\"{env_spec_entry[\"name\"].lower()}\" type=\"{env_spec_entry[\"type\"]}\" '\n f'{default_value_state}\" />\\n'\n )\n return env_spec_entry_input", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def format_default(reg):\n\t\tif reg.size == \"accum\":\n\t\t\treturn str(float(reg.default)) + \"k\"\n\t\telse:\n\t\t\treturn str(int(reg.default)) + \"L\"", "def field_value(self):\n return \"{}_{}\".format(self.place.id, self.line_location)", "def Value(self) -> str:", "def get_display_value(self):\n\n\t\treturn self.__display_value", "def default_field_formatter(variable_name: str, field: Field) -> str:\n return \"{{ \" + f\"form.{variable_name}\" + \" }}\"", "def adjust(self):\n if self._adjust is None:\n return \"\"\n return self._adjust" ]
[ "0.7327655", "0.6777391", "0.6163817", "0.5962609", "0.59190995", "0.5825621", "0.5639453", "0.55958575", "0.5588548", "0.55880916", "0.55728984", "0.5547174", "0.55372924", "0.5518307", "0.55125266", "0.54999983", "0.54888153", "0.54888153", "0.54887563", "0.5471209", "0.5466419", "0.545412", "0.54069316", "0.54069316", "0.53839904", "0.5377033", "0.53640187", "0.53620666", "0.5355793", "0.5349789", "0.5343258", "0.5341933", "0.533963", "0.53153485", "0.53153485", "0.5288899", "0.5269197", "0.5263535", "0.5258517", "0.5249449", "0.52473253", "0.5231447", "0.52152646", "0.5197711", "0.5186485", "0.51838565", "0.51748896", "0.5161524", "0.51536065", "0.51321536", "0.51240325", "0.51217145", "0.5121519", "0.5117097", "0.5117097", "0.51054955", "0.51054955", "0.5105423", "0.5100754", "0.50976294", "0.5095106", "0.50944275", "0.50924397", "0.508725", "0.5084116", "0.5081867", "0.50796103", "0.5072638", "0.5071877", "0.5069576", "0.50672466", "0.50647104", "0.5051511", "0.5050451", "0.50481766", "0.5046123", "0.49976718", "0.49971545", "0.49888214", "0.49814808", "0.4975419", "0.4968391", "0.4959143", "0.49534622", "0.4947233", "0.49469805", "0.4945034", "0.49448776", "0.4943174", "0.49430725", "0.493905", "0.49366254", "0.49283075", "0.49213216", "0.4913436", "0.49123442", "0.49113816", "0.49082282", "0.49070314", "0.4901602" ]
0.69168735
1
Return a list of choices for setting the init default
def initDefaultChoices(self):
    choices = [entry[0] for entry in self.getEditChoices()]
    choices.insert(0, DateFormat.dateStampStrings[1])
    return choices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initDefaultChoices(self):\n return []", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def get_choices(cls):\n return cls.values.items()", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def choices(self):\n return tuple(self._choices)", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def as_choices(cls, key_type=None):\n if key_type is None:\n key_type = cls.get_default_choice_type()\n return cls.enum_class.as_choices(key_type)", "def _set_default_suits(self):\n # set up suits\n suit_types = [('Spades', 1), ('Hearts', 2), ('Diamonds', 3), ('Clubs', 4)]\n # populate the list of suits\n suit_list = list()\n for s in suit_types:\n suit_list.append(Suit(s[0], s[1]))\n\n return suit_list", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def get_setting_choices(cls, key, **kwargs):\n setting = 
cls.get_setting_definition(key, **kwargs)\n\n choices = setting.get('choices', None)\n\n if callable(choices):\n # Evaluate the function (we expect it will return a list of tuples...)\n return choices()\n\n return choices", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def get_template_base_dir_choices() -> list[tuple[str, str]]:\n # handle predefined choices\n choices, seen = [], set()\n for template_name in TemplateName:\n choices.append((template_name.value, template_name.label))\n seen.add(template_name.value)\n\n # handle custom choices via settings\n for template_name, display_name in getattr(settings, \"CAST_CUSTOM_THEMES\", []):\n if template_name not in seen:\n choices.append((template_name, display_name))\n seen.add(template_name)\n\n # search for template base directories\n template_directories = get_template_directories()\n template_base_dir_candidates = get_template_base_dir_candidates(template_directories)\n for candidate in template_base_dir_candidates:\n if candidate not in seen:\n choices.append((candidate, candidate))\n\n return choices", "def create_default_repo_choice(self, default_repo):\n return (default_repo, default_repo)", "def initialise_options():\r\n default_options = list(range(NUMBER_OF_TILES))\r\n default_weights = [1/NUMBER_OF_TILES]*NUMBER_OF_TILES\r\n return default_options, default_weights", "def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list", "def default_variation(random, candidates, args):\r\n return candidates", "def default_variation(random, candidates, args):\r\n return candidates", "def get_default_options():\n return GROUPS_.values()", "def __init__(self, *initial):\n self.prompt_list = list(initial)", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def choices(self, var):\r\n return (self.curr_domains or self.domains)[var]", "def choices(self, choices):\n\n self._choices = choices", "def get_choices_for_var(self, var):\n return self.choices[var]", "def get_options(self):\n return []", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)", "def default_value_list(sources: List[str] = None):\n if not default:\n return list()\n if not sources:\n return [default]\n else:\n return sources", "def _get_target_choices():\n apps = [('public', _(\"Public website\"))]\n for model, entity in registry.registry.items():\n if entity.menu:\n appname = model._meta.app_label.lower()\n apps.append((appname, unicode(entity.label)))\n return tuple(apps)", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def get_default_is_selected_index(self, choicesdata):\n\n return 0", "def __init__(self, choices, *args, **kwargs):\n super(RangePollChoiceForm, 
self).__init__(*args, **kwargs)\n nominees = [(i, '%d' % i) for i in range(0, choices.count()+1)]\n for choice in choices:\n self.fields['range_poll__%s' % str(choice.id)] = (\n forms.ChoiceField(widget=forms.Select(),\n choices=nominees,\n label=choice.nominee.get_full_name()))", "def _create_defaults(self):\n return DefaultCommandOptionValues(\n min_confidence=3, output_format='vs7')", "def create_options(self):\n return []", "def Choices(cls):\n attr = '_choice_attr_' + cls.__name__\n if hasattr(cls, attr):\n return getattr(cls, attr)\n\n choices = set()\n for (k, v) in cls.__dict__.items():\n if not k.startswith('_') and issubclass(type(v), (str, unicode)):\n choices.add(v)\n for base in cls.__bases__:\n if issubclass(base, ChoiceBase) and base is not ChoiceBase:\n choices = set.union(choices, base.Choices())\n setattr(cls, attr, choices)\n\n return choices", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def form_SelectChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(options)\n form['mySelect'].default = 2\n return form", "def season_choices():\n return [(s, s) for s in range(0, 3)]", "def is_a_list_of_choices(self):\n pass", "def setChoices(self, choices):\n self.getGtkObject('property_liststore').clear()\n for choice in choices:\n self.getGtkObject('property_liststore').append([str(choice)])", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def episode_choices():\n return [(e, e) for e in range(0, 2)]", "def setAll(self):\n self.setValue(self._choices_)", "def configure_list_of_choices_type_question(self, question_data):\n self.driver.find_radio_button(LIST_OF_CHOICE_RB).click()\n index = 1\n for choice in fetch_(CHOICE, from_(question_data)):\n if index > 1:\n self.driver.find(ADD_CHOICE_LINK).click()\n self.driver.find_text_box(by_xpath(CHOICE_XPATH_LOCATOR + \"[\" + str(index) + \"]\" + CHOICE_TB_XPATH_LOCATOR)).enter_text(choice)\n index += 1\n choice_type = fetch_(ALLOWED_CHOICE, from_(question_data))\n if ONLY_ONE_ANSWER == choice_type:\n self.driver.find_radio_button(ONLY_ONE_ANSWER_RB).click()\n elif MULTIPLE_ANSWERS == choice_type:\n self.driver.find_radio_button(MULTIPLE_ANSWER_RB).click()\n return self", "def get_init_list(self):\n\n return self.convert_compartments_to_list(self.init_compartments)", "def __init__(self, choiceList=None, prompt=DEFAULT_PROMPT, title=DEFAULT_TITLE):\n self.choice = None\n \n wpf.LoadComponent(self, GUI_XAML_FILE)\n \n self.Title = title\n self.lblPrompt.Content = prompt\n \n self.choicesBox.ItemsSource = choiceList", "def initDefaults(self):\n return _libsbml.Species_initDefaults(self)", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def 
_get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def choices(self):\n self._choices = self.getChoices()\n return len(self._choices)", "def get_defaults(self):\n\t\treturn self.__defaults", "def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)", "def choices(self, cl):\n # TODO: Determine if non-static choices would be cleaner here.\n # Honestly, I tried a more generic version and it was even harder to\n # follow than this version.\n yield {\n 'selected': not (self.lookup_val_gte or self.lookup_val_lt),\n 'query_string': cl.get_query_string({}, [self.lookup_kwarg_gte,\n self.lookup_kwarg_lt]),\n 'display': 'All'\n }\n\n goal = settings.FACEBOOK_CLICK_GOAL\n yield {\n 'selected': self.lookup_val_gte and not self.lookup_val_lt,\n 'query_string': cl.get_query_string({self.lookup_kwarg_gte: goal},\n [self.lookup_kwarg_lt]),\n 'display': 'Yes'\n }\n yield {\n 'selected': self.lookup_val_lt and not self.lookup_val_gte,\n 'query_string': cl.get_query_string({self.lookup_kwarg_lt: goal},\n [self.lookup_kwarg_gte]),\n 'display': 'No'\n }", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def test_get_prior_string_list(self):\n categories = list(range(10))\n categories[0] = \"asdfa\"\n categories[2] = \"lalala\"\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices(['asdfa', 1, 'lalala', 3, 4, 5, 6, 7, 8, 9], \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def default_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"default_values\")", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def choices(self, choices: Iterable[Tuple[str, str]]):\n try:\n iter(choices)\n except TypeError:\n raise TypeError(\"'choices' isn't a valid iterable\")\n\n apply_choices = []\n for i, (choice_id, choice_label) in enumerate(choices):\n apply_choices.append((str(choice_id), str(choice_label)))\n\n if len(apply_choices) < 2:\n raise ValueError(\"you need to specify at least two choices\")\n\n self._choices = apply_choices\n self.specific.refresh()\n self._selected = 0", "def choose_option(self, state):\n options = [o for o in self.options if o.initiation_set[state] == 1]\n return random.choice(options)", "def setUp(self):\n current_date = date.today()\n name = 'name'\n possible_meals = [Meal(date=current_date, name=name)]\n self.possible_meals_choices = [(possible_meal.id, possible_meal.name)\n for possible_meal in possible_meals]", "def all_options():\n return _OptionRegistry.values()", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n ChoiceFormat.__init__(self, name, attrs)", "async def choices(self, ctx, *, options):\n choices = options.split('-')\n choice = random.choice(choices)\n await ctx.send(f'My choice is\\\"{choice}\\\"')", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = 
user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def initialize_options(self):", "def initDefaults(self):\n return _libsbml.Reaction_initDefaults(self)", "def get_options(self):\r\n return self._option_values", "def calendar_choices(self):\n if not self._calendars:\n if self.authenticated:\n default = self.account.schedule().get_default_calendar()\n # {\n # \"default\" : <DEFAULT_CALENDAR>,\n # \"<CALENDAR_NAME>: <CALENDAR>,\n # ...\n # }\n self._calendars = {\n DEFAULT_CALENDAR: default,\n **{\n c.name: c\n for c in self.account.schedule().list_calendars() if c.name != default.name\n }\n }\n\n return self._calendars", "def getOptionsNames(self) -> List[unicode]:\n ...", "def default_args(self) -> Optional[list[str]]:\n _args: list[Arg] = []\n _ctx = self._select(\"defaultArgs\", _args)\n return _ctx.execute_sync(Optional[list[str]])", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def available_binary_choices() -> Iterable[str]:\n for name, _ in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n if name.startswith('Binary'):\n yield name", "def initDefaults(self):\n return _libsbml.Event_initDefaults(self)", "def form_CheckboxMultiChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('multiChoice', schemaish.Sequence(schemaish.Integer()))\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['multiChoice'].widget = formish.CheckboxMultiChoice(options)\n form['multiChoice'].default = [2]\n return form", "def default_selection(random, population, args):\r\n return population", "def form_SequenceOfStringsWithDefault(request):\n schema = schemaish.Structure()\n schema.add( 'myList', schemaish.Sequence( schemaish.String() ))\n\n form = formish.Form(schema, 'form')\n form.defaults = {'myList': ['a','b']}\n return form", "def test_model_choices_all_models(self):\n unique_action_admin = UniqueActionAdmin(UniqueAction, self.site)\n\n self.assertFalse(getattr(unique_action_admin, '_model_choices', False))\n\n model_choices = unique_action_admin.model_choices()\n\n self.assertTrue(getattr(unique_action_admin, '_model_choices'))\n self.assertTrue(isinstance(model_choices, list))", "def sel_prep(self):\n sel_blob = []\n for sel in self.blob['options']:\n if self.blob['defaultValue'] == sel['name']:\n sel_blob.append({'value': sel['name'], 'selected': 'true'})\n else:\n sel_blob.append({'value': sel['name'], 'selected': 'false'})\n\n return sel_blob", "def setChoices(self,report):\n\t\tif report is not None:\n\t\t\tbrowser = report[1]['objects']\n\n\t\t\tif browser is not None:\n\t\t\t\tbrowserChoices = list()\n\t\n\t\t\t\t#compute select list\n\t\t\t\tfor b in 
browser:\n\t\t\t\t\tif \"chrome\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_CHROME\n\t\t\t\t\telif \"firefox\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_FF\n\t\t\t\t\telif \"thunderbird\" in b['name'].lower():\n\t\t\t\t\t\tformString = constConfig.HISTORY_FORM_TH\n\n\t\t\t\t\tfor p in b['profiles']:\n\t\t\t\t\t\tformValue = str(formString)+\"_\"+p['profileName']\t\n\t\t\t\t\t\tbrowserChoices.append((formValue,b['name']+\" - \"+p['profileName']))\n\t\t\t\n\t\t\t\tch = forms.ChoiceField(label=\"Profile\",widget=forms.Select(attrs={'class':'form-control'}),choices=browserChoices)\n\t\t\t\tself.fields['choices'] = ch", "def test_default(self):\n for n in range(1, 5):\n for prefix in ['', 'git-', 'gbp-']:\n parser = GbpOptionParser('%scmd%d' % (prefix, n))\n self.assertEqual(parser.config['default_option'], 'default_default1')", "def form_SelectWithOtherChoiceDefault(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n options = [(1,'a'),(2,'b'),(3,'c')]\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectWithOtherChoice(options)\n form['mySelect'].default = 2\n return form" ]
[ "0.8790614", "0.83091384", "0.8089398", "0.74508727", "0.70026475", "0.70026475", "0.680855", "0.67124087", "0.6624441", "0.65727", "0.653437", "0.64892524", "0.6431678", "0.6409585", "0.63181144", "0.6240514", "0.6240365", "0.62128824", "0.6163351", "0.6163266", "0.6090951", "0.6066959", "0.6054693", "0.60396624", "0.60294706", "0.60005", "0.5994685", "0.59767413", "0.5959246", "0.592687", "0.59262645", "0.59103084", "0.590184", "0.5882532", "0.5882532", "0.5859008", "0.5836419", "0.58130014", "0.5798692", "0.5789201", "0.5760558", "0.57430315", "0.5737376", "0.5732801", "0.5709087", "0.56912374", "0.56849235", "0.56539017", "0.56511205", "0.5647366", "0.56454474", "0.56372243", "0.56312835", "0.5606162", "0.56021655", "0.5598892", "0.5586636", "0.55855805", "0.5569713", "0.55686575", "0.55556387", "0.554631", "0.5521598", "0.5510379", "0.54975694", "0.54883635", "0.54830474", "0.54726905", "0.54721117", "0.5445676", "0.5418068", "0.5402634", "0.5388768", "0.53883016", "0.5386314", "0.5381717", "0.53741294", "0.53630304", "0.53518015", "0.5349285", "0.5349285", "0.5348316", "0.53463614", "0.53438973", "0.53366566", "0.53365505", "0.53365135", "0.5332077", "0.53207636", "0.5319262", "0.53119504", "0.5300134", "0.5299081", "0.5287518", "0.52822554", "0.5277455", "0.5273564", "0.526702", "0.5266721", "0.5253591" ]
0.7564882
3
Return conditional comparison value with realtime adjustments, used for date and time types' 'now' value
def adjustedCompareValue(self, value):
    if value.startswith('now'):
        return repr(GenDate())
    return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjustedCompareValue(self, value):\n if value.startswith('now'):\n return repr(GenTime())\n return value", "def condition(self):\n HH = str(time.localtime().tm_hour)\n MM = str(time.localtime().tm_min)\n return eval(self._cond_str)", "def get_state_by_time(python_time):\n present = datetime.now()\n\n if python_time <= present:\n return 2\n else:\n return 1", "def now(self):\n return conditional_now() + self.timedelta(**self.now_shift_kwargs)", "def check(self, comparison, value, value_type, second_value=None):\n now = datetime.now()\n if value_type == \"WEEKDAY\":\n if comparison not in [\"NE\", \"E\", \"WEEKDAY\", \"WEEKEND\"]:\n raise Exception(f\"Comparison {comparison} \"\n \"not valid for WEEKDAY\")\n if comparison == \"E\":\n return now.weekday() == value\n elif comparison == \"NE\":\n return now.weekday() != value\n elif comparison == \"WEEKDAY\":\n return now.weekday() < 5 # ISO counts from 0\n else:\n return now.weekday() > 4 # so Sat,Sun are 5,6\n if value_type == \"DATE\":\n dt = datetime.strptime(value, DATE_FMT)\n dt = dt.date()\n now = now.date()\n elif value_type == \"TIME\":\n dt = datetime.strptime(value, TIME_FMT)\n dt = dt.time()\n now = now.time()\n else:\n dt = datetime.strptime(value, DATETIME_FMT)\n if comparison == \"LE\":\n return now <= dt\n elif comparison == \"E\":\n return now == dt\n elif comparison == \"GE\":\n return now >= dt\n # At this point, we're doing either IN or OUT, so read second time\n # format\n if value_type == \"DATE\":\n second = datetime.strptime(second_value, DATE_FMT)\n second = second.date()\n elif value_type == \"TIME\":\n second = datetime.strptime(second_value, TIME_FMT)\n second = second.time()\n else:\n second = datetime.strptime(second_value, DATETIME_FMT)\n if comparison == \"IN\":\n return now >= dt and now <= second\n elif comparison == \"OUT\":\n return now <= dt or now >= second", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def get(self):\n now = datetime.datetime.utcnow()\n if now > self.time_of_next_update:\n self._update_value()\n return self.value", "def set_when(day, today):\n if day < today:\n return \"past\"\n if day == today:\n return \"present\"\n return \"future\"", "def greater_than_or_equal(self) -> global___Expression:", "def __cmp__(self, other):\n return (self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False", "def test_expression_dates(self):\n import datetime\n import time\n time1 = datetime.datetime.now()\n time.sleep(0.01)\n time2 = datetime.datetime.now()\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], 
[\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")", "def less_than_or_equal(self) -> global___Expression:", "def current_time(cls) -> float:", "def next_update_in(self, now):\n # Never updated: NOW!\n if self.last_tested is None:\n return 0.0\n\n # Was updated\n seconds_ago = (now - self.last_tested).total_seconds()\n delay = self.real_period - seconds_ago\n return max(delay, 0.0) # don't allow it to be negative", "def after(v1,v2):\n return v1.time_left>v2.time_left", "def check_time_since_last_data(device_origin):\n actual_time = time.time()\n sec_since_last_data = actual_time - mon_item.read_device_status_values(device_origin)[1]\n min_since_last_data = sec_since_last_data / 60\n min_since_last_data = int(min_since_last_data)\n latest_data_hr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(latest_data))\n return min_since_last_data", "def test_process_filter_value():\n now = dt.utcnow()\n now_ts = now.timestamp()\n filter_ = {'column': \"ts_created_at\", 'value': now_ts, type: 'leq'}\n assert process_filter_value(filter_) == now\n\n filter_ = {'column': \"created_at\", 'value': now_ts, type: 'leq'}\n assert process_filter_value(filter_) == now_ts", "def compare(x, y):\n if x >= y:\n return 1.0\n else:\n return 0.0", "def newer(a, b):\n\treturn modtime(a) < modtime(b) # smaller is earlier", "def test_larger_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 3, 45)\n rhs = datetime(2012, 9, 20, 2, 45)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def test_get_current_time_is_constant() -> None:\n time_provider = TimeProvider()\n current_time_1 = time_provider.get_current_time()\n current_time_2 = time_provider.get_current_time()\n\n assert current_time_1 == current_time_2", "def _compare(self, value, target):\n result = getattr(self.reg, target) - value\n self.reg.N = result >> 7\n self.reg.C = getattr(self.reg, target) >= value\n self.reg.Z = result == 0", "def search_cond(ts):\n ts = convert(ts, mode='timestamp')\n at = [\"year\", \"month\", \"day\", \"hour\", \"minute\"]\n if all(getattr(ts, a) == getattr(upper_bound, a) for a in at):\n return 0\n elif ts < upper_bound:\n return -1\n elif ts > upper_bound:\n return 1", "def time_before(time_a, time_b=None) -> bool:\n if time_b is None:\n time_b = time_now()\n\n # make sure both times are floats\n time_a = float(date_to_epoch(time_a))\n time_b = float(date_to_epoch(time_b))\n return time_a < time_b", "def native_value(self) -> float:\n if (self.coordinator.data is None) or (self._last_updated is not None and \"last_updated\" in self.coordinator.data and self._last_updated > self.coordinator.data[\"last_updated\"]):\n 
self._attributes[\"last_updated_timestamp\"] = self._last_updated\n return self._state\n \n self._attributes[\"last_updated_timestamp\"] = self.coordinator.data[\"last_updated\"]\n self._state = self.coordinator.data[\"charge_limit_weekday\"]\n \n return self._state", "def less_than(self) -> global___Expression:", "def _comparison_function(comp, value=0.0, **kwargs):\n if comp == 'g' or comp == '>':\n func = np.greater\n elif comp == 'ge' or comp == '>=':\n func = np.greater_equal\n elif comp == 'l' or comp == '<':\n func = np.less\n elif comp == 'le' or comp == '<=':\n func = np.less_equal\n elif comp == 'e' or comp == '=' or comp == '==':\n func = np.equal\n elif comp == 'ne' or comp == '!=':\n func = np.not_equal\n else:\n raise ValueError(\"Unrecognized comparison '{}'.\".format(comp))\n\n def comp_func(xx):\n return func(xx, value, **kwargs)\n\n return comp_func", "def report_status(scheduled_time, estimated_time):\n if scheduled_time == estimated_time:\n return 'on time'\n elif scheduled_time > estimated_time:\n return 'early'\n else:\n return 'delayed'", "def is_before(self,other_date):", "def REAL_TIME_ADVANCE(dt):", "def __gt__(self, other):\n self_list = self.date.split(\"/\")\n other_list = other.date.split(\"/\")\n if self_list[2] > other_list[2]:\n return True\n else:\n if self_list[2] == other_list[2]:\n if self_list[1] > other_list[1]:\n return True\n elif self_list[1] == other_list[1]:\n if self_list[0] > other_list[0]:\n return True\n return False", "def __gt__(self, other):\n return self._metric_value > other.metric_value()", "def last_checked(self):\n\t\treturn self.current().time", "def greater_than(self) -> global___Expression:", "def ge(self, val):\n\t\treturn GreaterOrEquals(self, val)", "def when(self):\n\n # current UTC time\n now = datetime.datetime.utcnow()\n # calculate timedelta and return\n return now - self.creation_time", "def _greater_than_or_equal_to_op(spec):", "def test_since(self):\n import datetime\n dt1 = datetime.datetime(2013, 12, 15, 10, 10, 10)\n dt2 = datetime.datetime(2013, 12, 15, 10, 11, 10)\n\n check_list = health.CheckList(refresh=1)\n check_list._refreshed_at = dt1\n\n mock_datetime = self.mocker.replace(datetime)\n mock_datetime.datetime.now()\n self.mocker.result(dt2)\n self.mocker.replay()\n\n self.assertEqual(check_list.since(), '0:01:00')", "def comparison(self):\n return self._comparison", "def __gt__(self, other):\n if isinstance(other, float):\n return self.floatvalue > other\n else:\n return not self.negative and not self == other", "def __gt__(self, value):\n self = self.__ge__(value)\n return self.__invert__()", "def deciding(self):\n\n if not self.db.cacheEmpty():\n cacheMsgs = self.db.getCacheMsgs()\n prev = datetime.datetime.min\n prev_location = \"FOO LOCATION\"\n for msg in cacheMsgs:\n neutrinoTime = msg[\"neutrino_time\"]\n # go through messages to check if any two or more are within the time threshold\n if neutrinoTime - datetime.timedelta(seconds=self.coinc_threshold) <= prev:\n # verify the locations are different\n if msg[\"location\"] != prev_location:\n return True\n prev = neutrinoTime\n prev_location = msg[\"location\"]\n return False\n\n # return not self.db.cacheEmpty()", "def time_after(time_a, time_b=None) -> bool:\n if time_b is None:\n time_b = time_now()\n\n # make sure both times are floats\n time_a = float(date_to_epoch(time_a))\n time_b = float(date_to_epoch(time_b))\n return time_a > time_b", "def acceptable(self):\n now = datetime.datetime.now()\n origin = datetime.datetime.combine(self.date, 
datetime.time.min)\n start = origin + datetime.timedelta(hours=6)\n end = origin + datetime.timedelta(days=1)\n morning = end + datetime.timedelta(hours=6)\n if now < origin or now > morning:\n return 0\n if now >= end or now <= start:\n return 1\n return 3", "def debugTest(self):\n startTime = datetime.today()\n serverTzInfo = self.serverTimeZone\n startTime = startTime.replace(tzinfo=serverTzInfo)\n self.notify.info('startTime = %s' % startTime)\n serverTime = self.getCurServerDateTime()\n self.notify.info(\"serverTime = %s\" % serverTime)\n result = startTime <= serverTime\n self.notify.info(\"start < serverTime %s\" % result)\n startTime1MinAgo = startTime + timedelta(minutes = -1)\n self.notify.info('startTime1MinAgo = %s' % startTime1MinAgo)\n result2 = startTime1MinAgo <= serverTime\n self.notify.info(\"startTime1MinAgo < serverTime %s\" % result2)\n serverTimeForComparison = self.getCurServerDateTimeForComparison()\n self.notify.info(\"serverTimeForComparison = %s\" % serverTimeForComparison)\n result3 = startTime1MinAgo <= serverTimeForComparison\n self.notify.info(\"startTime1MinAgo < serverTimeForComparison %s\" % result3)", "def __gt__(self, other):\n\t\ttry:\n\t\t\treturn self.val > other.val\n\t\texcept:\n\t\t\treturn self.val > other", "def compare(self, value: int, /) -> None:", "def adjustedCompareValue(self, value):\n return value", "def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()", "def check_last_update(self):\n now = self.get_clock().now()\n diff_L = (now - self.last_stamp_L).nanoseconds * 1e-9\n diff_R = (now - self.last_stamp_R).nanoseconds * 1e-9\n if diff_L > 0.1:\n self.duty_left = 0.0\n if diff_R > 0.1:\n self.duty_right = 0.0", "def __gt__(self, other):\n return self.__f > other.get_f()", "def is_after(t1,t2):\n return (t1.hour, t1.minute, t1.second) > (t2.hour, t2.minute, t2.second)", "def curr_time():\r\n try:\r\n curr_time = datetime.datetime.now().strftime(\"%H:%M:%S\")\r\n except Exception as e:\r\n print(e)\r\n curr_time = False\r\n return curr_time", "def __ge__( self, value ):\r\n\t\treturn ( self > value ) or ( self == value )", "def evaluate_stopping_condition(self, current_value: Union[float, int, np.float64, np.ndarray]):\n\n if self.__reference_value is not None:\n\n if type(current_value) in [float, int, np.float64]:\n if not self.__smaller_value_required:\n if not self.__equal_required:\n return current_value > self.__reference_value\n else:\n return current_value >= self.__reference_value\n else:\n if not self.__equal_required:\n return current_value < self.__reference_value\n else:\n return current_value <= self.__reference_value\n\n elif type(current_value) == np.ndarray:\n if not self.__smaller_value_required:\n if not self.__equal_required:\n return (current_value > self.__reference_value).all()\n else:\n return (current_value >= self.__reference_value).all()\n else:\n if not self.__equal_required:\n return (current_value < self.__reference_value).all()\n else:\n return (current_value <= self.__reference_value).all()\n\n else:\n raise NotImplementedError\n\n else:\n return False", "def statusCompare (x, y):\n xs = 
db.status.get(x, 'order')\n ys = db.status.get(y, 'order')\n c = float(xs) - float(ys)\n if c >= 0.0: \n return int(c)\n else:\n return -int(abs(c))", "def test_larger_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 2, 59)\n rhs = datetime(2012, 9, 20, 3, 00)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)", "def update_waiting(self):\n if self.get_value(0) is not None and self.get_value(1) is not None:\n if self.name == \"greater\":\n self.set_value(self.get_value(0) > self.get_value(1), 0)\n if self.name == \"greater or equal\":\n self.set_value(self.get_value(0) >= self.get_value(1), 0)\n if self.name == \"less\":\n self.set_value(self.get_value(0) < self.get_value(1), 0)\n if self.name == \"less or equal\":\n self.set_value(self.get_value(0) <= self.get_value(1), 0)\n if self.name == \"not equal\":\n self.set_value(self.get_value(0) != self.get_value(1), 0)\n if self.name == \"xor\":\n self.set_value(bool(self.get_value(0)) ^ bool(self.get_value(1)), 0)\n self.state = ACTIVE", "def _get_current_time_if_none(given_time):\n\t\treturn given_time or time.time()", "def value_equal_keyvalue(attr, current_time=False):\n anim_val = get_anim_value_at_current_frame(attr)\n if current_time:\n val = cmds.getAttr(attr, time=current_time)\n else:\n val = cmds.getAttr(attr)\n if anim_val == val:\n return True", "def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds", "def compare_dates(date1, date2, flag):\n if date1 > date2:\n if flag == \"l\":\n return date1\n return date2\n if flag == \"l\":\n return date2\n return date1", "def _get_comparison_func(self, adjective):\n return self.SONG_ADJECTIVES.get(adjective, {}).get(\"comparison\")", "def above_threshold(self, value):\n # We use floating point number here so we have to take care\n return finf(value,self.min) or finf(self.max,value)", "def __le__(self, other):\n return self.timestamp <= other.timestamp", "def getValueAt(self, time):\n for tvp in self.timeValuePairs:\n if time <= tvp[0]:\n return tvp[1]\n return self.defaultValue", "def __cmp__(self, other):\n if not isinstance(other, datetime):\n types = (type(other), datetime)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return (self._cmp(self._days, other._days)\n or self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def dynamic_comparison(v1, op, v2):\n assert op in ['gt', 'lt']\n\n operator_map = {'gt': operator.gt,\n 'lt': operator.lt}\n\n return operator_map[op](v1, v2)", "def ge(self, y):\n return 1 - self.lt(y)", "def less_equal(value, other):\n return value >= other", "def test_equal_inputs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = rhs = datetime(2012, 9, 20, 2, 59)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(rhs, result)\n self.assertIs(lhs, result)", "def check_compare(change, reference_value):\n rounded_change = round(change, 2)\n compare_values(reference_value, rounded_change)", "def _get_delta(self, now, then):\n if now.__class__ is not then.__class__:\n now = datetime.date(now.year, now.month, now.day)\n then = datetime.date(then.year, then.month, then.day)\n if now < then:\n raise ValueError(\"Cannot determine moderation rules because date field is set to a value in the future\")\n return now - then", "def _greater_than_op(spec):", "def currentTime(*args, 
update: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[time, Any]:\n pass", "def lessThanEqualTo(self, t):\n if t is None:\n return False\n if isinstance(t, (float, int)):\n return self._micros <= long(t * 1000000)\n else:\n return self._micros <= t._micros", "def get_now():\n return datetime.now()", "def get_now():\n return datetime.now()", "def __ge__(self, other):\n self.conds.append((self.name, '>=', other))\n return self\n return self.name, '>=', other", "def __get_timeval():\n return convert_timeval(time.time())", "def absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def ComputeTimeReward(self, currentTime, expectedTime):\r\n return (expectedTime - currentTime) * 1 if currentTime < expectedTime else (expectedTime - currentTime) * 1", "def _get_half_time(self):\n return self.__half_time", "def lessThan(self, t):\n if t is None:\n return False\n if isinstance(t, (float, int)):\n return self._micros < long(t * 1000000)\n else:\n return self._micros < t._micros", "def new_value(self):\n on_val = get_usable_value(self._momentary_mode_on_prop)\n follow_val = get_usable_value(self._momentary_follow_sense_prop)\n on_off_val = get_usable_value(self._momentary_on_off_trigger_prop)\n\n new_value = _calc_relay_mode(on_val, follow_val, on_off_val)\n if new_value == self.value:\n return None\n return new_value", "def __gt__(self, other):\n return self.greaterThan(other)", "def is_after(t1, t2):\n return (t1.hour, t1.minute, t1.second) > (t2.hour, t2.minute, t2.second)", "def __ge__(self, other):\n # self >= other\n return self.runtime.greater_than_equal(self, other)", "def match(self, dt):\n logic_map = {\n CLOSED_CLOSED: ((self.start is None or dt >= self.start) and\n (self.end is None or dt <= self.end)),\n CLOSED_OPEN: ((self.start is None or dt >= self.start) and\n (self.end is None or dt < self.end)),\n OPEN_CLOSED: ((self.start is None or dt > self.start) and\n (self.end is None or dt <= self.end)),\n OPEN_OPEN: ((self.start is None or dt > self.start) and\n (self.end is None or dt < self.end)),\n }\n return logic_map[self.interval]", "def check(self):\r\n boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))\r\n\r\n if self.hourly and not self.last_executed:\r\n return 0\r\n \r\n if self.daily and not self.last_executed:\r\n if int(self.hour) == self.now.hour:\r\n return 0\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60\r\n\r\n delta = self.now - self.last_executed\r\n if self.hourly:\r\n if delta.seconds >= 60*60:\r\n return 0\r\n else:\r\n return 60*60 - delta.seconds\r\n else:\r\n if int(self.hour) == self.now.hour:\r\n if delta.days >= 1:\r\n return 0\r\n else:\r\n return 82800 # 23 hours, just to be safe\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60", "def __gt__(self, *args):\n return _ida_hexrays.cdo_t___gt__(self, *args)", "def _get_detection_time_multiplier(self):\n return self.__detection_time_multiplier", "def test_details_time(self):\n self.assertLess(self.details.time, datetime.now(timezone.utc))", "def compare_datetime(self_datetime, other_datetime):\n # pylint: disable=superfluous-parens\n if (isinstance(self_datetime and other_datetime, (datetime, type(None)))):\n return (\n (self_datetime == other_datetime\n if all(str(_.time()) != \"00:00:00\"\n for _ in [self_datetime, other_datetime])\n else 
self_datetime.date() == other_datetime.date())\n if self_datetime and other_datetime\n else self_datetime == other_datetime)\n else:\n Representation.attrs_values_types_error(\n self_attr=self_datetime, other_attr=other_datetime,\n expected_types=(datetime.__name__, type(None).__name__))", "def comparison(self) -> str:\n return self._values.get('comparison')", "def compare(date1,date2):\n d1,m1,y1 = breakdate(date1)\n d2,m2,y2 = breakdate(date2)\n if y2>y1:\n return -1\n elif y1>y2:\n return 1\n else:\n if m2>m1:\n return -1\n elif m1>m2:\n return 1\n else:\n if d2>d1:\n return -1\n elif d1>d2:\n return 1\n else:\n return 0", "def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )" ]
[ "0.7139558", "0.61179876", "0.6032351", "0.60296464", "0.5851021", "0.5813862", "0.5813862", "0.58051044", "0.5691663", "0.56752634", "0.563707", "0.55923384", "0.5590049", "0.55772096", "0.5574569", "0.5568509", "0.5496361", "0.54885936", "0.54716676", "0.5465949", "0.5443612", "0.5395156", "0.536688", "0.5360831", "0.53564024", "0.53204775", "0.53169084", "0.5310457", "0.5301735", "0.5275172", "0.5272399", "0.52617353", "0.52592015", "0.5256417", "0.5243143", "0.52323925", "0.5230336", "0.52280307", "0.5226374", "0.5223179", "0.5222565", "0.5216643", "0.51985043", "0.5185708", "0.5182788", "0.518212", "0.51818764", "0.5179986", "0.51754576", "0.5174136", "0.5171443", "0.51594496", "0.51574576", "0.51515496", "0.5138663", "0.51228094", "0.5118961", "0.51143086", "0.51123536", "0.51058453", "0.5102444", "0.5101774", "0.5093231", "0.50888216", "0.5081163", "0.5077364", "0.50753796", "0.50699407", "0.5066601", "0.50649315", "0.5051801", "0.5049154", "0.5039871", "0.5038639", "0.5037569", "0.50356054", "0.5035435", "0.5034982", "0.50342613", "0.50332314", "0.50332314", "0.50299144", "0.50258625", "0.50211143", "0.5019165", "0.50181323", "0.50033295", "0.5001131", "0.50007665", "0.4999905", "0.49994156", "0.49931398", "0.49895588", "0.49891952", "0.49809903", "0.4979709", "0.4974725", "0.4968319", "0.49666235", "0.49659923" ]
0.689713
1
Any format, prefix, suffix, html info in attrs dict
def __init__(self, name, attrs={}):
    TextFormat.__init__(self, name, attrs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def html_attrs(attrs):\n html = \"\"\n for a in attrs.items():\n if a[1]:\n html = html + \"%s=\\\"%s\\\" \"%(a)\n return html", "def attr(*attrs: ATTRIBUTE) -> str:\n return PyHTML.attr(*attrs)", "def attrs(context):\n result = \"\"\n for key, value in context.flatten().items():\n if key not in [\"True\", \"False\", \"None\", \"content\", \"element\"]:\n if \"hx_\" in key:\n key = key.replace(\"_\", \"-\")\n result += f' {key}=\"{value}\"'\n return mark_safe(result)", "def __get_attr_format (self, attrs):\r\n format = { \r\n 'editor': None,\r\n 'min': None,\r\n 'max': None,\r\n 'step': None,\r\n 'subtype': None,\r\n 'flags': None,\r\n 'enums': None\r\n }\r\n\r\n for attr in attrs: \r\n attr_type = attr[\"type\"]\r\n if \"editor\" == attr_type:\r\n format['editor'] = attr[\"value\"] \r\n if \"min\" == attr_type:\r\n format['min'] = attr[\"value\"] \r\n if \"max\" == attr_type:\r\n format['max'] = attr[\"value\"] \r\n if \"default\" == attr_type:\r\n format['default'] = attr[\"value\"] \r\n if \"step\" == attr_type:\r\n format['step'] = attr[\"value\"]\r\n if \"subtype\" == attr_type:\r\n format['subtype'] = attr[\"value\"]\r\n if \"flags\" == attr_type:\r\n format['flags'] = attr['value']\r\n if \"enums\" == attr_type:\r\n format['enums'] = attr['value']\r\n\r\n return format", "def _formatAttributes(self, attr=None, allowed_attrs=None, **kw):\n\n # Merge the attr dict and kw dict into a single attributes\n # dictionary (rewriting any attribute names, extracting\n # namespaces, and merging some values like css classes).\n attributes = {} # dict of key=(namespace,name): value=attribute_value\n if attr:\n for a, v in attr.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n if kw:\n for a, v in kw.items():\n a_ns, a_name = rewrite_attribute_name(a)\n extend_attribute_dictionary(attributes, a_ns, a_name, v)\n\n # Add title attribute if missing, but it has an alt.\n if ('html', 'alt') in attributes and ('html', 'title') not in attributes:\n attributes[('html', 'title')] = attributes[('html', 'alt')]\n\n # Force both lang and xml:lang to be present and identical if\n # either exists. The lang takes precedence over xml:lang if\n # both exist.\n #if ('html', 'lang') in attributes:\n # attributes[('xml', 'lang')] = attributes[('html', 'lang')]\n #elif ('xml', 'lang') in attributes:\n # attributes[('html', 'lang')] = attributes[('xml', 'lang')]\n\n # Check all the HTML attributes to see if they are known and\n # allowed. Ignore attributes if in non-HTML namespaces.\n if allowed_attrs:\n for name in [key[1] for key in attributes if key[0] == 'html']:\n if name in _common_attributes or name in allowed_attrs:\n pass\n elif name.startswith('on'):\n pass # Too many event handlers to enumerate, just let them all pass.\n else:\n # Unknown or unallowed attribute.\n err = 'Illegal HTML attribute \"%s\" passed to formatter' % name\n raise ValueError(err)\n\n # Finally, format them all as a single string.\n if attributes:\n # Construct a formatted string containing all attributes\n # with their values escaped. Any html:* namespace\n # attributes drop the namespace prefix. 
We build this by\n # separating the attributes into three categories:\n #\n # * Those without any namespace (should only be xmlns attributes)\n # * Those in the HTML namespace (we drop the html: prefix for these)\n # * Those in any other non-HTML namespace, including xml:\n\n xmlnslist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if not k[0]]\n htmllist = ['%s=\"%s\"' % (k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] == 'html']\n otherlist = ['%s:%s=\"%s\"' % (k[0], k[1], wikiutil.escape(v, 1))\n for k, v in attributes.items() if k[0] and k[0] != 'html']\n\n # Join all these lists together in a space-separated string. Also\n # prefix the whole thing with a space too.\n htmllist.sort()\n otherlist.sort()\n all = [''] + xmlnslist + htmllist + otherlist\n return ' '.join(all)\n return ''", "def get_format_attrs(self, name, field, alt_field_info={}):\n # important_props = ('initial', 'autofocus', 'widget')\n if name in alt_field_info:\n field = deepcopy(field)\n for prop, value in alt_field_info[name].items():\n setattr(field, prop, value)\n initial = field.initial\n initial = initial() if callable(initial) else initial\n attrs, result = {}, []\n if initial and not isinstance(field.widget, Textarea):\n attrs['value'] = str(initial)\n data_val = self.form.data.get(get_html_name(self.form, name), None)\n if data_val not in ('', None):\n attrs['value'] = data_val\n attrs.update(field.widget_attrs(field.widget))\n result = ''.join(f'{key}=\"{val}\" ' for key, val in attrs.items())\n if getattr(field, 'autofocus', None):\n result += 'autofocus '\n if issubclass(self.form.__class__, FormOverrideMixIn):\n # TODO: Expand for actual output when using FormOverrideMixIn, or a sub-class of it.\n result += '%(attrs)s' # content '%(attrs)s'\n else:\n result = '%(attrs)s' + result # '%(attrs)s' content\n return result", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def flatatt(attrs):\n return u''.join([u' %s=\"%s\"' % (k.replace('_', '-'), conditional_escape(v)) for k, v in attrs.items()])", "def render_attr(key, value, attr_format='{key}=\"{value}\"'):\n\n if not key or ' ' in key:\n raise InvalidAttribute('Invalid name \"{}\"'.format(key))\n\n if value:\n if type(value) is RawNode:\n value = str(value)\n else:\n value = html.escape(str(value))\n\n return attr_format.format(key=key, value=value)\n\n return key", "def make_attrs(self, mixed):\n if isinstance(mixed, dict):\n return ''.join('%s=\"%s\" ' % (k, v) for k, v in mixed.items())\n return str(mixed)", "def gen_tag_attrs(self, *a, **kw):\n return gen_tag_attrs(self, *a, **kw)", "def attributes(self):\n _attrs = [\"label\"]\n if self.attrs:\n _attrs.append(\"attrs\")\n return _attrs", "def back_to_tag(tag, attrs):\n sol = '<' + tag\n for (prop, val) in attrs:\n sol += ' ' + prop + '=\"' + val + '\"'\n sol += '>'\n return sol", "def add_attrs(value, arg):\n try:\n # Split list on comma\n kv_pairs = arg.split(\",\")\n except ValueError:\n raise template.TemplateSyntaxError(\n \"add_attrs requires as an argument a string in the format 'key:value, key1:value1, key2:value2...'\"\n )\n\n\n # Create dictionary\n html_attrs = dict()\n\n # Clean items and add attribute pairs to dictionary\n for item in kv_pairs:\n item = item.strip()\n k, v = item.split(\":\")\n html_attrs.update({k.strip():v.strip()})\n\n return value.as_widget(attrs=html_attrs)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def 
string_for_attrs(attrs):\n if not attrs: return ''\n return ''.join(' %s=\"%s\"' % (attr, value) for attr, value in attrs)", "def attr(*attrs: ATTRIBUTE) -> str:\n formatted = []\n for attr_ in attrs:\n if isinstance(attr_, str):\n formatted.append(attr_)\n elif isinstance(attr_, tuple) and len(attr_) == 2:\n formatted.append(f'{attr_[0]}=\"{attr_[1]}\"')\n else:\n raise ValueError(f\"Bad attribute: {attr_}\")\n return \" \".join(formatted)", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def attrs(**kwds):\n\n def decorate(f):\n for k in kwds:\n setattr(f, k, kwds[k])\n return f\n\n return decorate", "def dot_node_attrs(self):\n\n lbl_name = '%s' % self.format_name(True, True, 24)\n lbl_acc = '<font point-size=\"8.0\">%s</font>' % self.format_id()\n label = self.node_label_fmt % (self.url(), self.name,\n lbl_name, lbl_acc)\n\n node_attrs = {'label': label}\n return node_attrs", "def attrs(*attributes):\n return ';'.join([ str(i) for i in attributes ])", "def ATTRIBUTE():\n return \"author\", \"title\", \"publisher\", \"shelf\", \"category\", \"subject\"", "def getAttributeInfoDictionary(attr, format=None):\n format = format or _getDocFormat(attr)\n return {'name': attr.getName(),\n 'doc': renderText(attr.getDoc() or '', format=format)}", "def attrsToString(self, attrs):\n string = \"\"\n # for every attribut\n for attr in attrs:\n # converts its name and value to string and adds this to string\n string += \" {}=\\\"{}\\\"\".format(attr[0], attr[1])\n # no exception!\n print(\"Das Attribut ist zu lang!\") if len(attr) > 2 else None\n return string", "def format_attr(attr: str) -> str:\r\n prefix = query_params[Toml.REMOVE_PREFIX]\r\n suffix = query_params[Toml.REMOVE_SUFFIX]\r\n prefix_len = len(prefix)\r\n suffix_len = len(suffix)\r\n stripped = attr.strip()\r\n if stripped[:prefix_len] == prefix:\r\n stripped = stripped[prefix_len:]\r\n if stripped[-suffix_len:] == suffix:\r\n stripped = stripped[:-suffix_len]\r\n return constcase(stripped).replace('__', '_')", "def handleAttributes(text, parent):\r\n def attributeCallback(match):\r\n parent.set(match.group(1), match.group(2).replace('\\n', ' '))\r\n return ATTR_RE.sub(attributeCallback, text)", "def _attrs(self, element, attrs):\n for attr, val in list(attrs.items()):\n element.setAttribute(attr, val)\n return element", "def date_attrs(name):\n attrs = battrs(name)\n attrs.update({'class': 'form-control datepicker'})\n return attrs", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def extract_attrs(attr_string):\n attributes = {}\n for name, val in FIND_ATTRS.findall(attr_string):\n val = (\n val.replace(\"&lt;\", \"<\")\n .replace(\"&gt;\", \">\")\n .replace(\"&quot;\", '\"')\n .replace(\"&amp;\", \"&\")\n )\n attributes[name] = val\n return attributes", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def parse_tag_attrs(self, *a, **kw):\n return parse_tag_attrs(*a, **kw)", "def extend_attribute_dictionary(attributedict, ns, name, value):\n\n key = ns, name\n if 
value is None:\n if key in attributedict:\n del attributedict[key]\n else:\n if ns == 'html' and key in attributedict:\n if name == 'class':\n # CSS classes are appended by space-separated list\n value = attributedict[key] + ' ' + value\n elif name == 'style':\n # CSS styles are appended by semicolon-separated rules list\n value = attributedict[key] + '; ' + value\n elif name in _html_attribute_boolflags:\n # All attributes must have a value. According to XHTML those\n # traditionally used as flags should have their value set to\n # the same as the attribute name.\n value = name\n attributedict[key] = value", "def _attribs(self, name=None, description=None):\n a = {}\n if name:\n a['name'] = name\n if description:\n a['description'] = description\n return a", "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def addattrs(field, my_attrs):\n my_attrs = my_attrs.split(',')\n my_attrs = dict([attr.split('=') for attr in my_attrs])\n return field.as_widget(attrs=my_attrs)", "def attrs(xml):\r\n return lxml.html.fromstring(xml).attrib", "def parse_tag_attrs(tag_str, options_d=None, font_d=None, case=\"\", **kwargs):\n attr_b = kwargs.pop(\"attr\", \"\")\n auto_b = kwargs.pop(\"auto\", False)\n font_d = kwargs.pop(\"font_d\", font_d or {})\n options_d = kwargs.pop(\"options_d\", options_d or {})\n case = kwargs.pop(\"case\", case)\n widget = kwargs.pop(\"widget\", None)\n text_w = kwargs.pop(text_s, None)\n bad_opts = []\n # INTs: height repeatdelay repeatinterval underline width; size fun fov\n for keyval in split_attrs(tag_str):\n if \"=\" in keyval:\n key, val = keyval.split(\"=\")\n val = unquote(val)\n elif keyval:\n key, val = keyval, None\n else:\n continue\n key = key.lower()\n key2, key3, key4 = key[:2], key[:3], key[:4]\n lowval = val.lower() if val else val\n key = unalias(key)\n kalias = alias(key)\n if val == \"None\": # in ('False', 'None') #\n pass\n elif key3 in (\n bg_s,\n background_s[:3],\n fg_s,\n foreground_s[:3],\n ) or kalias in (bg_s, fg_s):\n options_d.update(**{key: val})\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n options_d.update(**{key: val})\n if auto_b and compound_s not in options_d:\n options_d.update(compound=tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],) or kalias == bd_s:\n options_d.update(borderwidth=val)\n elif key4 in (command_s[:4], compound_s[:4],) or kalias in (\n command_as,\n compound_as,\n ):\n options_d.update(**{key: val})\n elif (\n key2 in (height_s[:2], width_s[:2])\n or key3 in (repeatdelay_s[:3], repeatinterval_s[:3])\n or kalias\n in (height_as, width_as, repeatdelay_as, repeatinterval_as)\n ):\n options_d.update(**{key: int(val)})\n elif (\n key2 in (cursor_s[:2],)\n or key3 == font_s[:3]\n or kalias in (cursor_as, font_as)\n ):\n options_d.update(**{key: val})\n elif key2 in (\"r\", relief_s[:2],) or kalias == relief_as:\n options_d.update(relief=val)\n if auto_b and borderwidth_s not in options_d and val != tk.FLAT:\n options_d.update(borderwidth=str(1))\n elif key2 == underline_s[:2] or kalias == underline_as:\n options_d.update(underline=-1 if val is None else int(val))\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ) or kalias in (selectbackground_as, selectforeground_as):\n options_d.update(**{key: val})\n # special for fonts\n elif key2 in (family_s[:2],) or kalias == family_as:\n font_d[family_s] = val\n elif key2 in (size_s[:2],) 
or kalias == size_as:\n try:\n font_d[size_s] = int(val)\n except ValueError:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: ERROR Setting Font Size to %r\" % val,\n Raise=True,\n )\n elif key3 in (bold_as, tk_font.BOLD[:3]) or kalias == bold_as:\n font_d[weight_s] = (\n tk_font.BOLD\n if str(val) not in (\"0\", \"False\",)\n else tk_font.NORMAL\n )\n elif key2 in (weight_s[:2],) or kalias == weight_as:\n font_d[weight_s] = val\n elif key2 in (italic_as, tk_font.ITALIC[:2]) or kalias == italic_as:\n font_d[slant_s] = (\n tk_font.ITALIC\n if str(val) not in (\"0\", \"False\",)\n else tk_font.ROMAN\n )\n elif key2 in (slant_s[:2],) or kalias == slant_as:\n font_d[slant_s] = val\n elif (\n key3 in (funderline_as, funderline_s[:3])\n or kalias == funderline_as\n ):\n font_d[underline_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n elif (\n key3 in (foverstrike_as, foverstrike_s[:3])\n or kalias == foverstrike_as\n ):\n font_d[overstrike_s] = 1 if str(val) not in (\"0\", \"False\",) else 0\n # special \"case\" implementation\n elif key3 in (case_s[:3],) or kalias == case_as:\n for s in (upper_s, capitalize_s, lower_s, title_s, swapcase_s):\n if s.startswith(lowval):\n case = s if s != capitalize_s else upper_s\n break\n elif (\n key2 == upper_s[:2]\n or key3 in (capitalize_s[:3],)\n or kalias in (upper_as, capitalize_as)\n ):\n if str(val) not in (\"0\", \"False\",):\n case = upper_s\n elif key2 in (lower_s[:2],) or kalias == lower_as:\n if str(val) not in (\"0\", \"False\",):\n case = lower_s\n elif key2 == title_s[:2] or kalias == title_as:\n if str(val) not in (\"0\", \"False\",):\n case = title_s\n elif key2 == swapcase_s[:2] or kalias == swapcase_as:\n if str(val) not in (\"0\", \"False\",):\n case = swapcase_s\n elif key in ():\n bad_opts.append((key, val))\n else:\n options_d.update(**{key: val})\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n if attr_b:\n return (\n case\n if attr_b == case_s\n else options_d.get(attr_b, font_d.get(attr_b))\n )\n return options_d, font_d, case", "def gen_tag_attrs(widget=None, options_d=None, font=None, case=None, **kwargs):\n auto_b = kwargs.get(\"auto\", False)\n case = kwargs.get(case_s, case)\n extend_b = kwargs.get(\"extend\", False)\n font = kwargs.pop(\"font\", font or {})\n index_i = kwargs.pop(\"index\", None)\n kmode_s = kwargs.get(\"kmode\", \"\") # a=alias, o=options, ''=unchanged\n options_d = kwargs.pop(\"options\", options_d or {})\n pare_b = kwargs.get(\"pare\", True)\n widget = kwargs.pop(\"widget\", widget)\n text_w = kwargs.get(text_s, None)\n recurse_b = kwargs.pop(\"recurse\", widget and isinstance(widget, TTWidget))\n fmt_s = \"\"\n font_d = {}\n w_font_d, w_options_d = {}, {}\n if index_i is not None and widget is None:\n raise Exception(\"Cannot set 'index' when 'widget' is None\")\n if widget: # and isinstance(widget, TTWidget): #\n excludes_t = () if widget.emulation_b else ()\n w_options_d = {\n k: v[-1]\n for k, v in widget.config().items()\n if len(v) == 5 and str(v[-1]) != str(v[-2]) and k not in excludes_t\n }\n try:\n w_options_d[case_s] = widget.case\n except AttributeError:\n pass\n w_font = widget.cget(font_s) # w_options_d.pop(font_s, None)\n w_font_d = get_font_dict(w_font) if w_font else {}\n if pare_b and w_font_d:\n def_w_font = widget.config(font_s)[-2]\n def_w_font_d = get_font_dict(def_w_font)\n w_font_d = pare_dict(w_font_d, def_w_font_d)\n if font:\n if isinstance(font, str):\n try:\n font = tk_font.nametofont(font)\n except 
tk.TclError:\n pass\n elif type(font) in (list, tuple):\n font = tk_font.Font(font=font)\n if isinstance(font, tk_font.Font):\n font = font.actual()\n if isinstance(font, dict):\n font_d = font\n if case: # is not None:\n options_d = _merge_dicts(options_d, dict(case=case))\n d = _merge_dicts(\n w_options_d,\n convert_font_dict_to_ttoptions_dict(w_font_d),\n options_d,\n convert_font_dict_to_ttoptions_dict(font_d),\n kwargs,\n )\n bad_opts = []\n for key, val in d.items():\n key = key.lower()\n if key in (\"auto\", \"extend\", \"kmode\", \"pare\",): # text_s, ): #\n continue\n key2, key3, key4 = key[:2], key[:3], key[:4]\n kalias = alias(key)\n koption = unalias(key)\n if kmode_s:\n if kmode_s[0] == \"a\": # alias\n keyout = kalias\n kfunc = alias\n auto_cpd, auto_bd = compound_as, bd_s\n elif kmode_s[0] == \"o\": # option\n keyout = koption\n kfunc = unalias\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n else:\n keyout = key\n kfunc = str\n auto_cpd, auto_bd = compound_s, borderwidth_s # bd_s #\n if val:\n val = quote(val)\n if (\n key3 in (bg_s, background_s[:3], fg_s, foreground_s[:3])\n or key2 == underline_s[:2]\n or kalias in (bg_s, fg_s, underline_as)\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (bitmap_s[:2], image_s[:2],) or kalias in (\n bitmap_as,\n image_as,\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_cpd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_cpd, tk.CENTER)\n elif key3 in (bd_s, borderwidth_s[:3],):\n if \"%s=%s \" % (auto_bd, 1) in fmt_s:\n if val != 1:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_bd, 1), \"%s=%s \" % (keyout, val)\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key4 in (compound_s[:4],) or kalias == compound_as:\n if \"%s=%s \" % (auto_cpd, tk.CENTER) in fmt_s:\n if val != tk.CENTER:\n fmt_s = fmt_s.replace(\n \"%s=%s \" % (auto_cpd, tk.CENTER),\n \"%s=%s \" % (keyout, val),\n )\n else:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == cursor_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key3 == font_s[:3]:\n fmt_s += \"%s=%s \" % (keyout, get_named_font(val))\n elif key2 in (relief_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n if auto_b and \"%s=\" % auto_bd not in fmt_s:\n fmt_s += \"%s=%s \" % (auto_bd, 1)\n # special for TTListbox\n elif key[:7] in (\n sbg_s,\n selectbackground_s[:7],\n sbd_s,\n selectborderwidth_s[:7],\n sfg_s,\n selectforeground_s[:7],\n ):\n fmt_s += \"%s=%s \" % (keyout, val)\n # special for fonts\n elif key2 in (family_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (size_s[:2],):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key2 in (weight_s[:2],):\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.BOLD),\n 1\n if isinstance(val, str) and val.lower() == tk_font.BOLD\n else 0,\n )\n elif key2 == slant_s[:2]:\n fmt_s += \"%s=%d \" % (\n kfunc(tk_font.ITALIC),\n 1\n if isinstance(val, str) and val.lower() == tk_font.ITALIC\n else 0,\n )\n elif key3 in (funderline_as, funderline_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(funderline_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n elif key3 in (foverstrike_as, foverstrike_s[:3]):\n fmt_s += \"%s=%d \" % (\n kfunc(foverstrike_s),\n 1 if str(val) in (\"1\", \"True\") else 0,\n )\n # special \"case\" implementation\n elif key3 == case_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(case_s), val)\n elif key2 == upper_s[:2] or key3 == capitalize_s[:3]:\n fmt_s += \"%s=%s \" % (kfunc(upper_s), val)\n elif key2 in (lower_s[:2], title_s[:2], swapcase_s[:2]):\n fmt_s += \"%s=%s \" % (keyout, val)\n elif key in 
():\n bad_opts.append((key, val))\n elif key in (text_s, text_as):\n if extend_b or widget:\n fmt_s += \"%s=%s \" % (keyout, val)\n else:\n # bad_opts.append((key, val))\n fmt_s += \"%s=%s \" % (keyout, val)\n if bad_opts:\n _print_out(\n widget,\n text_w,\n \"EXCEPTION: UNEXPECTED TAG ATTRS: %r\" % bad_opts,\n Raise=True,\n )\n fmt = fmt_s.strip()\n if widget and isinstance(widget, TTWidget) and recurse_b:\n fmt = [\n fmt,\n ]\n for _, gathering in widget._get_kids(items=True):\n child = gathering[\"label\"]\n case = gathering.get(case_s, \"\")\n kid_options = {\n k: v[-1]\n for k, v in child.config().items()\n if len(v) == 5\n and str(v[-1]) != str(v[-2])\n and (k, v[-1]) not in w_options_d.items()\n and not (k in label_override_d and str(v[-1]) == \"0\")\n } #\n cf = kid_options.pop(font_s, None)\n cdf = child.config(font_s)[-2]\n if cf != cdf:\n c_font_d = pare_dict(get_font_dict(cf), get_font_dict(cdf))\n else:\n c_font_d = {}\n if case:\n kid_options.update(case=case)\n fmt.append(\n gen_tag_attrs(options=kid_options, font=c_font_d, **kwargs)\n )\n return fmt if index_i is None else fmt[index_i]", "def format_attributes(attributes):\n return ';'.join([k + '=' + v for k, v in attributes.items()])", "def handle_meta(self, tag, attrs):\n ad = {}\n for tup in attrs:\n ad[tup[0]] = tup[1]\n if 'name' in ad.keys() \\\n and 'keywords' == ad['name'] \\\n and 'content' in ad.keys():\n self.filetype = ad['content']\n if 'name' in ad.keys() \\\n and 'description' == ad['name']:\n self.description = 'present'\n if 'charset' in ad.keys():\n self.charset = 'present'", "def add_attributes(self, attrs):\n self.attrs.add_container(attrs)", "def set_attrs(dict, elem, attrs):\n for attr in attrs:\n if attr in elem.keys():\n dict[attr] = elem.get(attr)", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def prepare_node_attrs(self):", "def get_attribute_data(self, attrs):\n return {\n 'id': attrs['data-id'],\n }", "def get_attrs(self):\n req_attrv = self._ptr.contents.attrv\n attrs = {}\n if bool(req_attrv):\n i = 0\n while 1:\n s = bytestostr(req_attrv[i])\n i += 1\n if s == None:\n break\n try:\n k, v = s.split(\"=\", 1)\n attrs[k] = v\n except:\n pass\n return attrs", "def attkey_to_SVG_attribs(self,k):\n atts= k.split('@')\n o= ''\n acodes= {'C':'stroke','W':'stroke-width','S':'stroke-dasharray','O':'stroke-opacity'}\n for a in atts:\n if a[0] in acodes:\n o+= '%s=\"%s\" ' % (acodes[a[0]],a[1:])\n# elif a[0] == 'S': # Maybe do something special like this.\n# o+= 'stroke-dasharray=\"%\" ' % a[1:]\n return o", "def add_attributes(self, attrs):\n self.attrs.add_attributes(attrs)", "def fix_attributes(string):\n defs = re.compile('<dl class=\"attribute\">(?P<descrip>.*?)</dl>',flags=re.DOTALL)\n name = re.compile('<code class=\"descclassname\">(?P<name>[^<]*)</code>')\n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefsub = ''\n remnsub = remain[match.start(1):match.end(1)]\n descrip = name.search(remnsub)\n if descrip:\n prefix += remnsub[:descrip.start()]\n prefix += remnsub[descrip.end():]\n prefix += remain[match.end(1):match.end(0)]\n else:\n prefix += remain[match.start(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table", "def read_attribs(self):\n\n attribs = {}\n while 
self.index < self.length:\n self.ignore_whitespaces()\n if self.xtext[self.index] == '>':\n break\n name = self.read_until('=')\n self.index += 1\n self.read_until('\"')\n self.index += 1\n value = self.read_until('\"')\n self.index += 1\n\n attribs[name] = value\n\n return attribs", "def extensible_attributes():\n return 'extensibleattributedef?'", "def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed", "def tag(func):\n @functools.wraps(func)\n def wrapper(**kwargs):\n\n name = func.__name__\n\n if kwargs:\n \n try:\n\n check_text = kwargs['text']\n del kwargs['text']\n \n \n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n open('index.html', 'a+').write(f\"{check_text}\")\n open('index.html', 'a+').write(f\"</{name}>\")\n\n except KeyError:\n\n kwargs = {\n k.replace(\"__\", \"\").replace(\"_\", \"-\"): v for k, v in kwargs.items()\n }\n\n all_attr = f\"<{name} \", *(f' {key}=\"{value}\"' for key, value in kwargs.items()), \">\"\n open('index.html', 'a+').write(f\"\\n{join_attr(all_attr)}\")\n\n else:\n\n open('index.html', 'a+').write(f\"\\n<{name}>\")\n\n\n func(**kwargs)\n \n return wrapper", "def getAttrsDict(attrs):\r\n attrsDict = json.loads(re.sub('/\\\"(?!(,\\s\"|}))','\\\\\"',attrs).replace(\"\\t\",\" \").replace(\"\\n\",\" \")) if len(attrs)>0 else {}\r\n return attrsDict", "def 
parse_tag_attrs(\n self, tags_str, options_d=None, font_d=None, case=\"\", **kwargs\n ):\n return parse_tag_attrs(\n tags_str,\n options_d,\n font_d,\n case,\n widget=self,\n text=getattr(self, \"debug_text\", None),\n **kwargs\n )", "def _get_annotation_data_attr(self, index, el):\r\n\r\n data_attrs = {}\r\n attrs_map = {\r\n 'body': 'data-comment-body',\r\n 'title': 'data-comment-title',\r\n 'problem': 'data-problem-id'\r\n }\r\n\r\n for xml_key in attrs_map.keys():\r\n if xml_key in el.attrib:\r\n value = el.get(xml_key, '')\r\n html_key = attrs_map[xml_key]\r\n data_attrs[html_key] = {'value': value, '_delete': xml_key}\r\n\r\n return data_attrs", "def get_attributes(self) -> Dict[str, str]:\n pass", "def transform(attrs: dict) -> dict:\n\n pass", "def get_html_element_attributes(self):\n html_element_attributes = {\n 'class': self.css_classes or False, # Fall back to false to avoid class=\"\"\n }\n if self.should_render_as_link():\n html_element_attributes['href'] = self.url\n return html_element_attributes", "def create_descr(self, attr_name):", "def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result", "def set_attrs(self, username, attrs):\n pass", "def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,\r\n prettyPrint=False, indentLevel=0):\r\n\r\n encodedName = self.toEncoding(self.name, encoding)\r\n\r\n attrs = []\r\n if self.attrs:\r\n for key, val in self.attrs:\r\n fmt = '%s=\"%s\"'\r\n if isString(val):\r\n if self.containsSubstitutions and '%SOUP-ENCODING%' in val:\r\n val = self.substituteEncoding(val, encoding)\r\n\r\n # The attribute value either:\r\n #\r\n # * Contains no embedded double quotes or single quotes.\r\n # No problem: we enclose it in double quotes.\r\n # * Contains embedded single quotes. No problem:\r\n # double quotes work here too.\r\n # * Contains embedded double quotes. No problem:\r\n # we enclose it in single quotes.\r\n # * Embeds both single _and_ double quotes. This\r\n # can't happen naturally, but it can happen if\r\n # you modify an attribute value after parsing\r\n # the document. Now we have a bit of a\r\n # problem. We solve it by enclosing the\r\n # attribute in single quotes, and escaping any\r\n # embedded single quotes to XML entities.\r\n if '\"' in val:\r\n fmt = \"%s='%s'\"\r\n if \"'\" in val:\r\n # TODO: replace with apos when\r\n # appropriate.\r\n val = val.replace(\"'\", \"&squot;\")\r\n\r\n # Now we're okay w/r/t quotes. 
But the attribute\r\n # value might also contain angle brackets, or\r\n # ampersands that aren't part of entities. We need\r\n # to escape those to XML entities too.\r\n val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)\r\n\r\n attrs.append(fmt % (self.toEncoding(key, encoding),\r\n self.toEncoding(val, encoding)))\r\n close = ''\r\n closeTag = ''\r\n if self.isSelfClosing:\r\n close = ' /'\r\n else:\r\n closeTag = '</%s>' % encodedName\r\n\r\n indentTag, indentContents = 0, 0\r\n if prettyPrint:\r\n indentTag = indentLevel\r\n space = (' ' * (indentTag-1))\r\n indentContents = indentTag + 1\r\n contents = self.renderContents(encoding, prettyPrint, indentContents)\r\n if self.hidden:\r\n s = contents\r\n else:\r\n s = []\r\n attributeString = ''\r\n if attrs:\r\n attributeString = ' ' + ' '.join(attrs)\r\n if prettyPrint:\r\n s.append(space)\r\n s.append('<%s%s%s>' % (encodedName, attributeString, close))\r\n if prettyPrint:\r\n s.append(\"\\n\")\r\n s.append(contents)\r\n if prettyPrint and contents and contents[-1] != \"\\n\":\r\n s.append(\"\\n\")\r\n if prettyPrint and closeTag:\r\n s.append(space)\r\n s.append(closeTag)\r\n if prettyPrint and closeTag and self.nextSibling:\r\n s.append(\"\\n\")\r\n s = ''.join(s)\r\n return s", "def attributes(self):\n _attrs = [\"type\", \"name\", \"value\"]\n if self.confidence is not None:\n _attrs.append(\"confidence\")\n if self.constant:\n _attrs.append(\"constant\")\n if self.tags:\n _attrs.append(\"tags\")\n\n return _attrs", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def convert_attributes(cls, attrs):\n return {}", "def get_switched_form_field_attrs(self, prefix, input_type, name):\n attributes = {'class': 'switched', 'data-switch-on': prefix + 'field'}\n attributes['data-' + prefix + 'field-' + input_type] = name\n return attributes", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def widget_attrs(self, widget):\n\n attrs = super(RelateField, self).widget_attrs(widget)\n\n attrs.update({'content_type': self.content_types})\n\n return attrs", "def attributes(doc, header, renderer=Attribute, item_class=DefinitionItem):\n items = doc.extract_items(item_class)\n lines = []\n renderer = renderer()\n for item in items:\n renderer.item = item\n lines += renderer.to_rst()\n lines.append('')\n return lines", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n info[ATTR_NAME] = info[ATTR_PROPERTIES]['Name'].replace('\\xa0', ' ')\n return info", "def img(self, **kwargs):\n attrs = ''\n for item in 
kwargs.items():\n if not item[0] in IMGATTRS:\n raise AttributeError, 'Invalid img tag attribute: %s'%item[0]\n attrs += '%s=\"%s\" '%item\n return '<img src=\"%s\" %s>'%(str(self),attrs)", "def gen_tag_attrs(self, *a, **kw):\n if kw.get(\"widget\", sentinel) is not None:\n raise Exception(\n \"TTToolTip.gen_tag_attrs(): 'widget' keyword must be set\"\n \" to None\"\n )\n return gen_tag_attrs(None, *a, **kw)", "def init_attrs(self):\n raise NotImplementedError", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def _get_var_attrs(var):\n\n generic_dict = {'instrument': '', 'valid_range': (-1e+35,1e+35),\n 'missing_value': -9999, 'height': '',\n 'standard_name': '', 'group_name': '',\n 'serial_number': ''}\n\n generic_dict.update(attrs_dict[var])\n return generic_dict", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def _style_to_basic_html_attributes(self, element, style_content,\n force=False):\n if style_content.count('}') and \\\n style_content.count('{') == style_content.count('{'):\n style_content = style_content.split('}')[0][1:]\n\n attributes = {}\n for rule in style_content.split(';'):\n split = rule.split(':')\n if len(split) != 2:\n continue\n key = split[0].strip()\n value = split[1]\n\n if key == 'text-align':\n attributes['align'] = value.strip()\n elif key == 'background-color':\n attributes['bgcolor'] = value.strip()\n elif key == 'width' or key == 'height':\n value = value.strip()\n if value.endswith('px'):\n value = value[:-2]\n attributes[key] = value\n\n for key, value in list(attributes.items()):\n if key in element.attrib and not force or key in self.disable_basic_attributes:\n # already set, don't dare to overwrite\n continue\n element.attrib[key] = value", "def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}", "def domAttributesToString( node ):\n strOut = \"node has %d attribute(s):\\n\" % node.attributes.length;\n for i in range(node.attributes.length):\n attr = node.attributes.item(i);\n strOut += \"- %s:'%s'\\n\" % (attr.name, attr.value );\n return strOut;", "def dict_with_attrs2(*args):\n class CustomDict(object):\n __slots__ = args\n __dict__ = {}\n\n def __init__(self, *args, **kwargs):\n super(CustomDict, self).__init__()\n if args:\n self.__dict__.update(*args)\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n def __getattr__(self, name):\n return self.__dict__[name]\n\n return CustomDict", "def attributes(self):\n attrs_ = [\"name\", \"type\"]\n if self.exclusive:\n attrs_.append(\"exclusive\")\n if self.default is not None:\n attrs_.append(\"default\")\n\n return attrs_", "def replace_tag_attributes(code_attrs, tag, tag_attrs):\n\n new_attrs = code_attrs.copy()\n for key, value in tag_attrs.items():\n if key in new_attrs:\n new_attrs[key] = new_attrs[key].replace(tag, value)\n\n return new_attrs", "def _iterattrs(self, handle=\"\"):\n if not handle:\n handle = self.handle\n attr = gv.firstattr(handle)\n while gv.ok(attr):\n yield gv.nameof(attr), decode_page(gv.getv(handle, attr))\n attr = gv.nextattr(handle, attr)", "def as_html(self):\n return mark_safe(\" 
\".join([\n self.attribute_template % (k, escape(v if not callable(v) else v()))\n for k, v in six.iteritems(self) if not v in EMPTY_VALUES]))", "def strpatt(self, name):\n return name.replace(\"att.\", \"\")", "def format_link(attrs: Dict[tuple, str], new: bool = False):\n try:\n p = urlparse(attrs[(None, 'href')])\n except KeyError:\n # no href, probably an anchor\n return attrs\n\n if not any([p.scheme, p.netloc, p.path]) and p.fragment:\n # the link isn't going anywhere, probably a fragment link\n return attrs\n\n c = urlparse(settings.SITE_URL)\n if p.netloc != c.netloc:\n # link is external - secure and mark\n attrs[(None, 'target')] = '_blank'\n attrs[(None, 'class')] = attrs.get((None, 'class'), '') + ' external'\n attrs[(None, 'rel')] = 'nofollow noopener noreferrer'\n\n return attrs", "def extractAttrs(obj, justLabel=False, dictName=''):\n return extractAttrsCore(obj, {}, justLabel, dictName)", "def parseAttrs(self,attrs,date_type):\n\tattrs=copy.copy(attrs) #make sure we don't change user/group attributes\n \tattr_holders=self.getAttrHolders(attrs)\n\tmap(lambda x:x.setDateType(date_type),attr_holders)\n\tmap(lambda x:attrs.update(x.getParsedDic()),attr_holders)\n\treturn attrs", "def add_attributes(self, attrs):\n for attr in attrs:\n self.add_attribute(attr)", "def _parse_attr(self, attr_proto):\n attrs = {}\n for a in attr_proto:\n for f in ['f', 'i', 's']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['floats', 'ints', 'strings']:\n if list(getattr(a, f)):\n assert a.name not in attrs, \"Only one type of attr is allowed\"\n attrs[a.name] = tuple(getattr(a, f))\n for f in ['t', 'g']:\n if a.HasField(f):\n attrs[a.name] = getattr(a, f)\n for f in ['tensors', 'graphs']:\n if list(getattr(a, f)):\n raise NotImplementedError(\"Filed {} is not supported in mxnet.\".format(f))\n if a.name not in attrs:\n raise ValueError(\"Cannot parse attribute: \\n{}\\n.\".format(a))\n return attrs" ]
[ "0.735201", "0.6754294", "0.67166066", "0.67071074", "0.66780305", "0.65807486", "0.6522693", "0.6522693", "0.65187657", "0.6471306", "0.6269984", "0.62653935", "0.6153201", "0.6090701", "0.60323846", "0.60278016", "0.6011661", "0.60042846", "0.59841794", "0.5941162", "0.59205276", "0.5918955", "0.59121054", "0.5903962", "0.5884743", "0.5876164", "0.5857109", "0.5851559", "0.583173", "0.58274394", "0.5816038", "0.58061635", "0.5784312", "0.5755998", "0.5755998", "0.57360405", "0.57051307", "0.5701552", "0.5687975", "0.5650812", "0.5618766", "0.561154", "0.5605911", "0.56030387", "0.5602799", "0.55926436", "0.5587559", "0.5571399", "0.5567558", "0.55631375", "0.555545", "0.5550559", "0.55490625", "0.55470836", "0.55410224", "0.5519966", "0.55098814", "0.5492064", "0.547102", "0.5470936", "0.54692423", "0.5467515", "0.54661024", "0.54518676", "0.54405665", "0.5438651", "0.54003173", "0.5388153", "0.5382598", "0.5375904", "0.5375076", "0.53706104", "0.5359634", "0.5354708", "0.5354708", "0.5331472", "0.5324531", "0.53227526", "0.5316361", "0.5309617", "0.5308968", "0.53067", "0.5306182", "0.5299369", "0.52990687", "0.5287107", "0.52791494", "0.5277907", "0.5276578", "0.52742803", "0.5270845", "0.52608305", "0.52524847", "0.5244876", "0.5239417", "0.5234171", "0.5224983", "0.5215326", "0.521457", "0.5212088", "0.5203955" ]
0.0
-1
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False):
    try:
        text = GenTime(storedText).timeStr(self.format)
    except GenTimeError:
        text = _errorStr
    return TextFormat.formatOutput(self, text, titleMode, internal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def format_title(self, data):\n return data", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_text(downgrade_titles=False):", "def PROPER(text):\n return text.title()", "def title(self, string):\n return self.bold(string)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def print_with_title(title, content, before='', after='', hl='='):\n cont_maxlen = max(len(s) for s in content.split('\\n'))\n hl_len = max(cont_maxlen, len(title))\n print('{}{}\\n{}\\n{}{}'.format(before, title, hl * hl_len, content, after))", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", 
trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def helptext(self):\n return \"\"", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def get_title():", "def __str__(self):\n date_str = self.date.strftime(self.journal.config['timeformat'])\n title = date_str + \" \" + self.title\n body = self.body.strip()\n\n return \"{title}{sep}{body}\\n\".format(\n title=title,\n sep=\"\\n\" if self.body else \"\",\n body=body\n )", "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def output_sep_title(title):\n print(f\"{sep_mark}\\t{title}{sep_mark}\")", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def title(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"=\")))", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', 
'&quot;')\n return title\n else:\n return u''", "def escape_if_needed(text, options):\n if hasattr(text, '__html__'):\n # Text has escape itself:\n return to_string(text.__html__())\n if need_to_escape(options):\n return escape(to_string(text))\n return to_string(text)", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_rst_title_char(level):\n chars = (u'=', u'-', u'`', u\"'\", u'.', u'~', u'*', u'+', u'^')\n if level < len(chars):\n return chars[level]\n return chars[-1]", "def process_title(self, title):\n\t\t# strip apostrophes\n\t\tif '\\'' in title:\n\t\t\ttitle = re.sub('\\'', '', title)\n\t\tif '.' in title:\n\t\t\ttitle = re.sub('.', '', title)\n\t\treturn title", "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "def html(self):\n bop = ('<b>' if self._bold else '')\n iop = ('<i>' if self._italic else '')\n icl = ('</i>' if self._italic else '')\n bcl = ('</b>' if self._bold else '')\n txt = escape(self._text)\n s = '%s%s%s%s%s' % (bop, iop, txt, icl, bcl)\n return '%s' % s", "def escape_for_display(s) :\n if len(s) == 0 :\n return \"[EMPTY]\"\n return s.replace(\"\\n\",\"[NL]\").replace(\"\\t\",\"[TAB]\") #.replace(\" \",\"[SP]\") # Escape newlines so not to confuse debug output.", "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def getHTMLText(self, s):\r\n\r\n # Removes any \"<\" or \">\" from the text, and replaces line ends with <br> tags\r\n if s is not None:\r\n res = str(s)\r\n res = string.replace(res, \">\", \"&gt;\")\r\n res = string.replace(res, \"<\", \"&lt;\")\r\n res = string.replace(s, \"\\n\", \"<br style='mso-data-placement:same-cell;'/>\")\r\n else:\r\n res = \"\"\r\n\r\n # Inserts formatting tag around text, if defined\r\n if self.formatBeginTag:\r\n res = self.formatBeginTag + res + self.formatEndTag\r\n\r\n return res", "def escape_single_quotes(custom_data):\n # https://stackoverflow.com/questions/10569438/how-to-print-unicode-character-in-python\n # https://regex101.com/r/nM4bXf/1\n if re.search(\"(?<!u)'(?!:|}|,)\", custom_data.get('title_name', '')):\n z = re.sub(r\"(?<!u)'(?!:|}|,)\", '\\\\\\'', custom_data.get('title_name', None))\n\n custom_data['title_name'] = z\n return custom_data\n return custom_data", "def format_title(self, ticket_id, subject):\n # TODO: strip block tags?\n title = \"#%i %s\" % (ticket_id, subject)\n return title.strip()", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def title(self, value):\n if len(value):\n self._title = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._title.append('')", "def emphasize(text: str, tablefmt: str | TableFormat, strong: bool = False) -> str:\n # formats a title for a table produced using tabulate,\n # in the formats tabulate understands\n if tablefmt in [\"html\", \"unsafehtml\", html_with_borders_tablefmt]: # type: ignore\n if strong:\n emph_text = f\"<strong>{text}</strong>\"\n else:\n emph_text = f\"<em>{text}</em>\"\n elif tablefmt in [\"latex\", \"latex_raw\", \"latex_booktabs\", \"latex_longtable\"]:\n if strong:\n emph_text = r\"\\textbf{\" + text + r\"}\"\n else:\n emph_text = r\"\\emph{\" + 
text + r\"}\"\n else: # use the emphasis for tablefmt == \"pipe\" (Markdown)\n star = \"**\" if strong else \"*\"\n emph_text = f\"{star}{text}{star}\"\n return emph_text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def textual(title, ordering_field=None):\n def decorator(func):\n def wraps(self, obj):\n result = func(self, obj)\n return result if result else u'---'\n\n wraps.short_description = title\n wraps.allow_tags = True\n\n if ordering_field:\n wraps.admin_order_field = ordering_field\n\n return wraps\n return decorator", "def outputText(self, item, titleMode, internal=False):\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def group_title(self, group):\n group_title = group.getProperty('title')\n if self.short:\n splitted = group_title.split('(')\n if len(splitted) > 1:\n group_title = group_title.split('(')[-1][:-1]\n return html.escape(group_title)", "def outputText(self, item, titleMode, internal=False):\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)", "def format_heading(self, level, text):\n underlining = ['=', '-', '~', ][level-1] * len(text)\n return '%s\\n%s\\n\\n' % (text, underlining)", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def formatted(self) -> str:\r\n ...", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def gen_title_rst(txt):\n # Just add a few useful directives\n txt = \".. 
highlight:: cmake\\n\\n\" + txt\n return txt", "def _prettyfilename(self):\n return self.title", "def wrap_title(title, mpl_layout):\n fig = mpl_layout.canvas.figure\n ax = fig.axes[0]\n ext_pixels = ax.get_window_extent()\n ext_inches = ext_pixels.transformed(fig.dpi_scale_trans.inverted())\n magic_number = 10\n letters_per_line = int(ext_inches.width * magic_number)\n title_wrapped = '\\n'.join(textwrap.wrap(title, letters_per_line))\n return title_wrapped", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def print_title(title):\n print \"\\n\"+\"#\"*32+\"\\n# \"+title+\"\\n\"+\"#\"*32+\"\\n\"", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''", "def transform(text: str) -> str:\n return text.title()", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)", "def complete_alt_title(self, obj):\n return str(obj)", "def clean_title(\r\n title: str,\r\n mode: Literal[\"soft\", \"hard\", \"safe\"],\r\n allow_dot: bool = False,\r\n n: Optional[int] = None,\r\n) -> str:\r\n ...", "def text(self) -> str:", "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and 
':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "def print_title(title):\n\n print(\"\\n\" + title)\n print(\"=\" * len(title))", "def format_rich_quote(rich_text_quote):\n rich_text = format_rich_text(rich_text_quote)\n return \"> \" + \"\\n> \".join(rich_text.split(\"\\n\")) + \"\\n\"", "def SearchableText(self):\n ctool = getToolByName(self, 'portal_cpscalendar')\n if getattr(ctool, 'event_fulltext_index', False):\n return '%s %s' % (self.title, self.description)\n return ''", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def visit_title_reference(self, node):\n self.body.append('\\\\emph{\\\\textbf{')", "def render(resolve_unicode,\n title_force_uppercase,\n msdos_eol_style,\n output_encoding,\n omit_fields=[]):", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def format_screen(self,str):\n # Paragraph continue\n par_re = re.compile(r'\\\\$',re.MULTILINE)\n str = par_re.sub('',str)\n return str", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def title_content(label=\"A title\"):\n return {'label':label}", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title_p(self):\n self.run_command('title_p')", "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def dumps(self):\n\n if not self.numbering:\n num = '*'\n else:\n num = ''\n\n string = Command(self.latex_name + num, self.title).dumps()\n string += '%\\n' + self.dumps_content()\n\n return string", "def processTitle(title):\n\n cleaned = re.sub(r'[@#\\\"]+', '', title.lower().strip())\n cleaned = re.sub(r'\\(\\d{4}.*\\)', '', cleaned)\n cleaned = re.sub(r':.+', '', cleaned).strip()\n return cleaned", "def show(self) -> str:\n return f'[{self.font}]{self.text}[{self.font}]' if 
self.font else self.text" ]
[ "0.67517006", "0.6623557", "0.64947814", "0.6347113", "0.6307539", "0.621596", "0.6210496", "0.60684896", "0.60674477", "0.60663515", "0.60421175", "0.6019259", "0.59935653", "0.59802073", "0.59790826", "0.595393", "0.5948588", "0.5939195", "0.590317", "0.5872387", "0.58521676", "0.5838757", "0.5835408", "0.5834278", "0.5832544", "0.58303535", "0.58232164", "0.58196765", "0.5818879", "0.581837", "0.58134586", "0.58123326", "0.57893336", "0.5777435", "0.5773666", "0.5759935", "0.57562524", "0.57514244", "0.5736761", "0.5721786", "0.57156", "0.5693657", "0.56579095", "0.56524575", "0.56516933", "0.56416726", "0.5639766", "0.5630319", "0.56235963", "0.5607828", "0.5597865", "0.5593643", "0.55868447", "0.5576239", "0.55753696", "0.5570099", "0.556155", "0.55568874", "0.55474097", "0.5539662", "0.5532411", "0.5531814", "0.5512975", "0.5479672", "0.54774815", "0.54768354", "0.5473451", "0.54682344", "0.5464578", "0.54521894", "0.5445922", "0.5437787", "0.54369724", "0.5422958", "0.5415149", "0.5415149", "0.5399354", "0.539413", "0.53890395", "0.5382889", "0.5382856", "0.53564143", "0.535306", "0.53529805", "0.5352455", "0.5347083", "0.5333787", "0.5333257", "0.5332394", "0.5331696", "0.53306514", "0.53304696", "0.53293514", "0.5327383", "0.53269297", "0.53269297", "0.53238297", "0.53169096", "0.5314785", "0.5314103" ]
0.55989367
50
Return tuple of text in edit format and bool validity, using edit format option
def formatEditText(self, storedText):
    format = globalref.options.strData('EditTimeFormat', True)
    try:
        return (GenTime(storedText).timeStr(format), True)
    except GenTimeError:
        return (storedText, not storedText)
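A minimal runnable sketch of the pattern in the document field above, assuming Python's standard datetime in place of the GenTime and globalref classes (neither is defined in this record); the function name, format string, and return convention mirror the snippet but are illustrative only:

from datetime import datetime

def format_edit_text(stored_text, fmt="%H:%M:%S"):
    # Render the stored value in the edit-time format; on a parse
    # failure, return the raw text, which is valid only when empty.
    try:
        parsed = datetime.strptime(stored_text, "%H:%M:%S")
        return (parsed.strftime(fmt), True)
    except ValueError:
        return (stored_text, not stored_text)

# format_edit_text("13:05:00") -> ("13:05:00", True)
# format_edit_text("bogus")    -> ("bogus", False)
# format_edit_text("")         -> ("", True)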
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def reformat(ctx):\n pass", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def formatted(s):\n matches = re.findall(_format_re, normalize(s))\n if len(matches) == 1 and matches[0][0] != '':\n return matches[0][0]\n def to_fmt(txt_none, txt_sw, txt_rem, txt_em, txt_a):\n if txt_none != '':\n return FORMAT_NONE, txt_none\n elif txt_sw != '':\n return FORMAT_SW, txt_sw\n elif txt_rem != '':\n return FORMAT_REM, txt_rem\n elif txt_em != '':\n return FORMAT_EM, txt_em\n elif txt_a != '':\n return FORMAT_A, txt_a\n return [to_fmt(*m) for m in matches]", "def 
_do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def test_format_permissions_and_docstring(self):\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\"],\n {\"some\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"permission formatted string\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\"\n ),\n )\n\n self.assertEqual(\n format_permissions_and_docstring(\n [\"permission formatted string\", \"another permission\"],\n {\"some\": \"docstring\", \"another\": \"docstring\"},\n ),\n (\n \"## Permissions\\n\\n\"\n \"- permission formatted string\\n\"\n \"- another permission\\n\"\n \"### Permission description\\n\\n\"\n \"- **some** : docstring\\n\"\n \"- **another** : docstring\"\n ),\n )", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def text(value):\n return True", "def test_is_entry_formatted(self):\n\n valid_formats = test_case_data.get('valid_formats')\n for i, valid_entry in enumerate(test_case_data.get('valid_entries')):\n entry = [value.strip() for value in valid_entry.split(',')]\n format_fields = valid_formats[i].split(',')\n valid = self.parser._is_entry_formatted(entry, format_fields)\n self.assertTrue(valid, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [value.strip() for value in invalid_entry.split(',')]\n for f in valid_formats:\n format_fields = f.split(',')\n entry_dict = self.parser._is_entry_formatted(entry, format_fields)\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if 
item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n \"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def _check_style(file_path, clang_format_bin):\n with open(file_path, 'r') as f:\n is_valid_header = f.read().startswith(CppFormatter.standard_header)\n\n cmd = [\n clang_format_bin,\n \"-style=file\",\n \"-output-replacements-xml\",\n file_path,\n ]\n result = subprocess.check_output(cmd).decode(\"utf-8\")\n if \"<replacement \" in result:\n is_valid_style = False\n else:\n is_valid_style = True\n return (is_valid_style, is_valid_header)", "def info(level):\n if level == 'basic':\n string = _(\"Basic markup\")\n text = _(\"Only basic text tags are available in this input field.\")\n elif 
level == 'rich':\n string = _(\"Rich markup\")\n text = _(\"Rich and basic text tags are available in this input field.\") \n elif level == 'full':\n string = _(\"Full markup\")\n text = _(\"Every tags are available in this input field.\") \n elif level == 'none':\n string = _(\"No markup\")\n text = _(\"No tags are available in this input field.\") \n\n if level != 'none':\n text = text + \" \" + _(\"Check the markup reminder in related documentation for a description of these tags.\")\n\n return '<span class=\"help\" title=' + quoteattr(text) \\\n + '><img src=\"' + settings.STATIC_MEDIA_URL \\\n + 'images/savane/common/misc.default/edit.png' \\\n + ' border=\"0\" class=\"icon\" alt=\"\" />' \\\n + string + '</span>'", "def human_readable(self):\n if self.no_flags_set():\n return \"no flags set\"\n else:\n flag_desc = []\n for name in (\"bookmarked\", \"for_validation\", \"candidate\", \"final causative\"):\n if getattr(self, \"flag_%s\" % name.replace(\" \", \"_\")):\n flag_desc.append(name)\n for name in (\"visual\", \"validation\", \"molecular\", \"phenotype_match\", \"summary\"):\n field = getattr(self, \"flag_%s\" % name)\n if field and field != \"empty\":\n flag_desc.append(\"%s rating is %s\" % (name.split(\"_\")[0], field))\n return \", \".join(flag_desc)", "def fancyString(inVal, correctOutput, funcOutput):\r\n checkCorrect = \"Correct = \" + u'\\u2713'*(funcOutput == correctOutput) + 'X'*(funcOutput != correctOutput)\r\n # Check mark code from site below:\r\n # https://stackoverflow.com/questions/16676101/print-the-approval-sign-check-mark-u2713-in-python\r\n return \"Input(s) = {:<15} Output = {:<25} Your Output = {:<35} \".format(str(inVal), str(correctOutput), str(funcOutput)) + checkCorrect", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. 
Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def hints(s):\n if s == 'hello':\n # string, color, bold\n return (' World', 35, False)\n return None", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def FormatYesNo(value):\n if value:\n return u'Yes'\n else:\n return u'No'", "def get_data_from_nonformat_text():\n pass", "def validateWorkFormat(format):\n\n if not(format):\n return \"You must select a work format.\"", "def rich(text):\n return full(text, False)", "def edit(self, text):\n return self._edit_engine(text, 
break_on_success=False)", "def change_prompt_format(self, arg, **_):\n if not arg:\n message = 'Missing required argument, format.'\n return [(None, None, None, message)]\n\n self.prompt_format = self.get_prompt(arg)\n return [(None, None, None, \"Changed prompt format to %s\" % arg)]", "def test_command_edit_info_boolean_flags():\n def f(inputfile):\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(inputfile, tmp.name)\n\n for flag in (\"write_protected\", \"synchronized\", \"cleaned\"):\n for true_value, false_value in ((\"1\", \"0\"),\n (\"yes\", \"no\"),\n (\"YES\", \"No\"),\n (\"true\", \"false\"),\n (\"tRuE\", \"FaLsE\")):\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, true_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == True\n wozardry.parse_args([\"edit\", \"-i\", \"%s:%s\" % (flag, false_value), tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.info[flag] == False\n f(kValid1)\n f(kValid2)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def get_help_text(self):\n requirements = \"Your password must contain at least: {min_length} \" \\\n \"character(s), {min_uppercase} uppercase letter(s), \" \\\n \"{min_lowercase} lowercase letter(s) and \" \\\n \"{min_digits} digit(s). \".format(\n min_length=self.min_length,\n min_uppercase=self.min_uppercase,\n min_lowercase=self.min_lowercase,\n min_digits=self.min_digits)\n return requirements", "def help_for(self, flag: str) -> Tuple[str, str]:\n # Obtain arg obj\n if flag not in self.flags:\n err = \"{!r} is not a valid flag for this context! Valid flags are: {!r}\" # noqa\n raise ValueError(err.format(flag, self.flags.keys()))\n arg = self.flags[flag]\n # Determine expected value type, if any\n value = {str: \"STRING\", int: \"INT\"}.get(arg.kind)\n # Format & go\n full_names = []\n for name in self.names_for(flag):\n if value:\n # Short flags are -f VAL, long are --foo=VAL\n # When optional, also, -f [VAL] and --foo[=VAL]\n if len(name.strip(\"-\")) == 1:\n value_ = (\"[{}]\".format(value)) if arg.optional else value\n valuestr = \" {}\".format(value_)\n else:\n valuestr = \"={}\".format(value)\n if arg.optional:\n valuestr = \"[{}]\".format(valuestr)\n else:\n # no value => boolean\n # check for inverse\n if name in self.inverse_flags.values():\n name = \"--[no-]{}\".format(name[2:])\n\n valuestr = \"\"\n # Tack together\n full_names.append(name + valuestr)\n namestr = \", \".join(sorted(full_names, key=len))\n helpstr = arg.help or \"\"\n return namestr, helpstr", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def __check_format(node, lint_ctx, profile: str, allow_ext=False):\n if \"format_source\" in node.attrib and (\"ext\" in node.attrib or \"format\" in node.attrib):\n lint_ctx.warn(\n f\"Tool {node.tag} output '{node.attrib.get('name', 'with missing name')}' should use either format_source or format/ext\",\n node=node,\n )\n if \"format_source\" in node.attrib:\n return True\n if node.find(\".//action[@type='format']\") is not None:\n return True\n # if allowed (e.g. 
for discover_datasets), ext takes precedence over format\n fmt = None\n if allow_ext:\n fmt = node.attrib.get(\"ext\")\n if fmt is None:\n fmt = node.attrib.get(\"format\")\n if fmt == \"input\":\n message = f\"Using format='input' on {node.tag} is deprecated. Use the format_source attribute.\"\n if Version(str(profile)) <= Version(\"16.01\"):\n lint_ctx.warn(message, node=node)\n else:\n lint_ctx.error(message, node=node)\n\n return fmt is not None", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def verify_diagnostics_and_usage_text():\r\n msg, status = \"\", True\r\n try:\r\n sleep(10)\r\n if g.platform == 'android':\r\n sleep(3)\r\n 'verify end user license agreement label'\r\n flag1,msg = element_textvalidation('Diagnostics_usage_lbl','Diagnostics and Usage')\r\n sleep(4) \r\n 'Read verification input data'\r\n text_to_verify = util.read_file(g.demomode_Diagnostcs_nd_usage_txt)\r\n 'verify end user license agreement label'\r\n flag2,msg = element_textvalidation('Diagnostics_usage_txt',text_to_verify)\r\n \r\n flag = False if not (flag1 and flag2) else True\r\n else:\r\n\r\n \r\n 'Verify diagnostics usage text'\r\n flag1 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_lbl'))\r\n 'Verify diagnostics usage text'\r\n flag2 = ui_controls.ui_element(get_obj_identifier('Diagnostics_usage_txt'))\r\n \r\n sleep(4) \r\n \r\n flag = False if not (flag1 and flag2) else True\r\n if not flag:\r\n return False, msg\r\n else:\r\n print \"License agreement screen name is displayed properly\"\r\n \r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return False, msg\r\n return True, msg", "def _format_action(self, action):\n parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)\n if action.nargs == argparse.PARSER:\n parts = \"\\n\".join(parts.split(\"\\n\")[1:])\n return parts", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def _engine_option_string_and_comment(option: engine.Option, value: engine.ConfigValue) -> Tuple[str, str]:\n if value is None:\n value = ''\n name_equals_val = f'{option.name}={value}'\n if option.type == 'check' or option.type == 'string' or option.type == 'button':\n return (name_equals_val, f'type={option.type}')\n if option.type == 'spin':\n return (name_equals_val, f'type=spin, min={option.min}, max={option.max}')\n if option.type == 'combo':\n return (name_equals_val, f'type=combo, var={option.var}')\n return (name_equals_val, 'type=unknown')", "def TEXT(number, format_type):\n raise NotImplementedError()", "def __verify_plot_options(self, options_str):\n default_line = '-'\n default_marker = ''\n default_colour = 'k'\n\n # Split str into chars list\n options_split = list(options_str)\n\n # If 0, set defaults and 
return early\n if len(options_split) == 0:\n return [default_line, default_marker, default_colour]\n\n # If line_style given, join the first two options if applicable\n # (some types have 2 characters)\n for char in range(0, len(options_split) - 1):\n # If char is '-' (only leading character in double length option)\n if options_split[char] == '-' and len(options_split) > 1:\n # If one of the leading characters is valid\n if options_split[char + 1] == '-' or \\\n options_split[char + 1] == '.':\n # Join the two into the first\n options_split[char] = options_split[char] \\\n + options_split[char + 1]\n # Shuffle down the rest\n for idx in range(char + 2, len(options_split)):\n options_split[idx - 1] = options_split[idx]\n # Remove duplicate extra\n options_split.pop()\n\n # If any unknown, throw error\n for option in options_split:\n if option not in self.__line_styles and \\\n option not in self.__marker_styles and \\\n option not in self.__colour_styles:\n error_string = \"Unknown character entered: '{0}'\"\n raise ValueError(error_string.format(option))\n\n ##############################\n # Verify Line Style\n ##############################\n line_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n line_style_index = 0\n for option in options_split:\n if option in self.__line_styles:\n line_style_count = line_style_count + 1\n line_style_index = self.__line_styles.index(option)\n\n # If more than one, throw error\n if line_style_count > 1:\n raise ValueError(\n \"Too many line style arguments given. Only one allowed\")\n # If none, set as solid\n elif line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = default_line\n # If one, set as given\n else:\n output_line = self.__line_styles[line_style_index]\n ##############################\n\n ##############################\n # Verify Marker Style\n ##############################\n marker_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n marker_style_index = 0\n for option in options_split:\n if option in self.__marker_styles:\n marker_style_count = marker_style_count + 1\n marker_style_index = self.__marker_styles.index(option)\n\n # If more than one, throw error\n if marker_style_count > 1:\n raise ValueError(\n \"Too many marker style arguments given. Only one allowed\")\n # If none, set as no-marker\n elif marker_style_count == 0 or not any(\n item in options_split for item in self.__marker_styles):\n output_marker = default_marker\n # If one, set as given\n else:\n output_marker = self.__marker_styles[marker_style_index]\n # If marker set and no line given, turn line to no-line\n if line_style_count == 0 or not any(\n item in options_split for item in self.__line_styles):\n output_line = ''\n ##############################\n\n ##############################\n # Verify Colour Style\n ##############################\n colour_style_count = 0 # Count of options used\n # Index position of index used (only used when count == 1)\n colour_style_index = 0\n for option in options_split:\n if option in self.__colour_styles:\n colour_style_count = colour_style_count + 1\n colour_style_index = self.__colour_styles.index(option)\n\n # If more than one, throw error\n if colour_style_count > 1:\n raise ValueError(\n \"Too many colour style arguments given. 
Only one allowed\")\n # If none, set as black\n elif colour_style_count == 0 or not any(\n item in options_split for item in self.__colour_styles):\n output_colour = default_colour\n # If one, set as given\n else:\n output_colour = self.__colour_styles[colour_style_index]\n ##############################\n\n return [output_line, output_marker, output_colour]", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def reformat():\n toolkit.reformat()", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def validate_format(self):\n raise NotImplementedError()", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def _validate_performatives(performative: str) -> Tuple[bool, str]:\n # check performative is not a reserved name\n if _is_reserved_name(performative):\n return (\n False,\n \"Invalid name for performative '{}'. This name is reserved.\".format(\n performative,\n ),\n )\n\n # check performative's format\n if not _is_valid_regex(PERFORMATIVE_REGEX_PATTERN, performative):\n return (\n False,\n \"Invalid name for performative '{}'. 
Performative names must match the following regular expression: {} \".format(\n performative, PERFORMATIVE_REGEX_PATTERN\n ),\n )\n\n return True, \"Performative '{}' is valid.\".format(performative)", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def _format (color, style=''):\n _format = QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def get_replacement():\n run_linter_throw(\"path/to/file\",\n \"{s}\\n{m} Text{e}\",\n style,\n whitelist=[\"headerblock/desc_space\"])", "def edit_form_entry_help_text_extra(cls):\n return \"\"\"\n <ul class=\"{container_class}\">\n {edit_option_html}\n <li><a href=\"{delete_url}\">\n <span class=\"{delete_option_class}\"></span> {delete_text}</a>\n </li>\n </ul>\n <input type=\"hidden\" value=\"{form_element_position}\"\n name=\"form-{counter}-position\"\n id=\"id_form-{counter}-position\"\n class=\"form-element-position\">\n <input type=\"hidden\" value=\"{form_element_pk}\"\n name=\"form-{counter}-id\" id=\"id_form-{counter}-id\">\n \"\"\".format(\n container_class=cls.form_list_container_class,\n edit_option_html=\"{edit_option_html}\",\n delete_url=\"{delete_url}\",\n delete_option_class=cls.form_delete_form_entry_option_class,\n delete_text=\"{delete_text}\",\n form_element_position=\"{form_element_position}\",\n counter=\"{counter}\",\n form_element_pk=\"{form_element_pk}\",\n )", "def extension (formatStr):\n assert False, \"TODO:\"", "def _generateReadOnly(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'readonly'\n if self._script.utilities.isReadOnlyTextArea(obj):\n result.append(self._script.formatting.getString(**args))\n return result", "def __format__(self, format_spec):\n # This calls the compiled regex stored on ANSIString's class to analyze the format spec.\n # It returns a dictionary.\n format_data = self.re_format.match(format_spec).groupdict()\n clean = self.clean()\n base_output = ANSIString(self.raw())\n align = format_data.get(\"align\", \"<\")\n fill = format_data.get(\"fill\", \" \")\n\n # Need to coerce width into an integer. 
We can be certain that it's numeric thanks to regex.\n width = format_data.get(\"width\", None)\n if width is None:\n width = len(clean)\n else:\n width = int(width)\n\n if align == \"<\":\n base_output = self.ljust(width, fill)\n elif align == \">\":\n base_output = self.rjust(width, fill)\n elif align == \"^\":\n base_output = self.center(width, fill)\n elif align == \"=\":\n pass\n\n # Return the raw string with ANSI markup, ready to be displayed.\n return base_output.raw()", "def is_valid(self):\r\n for lineedit in self.lineedits:\r\n if lineedit in self.validate_data and lineedit.isEnabled():\r\n validator, invalid_msg = self.validate_data[lineedit]\r\n text = to_text_string(lineedit.text())\r\n if not validator(text):\r\n QMessageBox.critical(self, self.get_name(),\r\n \"%s:<br><b>%s</b>\" % (invalid_msg, text),\r\n QMessageBox.Ok)\r\n return False\r\n return True", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def test_match_entry_to_format(self):\n\n # matches valid entries with valid formats\n for valid_entry in test_case_data.get('valid_entries'):\n entry = [e.strip() for e in valid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertTrue(entry_dict, f'{entry} is not of a valid format')\n\n # fails with invalid entries\n for invalid_entry in test_case_data.get('invalid_entries'):\n entry = [e.strip() for e in invalid_entry.split(',')]\n entry_dict = self.parser._match_entry_to_format(entry)\n\n self.assertFalse(entry_dict, f'{entry} is not of a valid format')", "def is_text_editable(path):\n return False", "def _format_answer(self, text):\n text = str(text).replace('\\n', ' ')\n answer_width = 70\n pretty_text = '\\n\\t'.join(textwrap.wrap(text, answer_width))\n\n return pretty_text", "def flags(self, f):\n if f.is_inlined:\n return \" (inlined)\"\n return \"\"", "def text_to_display(level):\n if level == \"html\":\n return html_answers, html_text\n elif level == \"css\":\n return css_answers, css_text\n elif level == \"python\":\n return python_answers, python_text", "def editorForTyp(typ):\n\n if typ == \"quint32\":\n return (\"QSpinBox\", \"setValue\", \"value\")\n elif typ == \"QString\":\n return (\"QLineEdit\", \"setText\", \"text\")\n elif typ == \"bool\":\n return (\"QCheckBox\", \"setChecked\", \"isChecked\")\n return (None, None, None)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def show_fields(*fields):\n\n fields = filter( lambda x: x, fields )\n target_len = max( len(name) for name, value in fields ) + 2\n for name, value in fields:\n line = name + ':' + \" \" * (target_len - len(name))\n if type(value) == bool:\n line += color_text(\"Yes\", 'green') if value else color_text(\"No\", 'red')\n else:\n line += str(value)\n print line", "def testCheckRequiredFormat(self):\n plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()\n\n file_system_builder = fake_file_system_builder.FakeFileSystemBuilder()\n file_system_builder.AddFile('/file.txt', (\n b'2018-01-24 18:25:08,454 -0800 INFO pid=2376 7780:MainThread '\n b'logging_config.py:295 OS: Windows/6.1-SP1\\n'))\n\n file_entry = file_system_builder.file_system.GetFileEntryByPath('/file.txt')\n\n parser_mediator = self._CreateParserMediator(None, file_entry=file_entry)\n\n file_object = file_entry.GetFileObject()\n text_reader = 
text_parser.EncodedTextReader(file_object)\n text_reader.ReadLines()\n\n result = plugin.CheckRequiredFormat(parser_mediator, text_reader)\n self.assertTrue(result)", "def formatted(self) -> str:\r\n ...", "def ISTEXT(value):\n return isinstance(value, (basestring, AltText))", "def text_editor():\n return True", "def validate(self, txt, pos):\n state, rpos = qt.QDoubleValidator.validate(self, txt, pos)\n if txt.length() == 0:\n state = qt.QValidator.Acceptable\n return state, rpos", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text" ]
[ "0.74414396", "0.73706305", "0.7158762", "0.7101311", "0.7101311", "0.70359075", "0.7028627", "0.68839306", "0.655066", "0.6453073", "0.6392096", "0.63097495", "0.63002694", "0.61881006", "0.60249096", "0.58351296", "0.5735489", "0.56134075", "0.5609827", "0.5604693", "0.55462486", "0.5309045", "0.52991766", "0.5209923", "0.52029717", "0.52029717", "0.52029717", "0.52029717", "0.52029717", "0.51995766", "0.5173284", "0.5163057", "0.51601666", "0.5118592", "0.51145864", "0.5095493", "0.50919986", "0.507758", "0.5076713", "0.5066263", "0.505785", "0.5048219", "0.5010526", "0.50008905", "0.49893162", "0.497208", "0.49717423", "0.49444363", "0.4933414", "0.49300218", "0.49295422", "0.49209118", "0.4898785", "0.48946583", "0.4889563", "0.48859346", "0.48719627", "0.48597744", "0.48540577", "0.48528767", "0.48496515", "0.48474717", "0.484253", "0.48417863", "0.4838876", "0.48276186", "0.4824091", "0.48239785", "0.4821053", "0.48203927", "0.48191538", "0.48187387", "0.4813598", "0.48087016", "0.48079142", "0.48021486", "0.47995895", "0.47952986", "0.47923297", "0.47902", "0.4777973", "0.4775352", "0.47560427", "0.47556764", "0.47520024", "0.47499168", "0.47496396", "0.4736746", "0.47359556", "0.47356373", "0.4734012", "0.47335908", "0.47326112", "0.47288954", "0.4723627", "0.47181588", "0.4714406", "0.47135657", "0.47128206", "0.47116646" ]
0.5770396
16
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText):
    try:
        return (repr(GenTime(editText)), True)
    except GenTimeError:
        return (editText, not editText and not self.isRequired)
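A companion sketch of the inverse direction, under the same assumption that datetime stands in for the undefined GenTime class; is_required is modeled as a plain argument rather than an instance attribute:

from datetime import datetime

def stored_text(edit_text, is_required=False):
    # Convert edited text back to a stored representation; an empty
    # value passes validation only when the field is not required.
    try:
        parsed = datetime.strptime(edit_text, "%H:%M:%S")
        return (repr(parsed.time()), True)
    except ValueError:
        return (edit_text, not edit_text and not is_required)

# stored_text("09:30:00")           -> ("datetime.time(9, 30)", True)
# stored_text("", is_required=True) -> ("", False)
# stored_text("nope")               -> ("nope", False)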
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def get_data_from_nonformat_text():\n pass", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def on_edit(self, event, text):\n return None", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def reformat(ctx):\n pass", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if check_text:\n check(text)\n \n print(text)\n \n ### managing placeholders\n text 
= parsers['v'].main(text)\n \n ### saving names\n if index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def get_edit_text(self):\n # grab edit page\n response = self._get_page('edit.php')\n html = response.text\n # parse out existing plan\n soup = bs4.BeautifulSoup(html, 'html5lib')\n plan = soup.find('textarea')\n if plan is None:\n raise PlansError(\"Couldn't get edit text, are we logged in?\")\n 
else:\n plan = u'' + plan.contents[0]\n # prepending the empty string somehow prevents BS from\n # escaping all the HTML characters (weird)\n assert type(plan) == str\n # convert to CRLF line endings\n plan = convert_endings(plan, 'CRLF')\n # parse out plan md5\n md5sum = soup.find('input',\n attrs={'name': 'edit_text_md5'}).attrs['value']\n # also, explicitly compute the hash, for kicks\n assert md5sum == plans_md5(plan)\n # verify that username has not changed\n assert self.username == self.parser.username\n return plan, md5sum", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def normalize_text_for_edit(user, text, rich_text, escape_html=True):\n if text is None:\n return ''\n\n if not rich_text and is_rich_text_default_for_user(user):\n # This isn't rich text, but it's going to be edited as rich text,\n # so escape it.\n text = djblets_markdown.markdown_escape(text)\n\n if escape_html:\n text = escape(text)\n\n return text", "def parse_edits(line):\n \n # Get edits inside brackets allowing parentheses\n edit_strings = re.findall(r'\\((.*?)\\)[,\\]]', line)\n \n edits = []\n for edit_string in edit_strings:\n # Splitting by comma is not enough. Some of the edits actually fix spacing\n # when commas are used, so we may can't use edit_string.split(', ')\n # For some reason, the unicode gold strings are enclosed in a list\n m = re.match(r'^(\\d+), (\\d+), (.*), \\[?(.*)\\]?$', edit_string)\n edit_items = [m.group(i) for i in range(1, 5)]\n \n # No way to handle this in regex\n if edit_items[3].endswith(']'):\n edit_items[3] = edit_items[3][:-1]\n \n # Cast the indices\n edit_items[0] = int(edit_items[0])\n edit_items[1] = int(edit_items[1])\n \n # Convert unicode-string-inside-string into actual unicode string\n edit_items[2] = codecs.decode(edit_items[2][2:-1], 'unicode_escape') or 'NIL'\n edit_items[3] = codecs.decode(edit_items[3][2:-1], 'unicode_escape') or 'NIL'\n \n edits.append(edit_items)\n \n return edits", "def _do_checkbox_setting(cls, text: str, title: str, replace_args: tuple) -> typing.Tuple[str, bool, bool]:\n title = title.strip()\n\n lines = []\n found = False\n modified = False\n for line, is_checkbox in cls._iterate_lines(text):\n if is_checkbox and title == cls._get_checkbox_title(line):\n found = True\n lines.append(line.replace(*replace_args))\n if line != lines[-1]:\n modified |= True\n\n continue\n\n lines.append(line)\n\n return \"\\n\".join(lines), found, modified", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_edits(text):\n edit_p = re.compile(\"(?P<open><edit.*?>)(?P<inner>.*?)(?P<close></edit>)\")\n corr_p = re.compile(\"<corrections>.*?</corrections>\")\n edits = []\n\n offset = 0\n\n for m in re.finditer(edit_p, text):\n # Make an edit object\n edit_text = \"\".join(m.groups())\n edit = ET.XML(m.group(0))\n\n # Set the bounds of the original text and adjust offset\n inner_string = m.group('inner') \n start = m.start() - offset\n corr_m = re.search(corr_p, inner_string)\n \n if corr_m: # Replacement/insertion have a correction\n offset += len(corr_m.group(0)) \n \n if not inner_string.startswith(\"<empty/>\"):\n end = start + corr_m.start()\n else:\n 
offset += len(\"<empty/>\") # It is \"\" in plain text\n end = start\n else:\n # Deletions may not have a correction\n if not inner_string.startswith(\"<empty/>\"):\n end = start + len(inner_string)\n else: # Unspecified error <empty/> is \"\" in plain text\n end = start\n offset += len(inner_string)\n\n\n edit.set(\"start\", \"%d\" % start) \n edit.set(\"end\", \"%d\" % end)\n\n offset += len(m.group('open')) + len(m.group('close'))\n \n\n # Make the original text a subelement of <edit>\n # Original text may be a string or <empty/> element.\n original = ET.SubElement(edit, \"original\")\n \n if edit.text:\n original.text = edit.text\n edit.text = \"\"\n else:\n empty = edit.find('empty')\n \n try:\n edit.remove(empty)\n original.append(empty)\n except Exception as e:\n pass\n \n edits.append(edit)\n\n return edits", "def refang(self, text: str):", "def edit_text(self, _, val):\n t_edit = text_editor.TextEditor(val, \"value\")\n t_edit.execute = self.execute", "def handle_data(self, text):\n if self.bankacctfrom:\n if self.bankid:\n self.compte['banque'] = text.strip()\n self.bankid = False\n if self.branchid:\n self.compte['guichet'] = text.strip()\n self.branchid = False\n if self.acctid:\n self.compte['compte'] = text.strip()\n self.acctid = False\n if self.banktranlist:\n if self.stmttrn:\n if self.dtposted:\n self.ecriture_tmp['date'] = datetime.strptime(text.strip(), \"%Y%m%d\")\n self.dtposted = False\n if self.trnamt:\n self.ecriture_tmp['montant'] = locale.atof(text.strip())\n self.trnamt = False\n if self.trntype:\n self.ecriture_tmp['type'] = text.strip()\n self.trntype = False\n if self.name:\n self.ecriture_tmp['name'] = text.strip()\n self.name = False\n if self.memo:\n self.ecriture_tmp['memo'] = text.strip()\n self.memo = False", "def get_mark(text, short):\n\n line = text.readline()\n\n # check that the line begins with a valid entry type\n if not short and not re.match(r'^\\s*(text|mark) = \"', line):\n raise ValueError('Bad entry: ' + line)\n\n # read until the number of double-quotes is even\n while line.count('\"') % 2:\n next_line = text.readline()\n\n if not next_line:\n raise EOFError('Bad entry: ' + line[:20] + '...')\n\n line += next_line\n if short:\n pattern = r'^\"(.*?)\"\\s*$'\n else:\n pattern = r'^\\s*(text|mark) = \"(.*?)\"\\s*$'\n entry = re.match(pattern, line, re.DOTALL)\n\n return entry.groups()[-1].replace('\"\"', '\"')", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def getText(self):", "def get_text(text_input):\r\n return text_input", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def rich(text):\n return full(text, False)", "def text(value):\n return True", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def formatter(text):\n repl_map = {\n \"degC\": \"$^o$C\",\n \"K\": \"$^o$C\",\n \"month-1\": \"month$^{{-1}}$\",\n \"day-1\": \"day$^{{-1}}$\",\n \"d-1\": \"day$^{{-1}}$\",\n \"decade-1\": \"decade$^{{-1}}$\",\n 
\"year-1\": \"year$^{{-1}}$\",\n \"rcp85\": \"RCP8.5\",\n \"rcp45\": \"RCP4.5\",\n \"rcp26\": \"RCP2.6\",\n \"RCP85\": \"RCP8.5\",\n \"RCP45\": \"RCP4.5\",\n \"RCP26\": \"RCP2.6\",\n \"cmip5-85\": \"RCP8.5\",\n \"cmip5-60\": \"RCP6.0\",\n \"cmip5-45\": \"RCP4.5\",\n \"cmip5-26\": \"RCP2.6\",\n \"ssp585\": \"SSP5-8.5\",\n \"ssp245\": \"SSP2-4.5\",\n \"ssp126\": \"SSP1-2.6\",\n \"SSP585\": \"SSP5-8.5\",\n \"SSP245\": \"SSP2-4.5\",\n \"SSP126\": \"SSP1-2.6\",\n \"cmip6-85\": \"SSP5-8.5\",\n \"cmip6-70\": \"SSP3-7.0\",\n \"cmip6-60\": \"SSP4-6.0\",\n \"cmip6-34\": \"SSP4-3.4\",\n \"cmip6-45\": \"SSP2-4.5\",\n \"cmip6-26\": \"SSP1-2.6\",\n \"cmip6-19\": \"SSP1-1.9\",\n \"1\": \"%\",\n \"era5\": \"ERA5\",\n \"gpcc025x025_v8\": \"GPCC\",\n \"cru\": \"CRU\",\n \"jra55\": \"JRA55\",\n \"HIGHRESMIP\": \"HighResMIP\",\n \" \": \"\",\n }\n for key, val in repl_map.items():\n if key in text:\n text = text.replace(key, val)\n break\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)", "def value_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),value=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def get_text_editor_input(initial_msg):\n EDITOR = os.environ.get('EDITOR', 'vi')\n CROP_MARK = ('\\n\\nAnything above this line will be ignored:\\n' +\n ('-' * 34) + '>8' + ('-' * 34) + '\\n')\n\n wrapper = TextWrapper(replace_whitespace=False, drop_whitespace=False)\n initial_msg = '\\n'.join(wrapper.wrap(initial_msg))\n initial_msg += CROP_MARK\n\n with tempfile.NamedTemporaryFile(suffix='.md') as temp:\n temp.write(initial_msg.encode('utf-8'))\n temp.flush() # Write buffer to the file\n subprocess.call([EDITOR, temp.name])\n\n # The pointer was already after the initial message, but we return to\n # the beginning just in case the user added content before the mark\n temp.seek(0)\n return temp.read().decode('utf-8').split(CROP_MARK, 1)[1].strip()", "def edit():", "def text_example():\n \n text_store = \"01000001011000010010000001000010011000100000110100001010001100010011001000110011\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"You should be able to save this file and open it in a text editor like Notepad or Nano to read it. If you edit the values you may find it does not display properly as text. 
Unchanged, it should be interpreted by a text editor as:\\n\\nAa Bb\\n123\\n\\nAs the file was made on a Windows machines you may find other systems display the line breaks differently.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "def is_text_editable(path):\n return False", "def show_line_edit_dialog(text):\n dialog = QDialog()\n interface = lineEditEntryGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True, str(interface.lineEdit.text())\n else:\n return False, \"\"", "def on_text(self, instance, value):\n if not EVENTS['IS_OBJ']:\n EVENTS['EDITOR_SAVED'] = False\n\n if value:\n self.valid_text = True\n EVENTS['IS_RAM_EMPTY'] = False\n else:\n self.valid_text = False", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def edit_once(self, text):\n return self._edit_engine(text, break_on_success=True)", "def format_text(text):\n text = text.replace('to do ', '') \\\n .replace('to ', '') \\\n .replace(' at ', ' on ') \\\n .replace(' a ', ' 1 ') \\\n .replace(' an ', ' 1 ') \\\n .replace('minutes', '60') \\\n .replace('minute', '60') \\\n .replace('seconds', '1') \\\n .replace('second', '1') \\\n .replace('hours', '3600') \\\n .replace('hour', '3600')\n text = text.split(' ')\n pure_text = \"\"\n if text[-3] == 'in':\n for i in range(0, (len(text)-3)):\n pure_text += \" \" + text[i]\n final_text = [pure_text, text[-3], text[-2], text[-1]]\n else:\n for i in range(0, (len(text)-2)):\n pure_text += text[i]\n final_text = [pure_text, text[-2], text[-1]]\n if len(text) < 3:\n pprint(final_text)\n raise Exception(\"Bad remind request\")\n\n return final_text", "def edit_google_vision_text(self,text):\n s1=text\n try:\n log_info(\"Correcting google vision text to remove extra spacing\",MODULE_CONTEXT)\n i=0\n while(i<len(text)):\n s1=text\n if text[i] in [\"/\",\"।\",'।' ,':','|',\",\" ,'०',\"]\",\"-\",\")\",\"}\"] and text[i-1]==\" \": \n text=text[:i-1]+text[i:]\n if i > 0 :\n if text[i-1] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i]==\" \":\n text=text[:i]+text[i+1:]\n elif text[i] in [\"-\",\"[\",\"{\",\"/\",\"(\"] and text[i+1]==\" \":\n text=text[:i+1]+text[i+2:]\n i=i+1\n except Exception as e:\n log_exception(\"Exception while correcting google vision text\", MODULE_CONTEXT, e)\n return s1\n return text", "def process_text(self, text, language):", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def updateText(widget,text,format=''):\n # autorecognition\n if format not in ['plain','html','rest']:\n if type(text) is str and text.startswith('..'):\n format = 'rest'\n\n # conversion\n if format == 'rest' and pf.options.rst2html:\n html = utils.rst2html(text)\n if html[:10] == text[:10]:\n #print \"CONVERSION TO HTML FAILED\"\n text += \"\\n\\nNote: This reStructuredText is displayed as plain text because it could not be converted to html. 
If you install python-docutils, you will see this text (and other pyFormex messages) in a much nicer layout!\\n\"\n else:\n text = html\n\n # We leave the format undefined, because we are not sure\n # that the conversion function (docutils) is available\n # and always produces good results\n format = ''\n\n if format == 'plain':\n widget.setPlainText(text)\n elif format == 'html':\n widget.setHtml(text)\n else:\n # As a last rescue, try QT4's autorecognition\n widget.setText(text)", "def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def get_text_from_editor():\n with tempfile.NamedTemporaryFile(suffix='.tmp', mode='w+t') as f:\n # Create a temporary file with instructions on describing bug\n f.write(message + '\\n\\n')\n f.flush()\n # Open the editor and allow the user to type\n editor = os.environ.get('EDITOR', 'vim')\n subprocess.call([editor, f.name])\n # Read and clean the file\n f.seek(0)\n text = ''.join([line.lstrip() for line in f.readlines()\n if line and not line.lstrip().startswith('#')])\n return '\\n'.join(textwrap.wrap(text, width=100))", "def storeTextEditValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.sender().toPlainText()\n\t\tself.storeValue(category, attr, value)", "def _editorText(self):\n if self.__lineEditKind:\n return self._editor.text()\n else:\n return self._editor.currentText()", "def _hidden_in_unicode(self, txt):", "def fix_document(key, value, _format, _meta):\n if key == \"Link\":\n url = value[2][0]\n if url.startswith(\"user-manual\") or url.startswith(\"developers-guide\"):\n # Return the link text\n return value[1]\n # Reformat the text inside block quotes\n elif key == \"BlockQuote\":\n try:\n first_string = value[0][\"c\"][0][\"c\"]\n if first_string == \"[!NOTE]\":\n value[0][\"c\"][0] = Strong([Str(\"Note:\")])\n return BlockQuote(value)\n elif first_string == \"[!INFO]\":\n value[0][\"c\"][0] = Strong([Str(\"Info:\")])\n return BlockQuote(value)\n elif first_string == \"[!TIP]\":\n value[0][\"c\"][0] = Strong([Str(\"Tip:\")])\n return BlockQuote(value)\n elif first_string == \"[!WARNING]\":\n value[0][\"c\"][0] = Strong([Str(\"Warning:\")])\n return BlockQuote(value)\n elif first_string == \"[!ATTENTION]\":\n value[0][\"c\"][0] = Strong([Str(\"Attention:\")])\n return BlockQuote(value)\n except Exception:\n return\n return", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def edit_type(self, candidate, word):\n edit = [False] * 4\n correct = \"\"\n error = \"\"\n replaced = ''\n replacer = ''\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]: # inconsistency in the first (i + 1) characters of the two strings\n if candidate[i:] == word[i - 1:]:\n edit[1] = True # deletion\n correct = candidate[i - 1] # candidate[i - 1] is deleted and we get word\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n edit[0] = True # insertion\n correct = ''\n error = word[i] # word[i] is redundant\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = 
word[i - 1] + error\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True # substitution\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True # transposition\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n # string inversion\n candidate = candidate[::-1]\n word = word[::-1]\n\n for i in range(min([len(word), len(candidate)]) - 1):\n if candidate[0:i + 1] != word[0:i + 1]:\n if candidate[i:] == word[i - 1:]:\n edit[1] = True\n correct = candidate[i - 1]\n error = ''\n replaced = candidate[i - 2]\n replacer = candidate[i - 2] + candidate[i - 1]\n break\n elif candidate[i:] == word[i + 1:]:\n correct = ''\n error = word[i]\n if i == 0:\n replacer = '@'\n replaced = '@' + error\n else:\n replacer = word[i - 1]\n replaced = word[i - 1] + error\n edit[0] = True\n break\n if candidate[i + 1:] == word[i + 1:]:\n edit[2] = True\n correct = candidate[i]\n error = word[i]\n replaced = error\n replacer = correct\n break\n if candidate[i] == word[i + 1] and candidate[i + 2:] == word[i + 2:]:\n edit[3] = True\n correct = candidate[i] + candidate[i + 1]\n error = word[i] + word[i + 1]\n replaced = error\n replacer = correct\n break\n\n if word == candidate:\n return \"None\", '', '', '', ''\n if edit[0]:\n return EDIT_TYPE_INSERTION, correct, error, replaced, replacer\n elif edit[1]:\n return EDIT_TYPE_DELETION, correct, error, replaced, replacer\n elif edit[2]:\n return EDIT_TYPE_SUBSTITUTION, correct, error, replaced, replacer\n elif edit[3]:\n return EDIT_TYPE_TRANSPOSITION, correct, error, replaced, replacer", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def text_editor():\n return True", "def task_format_check():\n return {\"actions\": [[\"black\", HERE, \"--check\"]], \"verbosity\": 1}", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def stepText2Changed(build, step, text2):", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def on_idEdit_textChanged(self, txt):\n self.__generateDefaultCommitMessage()\n 
self.__updateOK()", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def element_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier))\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def process_raw_text(self, file_name, column_side):\n self.mvc_check()\n\n model_txt = None\n if column_side == LEFT_TEXT:\n model_txt = self.model.txt1\n elif column_side == RIGHT_TEXT:\n model_txt = self.model.txt2\n\n model_txt.open_raw(file_name)\n model_txt.process_raw()\n self.opened_txt[column_side] = True\n self.can_align = self.opened_txt[LEFT_TEXT] and self.opened_txt[RIGHT_TEXT]\n\n # Goldsmith\n model_txt.make_trie(column_side)\n model_txt.apply_goldsmith(1.1, 20, column_side)\n\n # Associate word for alignment if both text were opened\n if self.can_align:\n for view in self.views:\n view.end_task()\n view.change_task(\"Associating words\")\n self.model.associate_words(1.5)\n for view in self.views:\n view.end_task()\n\n # TODO : coherent saving to database using model.save_data\n\n return model_txt.str", "def reformat():\n toolkit.reformat()", "def get_text(self):\n\n if self.text: return self.text\n # retrieve from args and return if exists\n text = Settings.get_text() or None\n if text: \n self.text = text\n return text\n # prompt skip\n if not Settings.prompt(\"text\"): return None\n question = {\n 'type': 'input',\n 'name': 'text',\n 'message': 'Text:'\n }\n text = prompt(question)[\"text\"]\n # confirm text\n if not Settings.confirm(text): return self.get_text()\n self.text = text\n return self.text", "def handleFormatText(paragraphContent):\n # We tokenize and remove the stop word\n words = tokenizeWord(paragraphContent) \n \n stemWords = []\n # We loop on each word.\n for word in words:\n stemWord = STEMMER.stem(word)\n \n # Selection on a part of string.\n stemWord = re.sub(\"[*\\'\\.+:,\\`:/]\", '', stemWord)\n if stemWord.isdigit() or len(stemWord) < 2:\n continue\n \n stemWords.append(stemWord)\n my_r_string = stemWords.pop(0)\n for word in stemWords:\n my_r_string += \" \"+str(word)\n return my_r_string", "def formatOutput(self, storedText, titleMode, internal=False):\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)", "def read_plain_txt(input_fn: str) -> Tuple[List[str], List[str]]:\n\n with open(input_fn, 'r') as f:\n migrations = []\n queries = []\n mode = 'none'\n for line in f:\n stripped = line.strip()\n if len(stripped) == 0:\n continue\n if stripped.lower() == '== migrations':\n if mode != 'none':\n raise ValueError(f'Invalid {input_fn}: The migrations section 
should appear first.')\n mode = 'migrations'\n elif stripped.lower() == '== queries':\n if mode != 'migrations':\n raise ValueError(f'Invalid {input_fn}: The queries section should appear after the migrations section.')\n mode = 'queries'\n elif stripped[0] == '#':\n pass\n else:\n if mode == 'migrations':\n migrations.append(stripped)\n elif mode == 'queries':\n queries.append(stripped)\n else:\n pass\n return migrations, queries", "def on_lineEdit_textChanged(self, p0):\n # str_me = \"我爱我的祖国\"\n # self.lineEdit.setText(str_me) # 设置单行文本内容\n input_text = self.lineEdit.text()\n self.textEdit.setPlainText(input_text)\n # self.textEdit.setHtml(input_text) # 显示Html,如 <font color='red' size='20'>HELLO!</font>\n a = self.textEdit.toPlainText()\n print(a)", "def post_process_text(self, text):\n\t\treturn text", "def text(self) -> str:", "def editText(self, text, jumpIndex=None, highlight=None):\n try:\n import gui\n except ImportError, e:\n print 'Could not load GUI modules: %s' % e\n return text\n editor = gui.EditBoxWindow()\n return editor.edit(text, jumpIndex=jumpIndex, highlight=highlight)", "def alter_text_format(self):\n service = self.slides_service\n requests = [\n {\n 'updateParagraphStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'alignment': 'CENTER'\n },\n 'fields': 'alignment'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.song_numbers_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.TITLE_FONT_SIZE, # numbers slightly larger than lyrics\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.left_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n },\n {\n 'updateTextStyle': {\n 'objectId': self.right_box_id,\n 'style': {\n 'bold': self.bold,\n 'fontFamily': 'Arial',\n 'fontSize': {\n 'magnitude': self.FONT_SIZE,\n 'unit': 'PT'\n },\n 'foregroundColor': {\n 'opaqueColor': {\n 'rgbColor': {\n 'blue': 1.0,\n 'green': 1.0,\n 'red': 1.0\n }\n }\n }\n },\n 'fields': 'bold,foregroundColor,fontFamily,fontSize'\n }\n }\n ]\n body = {\n 'requests': requests\n }\n response = service.presentations().batchUpdate(presentationId=self.presentation_id, body=body).execute()\n print(f'Updated the text style for shape with ID: {self.left_box_id}')\n return response", "def T(value):\n return (value if isinstance(value, basestring) else\n str(value) if isinstance(value, AltText) else \"\")", "def as_tuple(self, selected: bool) -> tuple:\n if self.check_ans:\n current = self.num.ljust(2) + self.answer\n else:\n current = self.num.ljust(2)+self.current.rjust(1)\n return ((\n \"fg:\"+self.style_fg + \" bg:\" # apply foreground color\n + (self.style_bg_selected if selected else self.style_bg),\n current\n if self.answer != \".\" else \"▐\" + \"█\" + \"▌\"), ('bg:#fefefe fg:#000', \"│\"))", "def edit(self,edits):\n\t\tself.alphanumeric=edits['alphanumeric'] if 'alphanumeric' in edits else None\n\t\tself.alphanumeric_color = edits['alphanumeric_color'] if 'alphanumeric_color' in edits else None\n\t\tif self.alphanumeric_color ==\"grey\":\n\t\t\tself.alphanumeric_color = 
\"gray\"\n\t\tself.background_color = edits['background_color'] if 'background_color' in edits else None\n\t\tif self.background_color == \"grey\":\n\t\t\tself.background_color = \"gray\";\n\t\tshapeChoices = dict((x,y) for x,y in Target.SHAPE_CHOICES)\n\t\tself.shape = str(shapeChoices[edits['shape']]) if 'shape' in edits else None\n\t\tself.orientation = edits['orientation'] if 'orientation' in edits else None\n\t\tself.ptype = edits['ptype']\n\t\tself.description = edits['description'] if 'description' in edits else None\n\t\tself.save()", "def label_textvalidation(objectidentifier,validationtext):\r\n msg, status = \"\", True\r\n\r\n try:\r\n 'Here we will pass objectidentifier for required element'\r\n text_heading = ui_controls.text_view(get_obj_identifier(objectidentifier),label=True)\r\n print \"Current text returned by UI is==>\"+str(text_heading)\r\n\r\n 'To check if it is returning empty value here'\r\n if not text_heading:\r\n print str(validationtext)+\" text does not exist and it is returning empty value.\"\r\n return False, msg\r\n\r\n 'Comparing text retrieved from UI with validation text'\r\n if validationtext.strip() == text_heading.strip():\r\n print (str(validationtext)+\" text has been found!!!\")\r\n else:\r\n print(\"Sorry!!!text has been mismatched,it should be \"+str(validationtext))\r\n print (\"Text shown at UI is==>\"+str(text_heading))\r\n return False, msg \r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def displayText():\n global entryWidget,entryWidget1,entryWidget2,entryWidget3,entryWidget4 ,entryWidget5,entryWidget6\n global thefilename,itrial,do_stim, delaylen,ntest_arms,stop_if_error,timeout_arm_sec\n thefilename=entryWidget.get().strip()\n itrial=entryWidget1.get().strip()\n do_stim=entryWidget2.get().strip()\n delaylen=entryWidget3.get().strip()\n ntest_arms=entryWidget4.get().strip()\n stop_if_error=int(entryWidget5.get().strip())==1 # convert to logical\n print 'stop_if_error is ', stop_if_error\n\n\n timeout_arm_sec=entryWidget6.get().strip()\n root.destroy()\n return thefilename,itrial,do_stim,delaylen,ntest_arms,stop_if_error,timeout_arm_sec" ]
[ "0.787282", "0.76840067", "0.7569935", "0.7569935", "0.73801994", "0.7312141", "0.7185404", "0.71525943", "0.70902133", "0.69048536", "0.6863909", "0.6808694", "0.66044503", "0.62728953", "0.6122511", "0.60096884", "0.5692688", "0.5692688", "0.5692688", "0.5692688", "0.5692688", "0.5534609", "0.55292165", "0.55122524", "0.54905003", "0.5460751", "0.539562", "0.5386719", "0.5354485", "0.53479636", "0.53389454", "0.532986", "0.53046286", "0.53009975", "0.52862155", "0.525454", "0.5231931", "0.52078664", "0.5165661", "0.51404953", "0.51277274", "0.51189363", "0.50971085", "0.5088265", "0.5054947", "0.5048544", "0.5042088", "0.5035617", "0.5031853", "0.5024283", "0.5017046", "0.50136364", "0.50128716", "0.5011537", "0.49993557", "0.49967337", "0.4986811", "0.4980363", "0.49699798", "0.4965772", "0.49580243", "0.4952371", "0.4948915", "0.49471527", "0.49142417", "0.49096134", "0.48924685", "0.48859364", "0.48703972", "0.4866565", "0.4855901", "0.48431557", "0.48414293", "0.4838512", "0.48360094", "0.48314703", "0.48113507", "0.48052862", "0.4801024", "0.4800837", "0.4797172", "0.47929224", "0.47766042", "0.4773588", "0.47715577", "0.47713175", "0.47710884", "0.47694808", "0.47682908", "0.4765882", "0.47634518", "0.47566456", "0.4753546", "0.47510284", "0.47495022", "0.47466233", "0.47453552", "0.47350493", "0.47240865", "0.4720859" ]
0.67496157
12
Return list of choices for combo box, each a tuple of edit text and annotated text
def getEditChoices(self, currentText=''):
    format = globalref.options.strData('EditTimeFormat', True)
    now = GenTime().timeStr(format)
    choices = [(now, '(%s)' % _('now'))]
    for hr in (6, 9, 12, 15, 18, 21, 0):
        time = GenTime((hr, 0)).timeStr(format)
        choices.append((time, ''))
    return choices
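An illustrative sketch (not part of the dataset row) of how (edit text, annotation) choice tuples like the ones above might be built and consumed. The standard-library datetime calls stand in for the application-specific GenTime and globalref helpers, which are not defined in this snippet and are assumptions here.

import datetime

def get_edit_choices(time_format='%H:%M'):
    # Current time first, annotated so a menu can render e.g. "08:42 (now)".
    now = datetime.datetime.now().strftime(time_format)
    choices = [(now, '(now)')]
    # Fixed times of day, with no annotation.
    for hr in (6, 9, 12, 15, 18, 21, 0):
        choices.append((datetime.time(hr, 0).strftime(time_format), ''))
    return choices

for text, note in get_edit_choices():
    # A combo box would display "text note" but insert only "text" on selection.
    print(text, note)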
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def choices(self):\n return tuple(self._choices)", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice>\" +\r\n \"tag; got {0} instead\".format(choice.tag)\r\n )\r\n\r\n components = []\r\n choice_text = ''\r\n if choice.text is not None:\r\n choice_text += choice.text\r\n # Initialize our dict for the next content\r\n adder = {\r\n 'type': 'text',\r\n 'contents': choice_text,\r\n 'tail_text': 
'',\r\n 'value': ''\r\n }\r\n components.append(adder)\r\n\r\n for elt in choice:\r\n # for elements in the choice e.g. <text> <numtolerance_input>\r\n adder = {\r\n 'type': 'text',\r\n 'contents': '',\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n tag_type = elt.tag\r\n # If the current `elt` is a <numtolerance_input> set the\r\n # `adder`type to 'numtolerance_input', and 'contents' to\r\n # the `elt`'s name.\r\n # Treat decoy_inputs and numtolerance_inputs the same in order\r\n # to prevent students from reading the Html and figuring out\r\n # which inputs are valid\r\n if tag_type in ('numtolerance_input', 'decoy_input'):\r\n # We set this to textinput, so that we get a textinput html\r\n # element.\r\n adder['type'] = 'textinput'\r\n adder['contents'] = elt.get('name')\r\n else:\r\n adder['contents'] = elt.text\r\n\r\n # Add any tail text(\"is the mean\" in the example)\r\n adder['tail_text'] = elt.tail if elt.tail else ''\r\n components.append(adder)\r\n\r\n # Add the tuple for the current choice to the list of choices\r\n choices.append((choice.get(\"name\"), components))\r\n return choices", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def __str__(self):\n return \"choice_text: \" + self.choice_text", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def __init__(self, *args, **kwargs):\n super(TaggedContentItemForm, self).__init__(*args, **kwargs)\n wtf = Tag.objects.filter(group__system=False)\n wlist = [w for t, w in self.fields.items() if t.endswith(\"tags\")]\n choices = []\n for choice in wtf:\n choices.append((choice.id, str(choice)))\n [setattr(w, 'choices', choices) for w in wlist]", "def get_choices(cls):\n return cls.values.items()", "def __str__(self):\n return self.choice_text", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def fill_combobox_attributes(self):\n\n list_char = [\"\"]\n list_num = [\"\"]\n if self.ui.radioButton_file.isChecked():\n for a in self.attributes:\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n else:\n for a in self.attributes:\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.clear()\n self.ui.comboBox_char_attributes.clear()\n self.ui.comboBox_char_attributes.addItems(list_char)\n self.ui.comboBox_num_attributes.addItems(list_num)\n self.ui.comboBox_num_attributes.blockSignals(False)\n 
self.ui.comboBox_char_attributes.blockSignals(False)", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def combo_callback(self, eventobj):\n print(self.combo_input.get()) # print name\n print(self.combo_input.current()) # print index", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def set_choices(self, index, choices):\n if len(choices) == 1:\n self._label(index)\n self._widgets[index][\"text\"] = str(choices[0])\n else:\n self._combo(index)\n self._widgets[index][\"values\"] = [str(t) for t in choices]\n width = max(len(str(t)) for t in choices)\n width = max(5, width)\n self._widgets[index][\"width\"] = width", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def _build_dropdown(options):\n return [(x, x) for x in options]", "def get_choices_new_protected():\n ret = []\n ret.append( (1, _(u'Nur Community-Mitglieder dürfen neue Beiträge leisten')) )\n ret.append( (-1, _(u'Offener Zugang')) )\n return ret", "def create_combo(self) -> typing.Iterable[Combo]:\n raise NotImplementedError()", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice> tag; got %s instead\"\r\n % choice.tag)\r\n choices.append((choice.get(\"name\"), stringify_children(choice)))\r\n return choices", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def get_generic_choices(model, key, allow_null=False):\n CHOICES = [('', '--------')] if allow_null else []\n for i in model.objects.all().values_list(key, flat=True).distinct():\n CHOICES.append((str(i), str(i)))\n CHOICES.sort()\n\n return CHOICES", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def input_choices_from_list(choices, text):\n no_courses_text = \"\"\"\n init will only list the courses you are enrolled in\n and there seem to be none.\n Either enrol in a course or add the course id as command line argument.\n \"\"\"\n if choices is None or len(choices) == 0:\n print(no_courses_text)\n raise SystemExit(1)\n\n digits = str(math.ceil(math.log10(len(choices))))\n format_str = '{:' + digits + 'd} {}'\n for n, c in enumerate(choices):\n print(format_str.format(n, c))\n try:\n return [int(c) for c in input(text).split()]\n except EOFError:\n return []", "def __init__(self, \n num_fld=1, \n lab_txt=[\"1\"], \n txt_fld=[\"1\"], \n title_txt=\"test\", \n comb_txt=[],\n comb_lab_txt=[], \n comb_num=0, \n root_x=50, \n root_y=50):\n super().__init__()\n self.geometry(f'+{root_x}+{root_y}') 
#head=y+20px\n self.str_in=[]\n self.title(title_txt)\n if comb_txt:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n self.comb=[]\n self.act=[]\n lab=[0]*num_fld\n lab_comb=[0]*comb_num\n else:\n self.name=[0]*num_fld\n ent=[0]*num_fld\n lab=[0]*num_fld\n self.comb=[]\n self.act=[]\n for i in range(num_fld):\n self.name[i]=tk.StringVar()\n ent[i]=tk.Entry(self,textvariable=self.name[i])\n ent[i].insert(0, txt_fld[i])\n lab[i] = tk.Label(self,width=15, text=lab_txt[i])\n lab[i].pack()\n ent[i].pack()\n for i in range(comb_num):\n lab_comb[i]=tk.Label(self,width=35, text=comb_lab_txt[i])\n self.comb.append(ttk.Combobox(self, values=comb_txt))\n lab_comb[i].pack()\n self.comb[i].pack()\n self.comb[i].current(1)\n\n but_ac=tk.Button(self, text=\"Accept\", command=self.ins)\n but_ac.pack()\n self.mainloop", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def combobox(self):\n return self._combo", "def _comboSlot(self, select):\n select = self.sender().itemText(select)\n if qt4:\n qs = str(self.sender().property(\"dom address\").toPyObject())\n else:\n qs = str(self.sender().property(\"dom address\"))\n item = QtXml.QDomElement()\n\n ind = qs.rfind('/')\n ids = qs[ind:]\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() != select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n widget_enabled = h.elem.attribute(\"Enabled\", \"True\")\n widget_visible = h.elem.attribute(\"Visible\", \"Unknown\")\n h.widget.setEnabled(False)\n if(widget_visible != \"Unknown\"):\n h.label.hide()\n h.widget.hide()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")\n\n item = self.qhash[qs].elem.firstChildElement(\"Item\")\n while(item.isNull() is False):\n itemName = item.firstChildElement(\"Name\")\n if(str(itemName.text()).strip() == select):\n activ = item.firstChildElement(\"Activate\")\n while(activ.isNull() is False):\n s = str(activ.text()).strip() + ids\n h = self.qhash[s]\n h.widget.setEnabled(True)\n h.label.show()\n h.widget.show()\n activ = activ.nextSiblingElement(\"Activate\")\n item = item.nextSiblingElement(\"Item\")", "def get_chosen_options(user):\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n ocl = eval(user_application.options_selected)\n chosen_options = []\n for oc in ocl:\n chosen_options.append(Option.objects.get(opt_code=int(oc))) \n return chosen_options", "def display_choose(self, text, choices):\n cur_index = 0\n key = None\n while key != 'KEY_NEWLINE':\n if key == 'KEY_UP':\n cur_index = max(cur_index - 1, 0)\n elif key == 'KEY_DOWN':\n cur_index = min(cur_index + 1, len(choices) - 1)\n self.stdscr.erase()\n for line in text:\n self.stdscr.addstr(f'{PADCHAR}{line}\\n')\n for index, value in enumerate(choices):\n self.stdscr.addstr('\\n')\n self.stdscr.addstr(PADCHAR)\n self.stdscr.addstr(value, color_pair(7 if index == cur_index else 1))\n self.stdscr.addstr(f'\\n\\n{PADCHAR}') \n key = self.get_key() \n return cur_index", 
"def get_context(self, name, value, attrs=None, choices=()):\n context = super(TriStateCheckboxSelectMultiple, self).get_context(\n name, value, attrs\n )\n\n choices = dict(it.chain(self.choices, choices))\n if value is None:\n value = dict.fromkeys(choices, False)\n else:\n value = dict(dict.fromkeys(choices, False).items() +\n value.items())\n\n context['values'] = [\n (choice, label, value[choice])\n for choice, label in choices.iteritems()\n ]\n\n return context", "def get_poll_choices(self, games: [Game]) -> [dict]:\n answer_texts = []\n for g in games:\n answer_texts.append(g.name + \" - \" + g.genre)\n answer_texts = sorted(answer_texts, key=str.lower)\n poll_choices = []\n for at in answer_texts:\n poll_choices.append({\"text\": at})\n return poll_choices", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def _get_choices ( self, context, path = '' ):\n choices = []\n gdc = context.get_data_context\n for name in context.data_contexts:\n next_path = path_for( path, name )\n choices.append( TemplateChoice( choice_value = next_path ) )\n choices.extend( self._get_choices( gdc( name ), next_path ) )\n \n return choices", "def widgets(parameter: Parameter):\n widgets = []\n for key in parameter.keys():\n textEdit = QTextEdit()\n textEdit.setText(key)\n widgets.append(textEdit)\n if isinstance(parameter[key], Enum):\n comboBox = MyQtEnumComboBox()\n comboBox.fillValues(type(parameter[key]))\n widgets.append(comboBox)\n elif isinstance(parameter[key], bool):\n comboBox = QComboBox()\n comboBox.addItems((\"False\", \"True\"))\n widgets.append(comboBox)\n else:\n textEdit = QTextEdit()\n textEdit.setText(str(parameter[key]))\n widgets.append(textEdit)\n for widget in widgets:\n widget.setFixedHeight(30)\n return widgets", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def choice(text, choices, **kwargs):\n return click.prompt(click.style('> {}'.format(text), fg='blue', bold=True),\n type=click.Choice(choices),\n **kwargs)", "def the_option_named(text: str) -> \"SelectByText\":\n return SelectByText(text)", "def make_ddls( self, parent ):\n # ---- 0\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n a_widget.bind( \"<<ComboboxSelected>>\", self.sync_ddl_0 )\n\n # AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_0_widget = a_widget\n\n # ---- 1\n a_widget = ttk.Combobox( parent,\n width = self.arg_width,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n self.ddl_1_widget = a_widget\n\n # ---- 2\n a_widget = ttk.Combobox( parent,\n width = self.arg_width + 5,\n state = \"normal\",\n textvariable = \"self.arg_2_var\" )\n\n # 
AppGlobal.gui_style.style_combobox( a_widget )\n self.ddl_2_widget = a_widget\n\n # self.ddl_widgets = [ self.ddl_0_widget, self.ddl_1_widget, self.ddl_2_widget, ]\n # print( self.ddl_widgets[ 0 ] == self.ddl_widgets[ 1 ])\n\n #self.load_ddl_0( )", "def get_text_type():\n opt = ['text', 'email', 'password']\n inp = option_menu(opt, 'Select text type:')\n\n # mark text type with option\n OPTIONS['text-type'] = opt[inp]\n\n # add option to collected list\n add_to_collected('text type', opt[inp])\n\n return", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def widgets_from_abbreviations(self, seq):\n result = []\n for name, abbrev, default in seq:\n widget = self.widget_from_abbrev(abbrev, default)\n if not (isinstance(widget, ValueWidget) or isinstance(widget, fixed)):\n if widget is None:\n raise ValueError(\"{!r} cannot be transformed to a widget\".format(abbrev))\n else:\n raise TypeError(\"{!r} is not a ValueWidget\".format(widget))\n if not widget.description:\n widget.description = name\n widget._kwarg = name\n result.append(widget)\n return result", "def form_SelectChoiceCallableOptions(request):\n schema = schemaish.Structure()\n schema.add('mySelect', schemaish.Integer())\n def _():\n options = [(1,'a'),(2,'b'),(3,'c')]\n for option in options:\n yield option\n\n form = formish.Form(schema, 'form')\n form['mySelect'].widget = formish.SelectChoice(_)\n return form", "def get_choices(self, cutoff=None):\n queryset = self.get_queryset()\n if queryset is None:\n # Ensure that field.choices returns something sensible\n # even when accessed with a read-only field.\n return {}\n\n if cutoff is not None:\n queryset = queryset[:cutoff]\n\n return OrderedDict(\n [\n (\n # This line below is modifed.\n item.pk,\n self.display_value(item),\n )\n for item in queryset\n ]\n )", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def _find_options(self):\r\n elements = 
self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def complete_opt_display(self, text, *_):\n return [t + \" \" for t in DISPLAYS if t.startswith(text)]", "def occurrence_choices():\n OCCURRENCE_CHOICES = [('one_off', 'One Off'), ('weekly', 'Weekly'), ('fortnightly', 'Fortnightly')]\n occurrence = forms.ChoiceField(choices=OCCURRENCE_CHOICES, widget=forms.RadioSelect())\n return occurrence", "def objects_to_choices(queryset):\n res = []\n for elm in queryset:\n res.append((elm.pk, unicode(elm)))\n return res", "def _combobox_choice(self, _=None):\n combobox_string = self.value_combobox.var.get()\n if combobox_string.startswith(\"Unknown: \"):\n value = int(combobox_string[len(\"Unknown: \"):])\n else:\n value = int(self.value_combobox.var.get().split(\" \")[0])\n self.master.change_field_value(self.field_name, value)", "def _get_completion(self, original_text, remaining_text, options):\n if self.should_hide_completions(original_text=original_text,\n remaining_text=remaining_text,\n allowed_suffixes=(\" \", \".\")):\n return []\n\n return [(option, len(remaining_text)) for option in options\n if option.startswith(remaining_text) and not option.startswith(\"_\")]", "def 取现行选中项文本(self): # real signature unknown; restored from __doc__\n return self.GetStringSelection()", "def test_rendering_combobox(qtbot):\n layer = Image(np.random.rand(8, 8))\n qtctrl = QtImageControls(layer)\n qtbot.addWidget(qtctrl)\n combo = qtctrl.renderComboBox\n opts = {combo.itemText(i) for i in range(combo.count())}\n rendering_options = {\n 'translucent',\n 'additive',\n 'iso',\n 'mip',\n 'minip',\n 'attenuated_mip',\n 'average',\n }\n assert opts == rendering_options\n # programmatically updating rendering mode updates the combobox\n layer.rendering = 'iso'\n assert combo.findText('iso') == combo.currentIndex()", "def on_correct_answer_select(self, spinner, text):\n\n self.answer = text\n self.multiple_choice_answer = text", "def fill_combobox(self):\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 1 ORDER BY last_name ASC\"\n self.CB_employee.addItem(\"\")\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))\n sqlstring = \"SELECT first_name, last_name from employees WHERE enabled = 0 ORDER BY last_name ASC\"\n for employee in self.ms.c.execute(sqlstring):\n self.CB_employee.addItem(\" \".join((employee[0], employee[1])))", "def get_text_data_list(self):\n return [self.name, str(self.type)]", "def prepareAutoComplete(editor, text, charPos, lineStartCharPos,\r\n wikiDocument, settings):\r\n return []", "def initDefaultChoices(self):\n return []", "def list_selector_widget(members=None,\n preselect=None,\n entry=False,\n callback=None):\n store, i=generate_list_model(members,\n active_element=preselect)\n\n if entry:\n combobox=gtk.ComboBoxEntry(store, column=0)\n else:\n combobox=gtk.ComboBox(store)\n cell = gtk.CellRendererText()\n combobox.pack_start(cell, expand=True)\n combobox.add_attribute(cell, 'text', 0)\n combobox.add_attribute(cell, 'background', 2)\n\n combobox.set_active(-1)\n if i is None:\n i = store.get_iter_first()\n if i is not None:\n combobox.set_active_iter(i)\n\n if entry:\n def get_current_element(combo):\n try:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n except (TypeError, AttributeError):\n return unicode(combo.child.get_text())\n def 
set_current_element(combo, t):\n combo.child.set_text(t)\n else:\n def get_current_element(combo):\n if combo.get_active_iter() is not None:\n return combo.get_model().get_value(combo.get_active_iter(), 1)\n else:\n return None\n def set_current_element(combo, el):\n # Find the index of the element\n l=[ t[0] for t in enumerate(combo.get_model()) if t[1][1] == el ]\n if l:\n # The element is present.\n combo.set_active(l[0])\n else:\n combo.set_active_iter(combo.get_model().append( (unicode(el), el, None) ))\n\n # Bind the method to the combobox object\n combobox.get_current_element = get_current_element.__get__(combobox)\n combobox.set_current_element = set_current_element.__get__(combobox)\n\n if callback is not None:\n combobox.connect('changed', callback)\n\n return combobox", "def multiple_choices(self, choices, response):\n for elem in self.method_order:\n if elem in choices:\n return [elem]\n raise NoData", "def get_classes(self, code):\n \n select = v.Combobox(\n _metadata={'name':code}, \n items=self.items, \n v_model=None, \n dense=True,\n hide_details=True\n )\n \n select.observe(partial(self.store, code), 'v_model')\n \n return select", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def comboBox(args: list, slot) -> QComboBox:\n comboBox = QComboBox()\n comboBox.addItems(args[0])\n comboBox.currentTextChanged.connect(slot)\n return comboBox", "def _getBrailleRegionsForComboBox(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForComboBox\", obj)\n\n regions = []\n\n focusedRegionIndex = 0\n label = self._script.getDisplayedLabel(obj)\n if label and (len(label) > 0):\n regions.append(braille.Region(label + \" \"))\n focusedRegionIndex = 1\n\n # Check to see if the text is editable. 
If so, then we want\n # to show the text attributes (such as selection -- see bug\n # 496846 for more details).\n #\n textObj = None\n for child in obj:\n if child and child.getRole() == pyatspi.ROLE_TEXT:\n textObj = child\n if textObj and textObj.getState().contains(pyatspi.STATE_EDITABLE):\n textRegion = braille.Text(textObj)\n regions.append(textRegion)\n else:\n displayedText = self._script.getDisplayedText(obj)\n if displayedText:\n regions.append(braille.Region(displayedText))\n\n regions.append(braille.Region(\n \" \" + rolenames.getBrailleForRoleName(obj)))\n\n # Things may not have gone as expected above, so we'll do some\n # defensive programming to make sure we don't get an index out\n # of bounds.\n #\n if focusedRegionIndex >= len(regions):\n focusedRegionIndex = 0\n if len(regions) == 0:\n focusedRegion = None\n else:\n focusedRegion = regions[focusedRegionIndex]\n\n # [[[TODO: WDW - perhaps if a text area was created, we should\n # give focus to it.]]]\n #\n return [regions, focusedRegion]", "def on_tagCombo_editTextChanged(self, txt):\n self.__generateDefaultCommitMessage()\n self.__updateOK()", "def get_choose():\n global result, choose, win_update_concurrence, output1, confirmation, win_create_update_str, number_str\n number_str = choose.get()\n result = result[int(number_str) - 1]\n output1.delete(1.0, END)\n for i in result:\n output1.insert(END, str(i) + '\\n')\n confirmation.destroy()\n confirmation = Label(win_create_update_str, text='Измените строку и нажмите \"Редактировать\"')\n confirmation.place(x=15, y=40)\n win_update_concurrence.destroy()", "def getOptionsNames(self) -> List[unicode]:\n ...", "def get_action_choices():\n from hardware.management.commands.gpio_buttons import Command\n import re\n pattern = re.compile(r'^on_(?P<name>\\w+)_press$')\n choices = []\n for member in dir(Command):\n match = pattern.match(member)\n if match:\n action = match.groupdict()['name']\n name = action.replace('_', ' ').title()\n choices.append((action, name))\n return choices", "def list_selector(title=None,\n text=None,\n members=None,\n controller=None,\n preselect=None,\n entry=False):\n combobox = list_selector_widget(members=members,\n preselect=preselect,\n entry=entry)\n\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_OK, gtk.RESPONSE_OK,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n d.vbox.add(l)\n\n d.vbox.add(combobox)\n combobox.show_all()\n\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n res=d.run()\n retval=None\n if res == gtk.RESPONSE_OK:\n retval=combobox.get_current_element()\n d.destroy()\n return retval", "def build_comboboxes(activities, events):\n global comboboxes\n # For each activity set up a selector for an event\n\n for activity in activities:\n\n # Setup frame for better display in gui\n frame = Frame(main_window)\n frame.configure(background=\"gray30\")\n\n # Label the left column as activity in a model + \"beautify gui\"\n text = \"Activity name (model):\"\n Label(frame, text=text, bg=\"gray30\", fg=\"white\", padx=5).grid(column=0, row=0)\n Label(frame, text=activity, bg=\"gray30\", fg=\"white\").grid(column=0, row=1)\n\n # Set up the combobox for an event\n combo = Combobox(frame)\n combo['values'] = events\n\n # If activity is in events preselect the current one\n if activity in events:\n combo.current(events.index(activity))\n\n # Label the combobox and place label and box in frame\n 
Label(frame, text=\"Event name (log):\", bg=\"gray30\", fg=\"white\", padx=5).grid(column=1, row=0)\n combo.grid(column=1, row=1)\n\n # If the last activity in the graph is handled then do not write a separator\n if activity != activities[-1]:\n Separator(frame, orient=\"horizontal\").grid(row=2, columnspan=2, sticky=\"ew\", pady=10)\n\n comboboxes[activity] = combo\n # place the frame in the main_window\n frame.grid(column=0)", "def choice(choices=[], message=\"Pick something.\", title=None):\n return dialog(\n \"choice\",\n choices=choices,\n message=message,\n title=title,\n )", "def comboBoxes(self):\r\n # Cities Combo Button\r\n self.comboCities = QComboBox()\r\n self.comboCities.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboCities.addItems(\r\n ['Girón', 'Piedecuesta', 'Floridablanca', 'Bucaramanga'])\r\n self.grid.addWidget(self.comboCities, 6, 1, 1, 2)\r\n self.comboCities.setCurrentText(\"Bucaramanga\")\r\n # Payment Combo Button\r\n self.comboPayment = QComboBox()\r\n self.comboPayment.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: 15px;\r\n background-color : #A8DBC5;\r\n border: 1px solid white;\r\n\r\n \"\"\")\r\n self.comboPayment.addItems(['Efectivo', 'Nequi'])\r\n self.grid.addWidget(self.comboPayment, 7, 1, 1, 2)", "def set_model_from_list (self,cb, items):\n model = gtk.ListStore(str)\n for i in items:\n model.append([i])\n cb.set_model(model)\n if type(cb) == gtk.ComboBoxEntry:\n cb.set_text_column(0)\n elif type(cb) == gtk.ComboBox:\n cell = gtk.CellRendererText()\n cb.pack_start(cell, True)\n cb.add_attribute(cell, 'text', 0)", "def get_select_list_name(self):\n if self.is_select_type():\n row_type = self.get_type()\n _, choice_list = row_type.rsplit(maxsplit=1)\n return choice_list\n return None", "def dropdown_multiple(id_, placeholder, size=\"200px\", text=None):\n components = []\n if text:\n components.append(html.Div(text, className=\"select-dropdown-text\"))\n components.append(\n dcc.Dropdown(id=id_, placeholder=placeholder, style={\"width\": size}, multi=True)\n )\n return html.Div(className=\"select-dropdown\", children=components)", "def _select_fields(self):\r\n return []" ]
[ "0.7317573", "0.67196256", "0.64775187", "0.64775187", "0.6326026", "0.6246536", "0.62373483", "0.621034", "0.6183985", "0.6159754", "0.60702986", "0.60540736", "0.6048126", "0.6031349", "0.59674495", "0.59313804", "0.59044296", "0.589701", "0.58720684", "0.5854758", "0.5847209", "0.5820851", "0.58148324", "0.58069074", "0.57882327", "0.5786425", "0.5780959", "0.576748", "0.5740305", "0.5725926", "0.5705812", "0.56939614", "0.5691819", "0.56806564", "0.5678611", "0.56393886", "0.56306916", "0.56285214", "0.5611032", "0.56062984", "0.55966777", "0.5592304", "0.5584671", "0.558291", "0.55813944", "0.5539905", "0.55298275", "0.55076873", "0.54994667", "0.5498247", "0.5484682", "0.5477484", "0.5475317", "0.5474854", "0.54667294", "0.546522", "0.54601145", "0.54541856", "0.54387003", "0.543409", "0.54327786", "0.54251224", "0.5421484", "0.5420235", "0.5411706", "0.5386423", "0.5386027", "0.53810036", "0.5372998", "0.53663373", "0.5349993", "0.53489363", "0.5344798", "0.5334467", "0.532487", "0.53235096", "0.53049517", "0.5295218", "0.5286929", "0.5286572", "0.52769655", "0.5273178", "0.5270343", "0.5262062", "0.525322", "0.5251497", "0.52447593", "0.52444136", "0.5240632", "0.5237951", "0.52226573", "0.5220454", "0.5215175", "0.5209009", "0.52045727", "0.5197735", "0.5197278", "0.5187635", "0.51874816", "0.5182797" ]
0.6083853
10
Return initial stored value for new nodes
def getInitDefault(self):
    if self.initDefault in TimeFormat.timeStampStrings:
        return GenTime().timeStr()
    return TextFormat.getInitDefault(self)
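An illustrative sketch (not part of the dataset row) of the dynamic-default pattern in the document above: a sentinel time-stamp token resolves to the current time for each new node, while any other stored default falls through to the base class. datetime and the TIME_STAMP_STRINGS tuple stand in for GenTime and TimeFormat.timeStampStrings, which are not defined in this snippet and are assumptions here.

import datetime

TIME_STAMP_STRINGS = ('Now', 'now')  # stand-in for TimeFormat.timeStampStrings

class TextFormat:
    def __init__(self, init_default=''):
        self.initDefault = init_default

    def getInitDefault(self):
        # Base behaviour: new nodes start with the stored default verbatim.
        return self.initDefault

class TimeFormat(TextFormat):
    def getInitDefault(self):
        # A time-stamp token is resolved to the current time per new node.
        if self.initDefault in TIME_STAMP_STRINGS:
            return datetime.datetime.now().strftime('%H:%M:%S')
        return TextFormat.getInitDefault(self)

print(TimeFormat('Now').getInitDefault())    # e.g. '14:07:31'
print(TimeFormat('09:00').getInitDefault())  # '09:00'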
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_new_value(self):\r\n if self.initial_value is None:\r\n return None\r\n\r\n return deepcopy(self.initial_value)", "def initial_value(self):\n return self._initial_value", "def initial(self):\n return zero", "def initial_value(self) -> float:\n return self._initial_value", "def get(self, node):\n if node in self.val:\n return self.val[node]\n else:\n return self.initial", "def _node_defaults(self):\n parent = super(QTree, self)._node_defaults()\n parent[\"state\"] = np.zeros([self.size, self.size])\n parent[\"network\"] = self\n return parent", "def lazy_value(self):\n\n if self.state == Node.State.VALID:\n return self.value\n else:\n return None", "def _set_default_node(self, key):\n if key not in self._key_to_node_index:\n self._key_to_node_index[key] = self._graph.add_node(NodeData(key=key, equivs=[]))\n return self._key_to_node_index[key]", "def __init__(self):\n self.root = self.get_new_node();", "def generate_initial_state(self, x):\n\n if self.initial_state is None:\n x[:] = 0\n return x\n else:\n x[:] = self.initial_state(size=(self._num_neurons, 1))\n return x", "def getInitialValue(self):\n return _libsbml.Trigger_getInitialValue(self)", "def value(self):\n\n return deepcopy(self._node_id)", "def initial_nodes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"initial_nodes\")", "def refresh(self):\n self._pre_action_check('refresh')\n if hasattr(self, '_id'):\n node = self.inflate(self.cypher(\"START n=node({self}) RETURN n\")[0][0][0])\n for key, val in node.__properties__.items():\n setattr(self, key, val)\n else:\n raise ValueError(\"Can't refresh unsaved node\")", "def initial_state(self):\n return 0", "def get_initial(self):\n\t\treturn self.initial", "def calculate_gn_value(self, current_path_length) :\r\n\r\n self.gn_value = (current_path_length) #The g(n) value is the distance of the path if the node is traversed\r", "def getValue(self):\n return self.initValue", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self, value: object) -> None:\n self.value = value # to store node's data\n self.left = None # pointer to root of left subtree\n self.right = None # pointer to root of right subtree", "def __init__(self):\n\n\t\tself.root = None\n\t\tself.numNodes = 0", "def get_initial(self):\n return self.initial[:]", "def value(self):\n return self.node_value", "def __init__(self, data: str):\n self.root = Node(data)\n self.node_count = 1\n self.node_of_last_computed_hash = 0", "def _default_value(self):\n raise NotImplementedError", "def __init__(self, value, parent = None):\n # initialize new node\n self.value = value\n self.parent = parent\n self.left = None\n self.right = None\n self.height = 1", "def initial_state(self):\n return None", "def __init__(self):\n self.node = None\n self.data = None", "def init_id(root: TreeNode):\n current_id = [0]\n init_id_helper(root, current_id)\n return current_id[0]", "def default_value(self):\n if self.default:\n return copy.deepcopy(self.default)\n else:\n return None", "def initialize(self):\n self._value = self.initializer.evaluate(self)\n return self.value()", "def __init__(self, root_value):\n self.root = self.TreeNode(value=root_value)", "def value(self):\n self.refresh_default_value()\n return self.default_value", "def get_initial_state(self):\n return self.get_state(self.get_initial_observation())", "def __init__(self):\n self.start = 
Node('-1')", "def _default_value(self):\n return None", "def get_default_value(self):\n pass", "def get_initial_value(\n self, rel_name):\n return self._np_initval[rel_name].transpose()", "def initial(self) -> np.ndarray:\n return self._dist['initial']", "def __init__(self, val=None):\r\n self.root = {}", "def get_starting_node(self, graph):\n return random.choice(list(graph.nodes))", "def default(self):\r\n return self.default_value()", "def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0", "def __init__(self):\n self.end_of_ngram = False #Flag marking whether this node is the end of an n-gram.\n self.value = None #Provided that the node marks the end of an n-gram, this refers to the value mapped by this n-gram.\n self.children = dict() #A dictionary which maps the next elements in the current path of the prefix tree to the respective node of the tree.", "def initialize_node(db, c):\n\n # have we already called this function?\n if saq.SAQ_NODE_ID is not None:\n return\n\n saq.SAQ_NODE_ID = None\n\n # we always default to a local node so that it doesn't get used by remote nodes automatically\n c.execute(\"SELECT id FROM nodes WHERE name = %s\", (saq.SAQ_NODE,))\n row = c.fetchone()\n if row is not None:\n saq.SAQ_NODE_ID = row[0]\n logging.debug(\"got existing node id {} for {}\".format(saq.SAQ_NODE_ID, saq.SAQ_NODE))\n\n if saq.SAQ_NODE_ID is None:\n execute_with_retry(db, c, \"\"\"INSERT INTO nodes ( name, location, company_id, is_local, last_update ) \n VALUES ( %s, %s, %s, %s, NOW() )\"\"\", \n (saq.SAQ_NODE, saq.API_PREFIX, saq.COMPANY_ID, True),\n commit=True)\n\n c.execute(\"SELECT id FROM nodes WHERE name = %s\", (saq.SAQ_NODE,))\n row = c.fetchone()\n if row is None:\n logging.critical(\"unable to allocate a node_id from the database\")\n sys.exit(1)\n else:\n saq.SAQ_NODE_ID = row[0]\n logging.info(\"allocated node id {} for {}\".format(saq.SAQ_NODE_ID, saq.SAQ_NODE))", "def get_first(self) -> object:\n if self.root is None: # If tree is empty\n return None\n\n return self.root.value # Returning root value", "def __init__(self, initial_state):\n self.initial_state = initial_state\n self.final_state = [1, 2, 3, 8, 0, 4, 7, 6, 5]\n self.nodes = {}\n self.add_node(self.initial_state)\n self.add_node(self.final_state)\n self.results = []", "def compute_default(self):\n if self.default is None and callable(self.compute_default_fn):\n self.default=self.compute_default_fn() \n if self.default not in self.objects:\n self.objects.append(self.default)", "def __init__(self):\n self._idx = Node.index\n Node.index += 1", "def initial_state(self, parameters = None):\n if parameters is None:\n parameters = self._get_static_parameters_or_die()\n return Value(\n state=ed.Categorical(logits=parameters.get('initial_dist_logits')))", "def initialstate(self):\n return self.problem.initialstate", "def zero_val(self):\r\n self.piDD = {\"[0]\": None}\r\n self.top_node = \"[0]\"\r\n self.dim = 0", "def __init__(self):\n self.root = None\n self.k = None", "def __init__(self):\n self.root = None\n self.k = None", "def _fill_root(self):\n if self.parent in filled_variables:\n return f\"{self.name} {st_persistent_perc}P {st_k}k\"\n return self.name", "def get_initial(self):\n return self.initial", "def _new_node(self):\n self._size += 1\n return self._node_factory()", "def initial_state(self):\n # Network details elided.\n initial_state = None\n\n return initial_state", "def __init__(self, value, prev=None, next=None):\n\n self.prev = prev # the node before this one — 
defaults to None\n self.value = value # the value to store\n self.next = next # the node after this one — defaults to None", "def initial(self):\n\n self.var.Kfrost = loadmap('Kfrost')\n self.var.Afrost = loadmap('Afrost')\n self.var.FrostIndexThreshold = loadmap('FrostIndexThreshold')\n self.var.SnowWaterEquivalent = loadmap('SnowWaterEquivalent')\n\n # FrostIndexInit=ifthen(defined(self.var.MaskMap),scalar(loadmap('FrostIndexInitValue')))\n # self.var.FrostIndex=FrostIndexInit\n self.var.FrostIndex = loadmap('FrostIndexInitValue')\n # self.var.AfrostIndex=-(1-self.var.Afrost)*self.var.FrostIndex\n # initial Frost Index value", "def getRandom(self) -> int:\n\n return random.choice(self.nodes).val", "def __init__(self):\n self.left = None\n self.right = None\n self.depth = 0\n self.val = None\n self.id = None", "def DefaultValue(self):\n return tf.zeros(self.shape, dtype=self.dtype)", "def DefaultValue(self):\n return tf.zeros(self.shape, dtype=self.dtype)", "def __init__(self, initial_node):\n self.__nodes = MinPriorityQueue({initial_node : initial_node.estimate})", "def __init__(self):\n self.sum_of_node_inputs = 0\n self.output = 0\n self.delta = 0\n self.dp = 0\n self.onehot_label = 0", "def _getDefaultValue(self):\n value = self._getDefaultValue()\n return value.getData() if value else None", "def test_find_highest_value_node_first(self):\n nn = NeuralNet(0, 0, '', '', blank=True)\n nn.create_net(2, 2, 2, 2)\n nn.eta = 0.1\n\n # Override weights to static value for reproducibility\n for node in nn.layers[2].nodes:\n node.weights = [0.6, 0.6]\n\n nn.layers[3].nodes[0].weights = [1.0, 1.0]\n nn.layers[3].nodes[1].weights = [0.0, 0.0]\n\n val = nn.assign_output([2, 3], test=True)\n self.assertEqual(val, '10')", "def get_value(self) -> T:\n return clone(self.default_value)", "def get_value(self):\n if not self.visited:\n # first visit at node\n self.visited = True\n\n # value calculation\n for node, weight in self.predecessors:\n self.value += (node.get_value() * weight)\n\n # applying activation function\n if self.activation is not None:\n self.activation()\n\n self.calculated = True\n\n return self.value\n else:\n # visited node\n if self.calculated:\n # calculated in this computation\n return self.value\n else:\n # recurrent connection\n return self.past_value", "def first_value(self):\n return 0", "def _assign_init(self, first_item):\r\n if hasattr(self.scalar_op, 'identity'):\r\n return str(self.scalar_op.identity)\r\n else:\r\n assert isinstance(self.scalar_op, (scal.Maximum,\r\n scal.Minimum))\r\n return first_item", "def _assign_init(self, first_item):\r\n if hasattr(self.scalar_op, 'identity'):\r\n return str(self.scalar_op.identity)\r\n else:\r\n assert isinstance(self.scalar_op, (scal.Maximum,\r\n scal.Minimum))\r\n return first_item", "def __call__(self):\n value = self._value\n if value is None:\n value = self._init()\n self._value = value\n return value", "def state_initial(self):\n return self.states_initial()[0]", "def __init__(self):\n self.idx = None\n self.val = None\n self.left = None\n self.right = None", "def get_value(self):\r\n return 0", "def mutate(self, node, _):\n new_node = ast.Num(n=node.n + 1)\n return new_node", "def root_value(self):\n return self.__root.get_value()", "def initial(self):\n return self.args[3]", "def __init__(self):\n self.number = None\n self.nodes = []\n self.type = None\n self.group = None\n self.material = None\n self.key = -1", "def __init__(self):\n\n self.nodes = {}", "def identity(self):\r\n self.piDD = {\"[1]\": None}\r\n 
self.top_node = \"[1]\"\r\n self.dim = 0", "def get_first(self) -> object:\n #binary search tree == empty\n if self.root is None:\n return None\n\n # return\n return self.root.value", "def starting_point(self, random=False):\n sqrt_C = sqrtm(self.covariance)\n sqrt_L = np.sqrt(self.mean_intensity)\n if random:\n random_matrix = np.random.rand(self.n_nodes, self.n_nodes)\n M, _ = qr(random_matrix)\n else:\n M = np.eye(self.n_nodes)\n initial = np.dot(np.dot(sqrt_C, M), np.diag(1. / sqrt_L))\n return initial", "def __init__(self):\n self.root = Node(None)", "def setInitialValue(self, *args):\n return _libsbml.Trigger_setInitialValue(self, *args)", "def __init__(self, abstract_value=None, representation=None, index=None):\n\n if len(list(filter(None, [abstract_value, representation, index]))) != 1:\n raise ValueError('Expected exactly one initial value')\n\n if index is not None:\n self.index = index\n self._abstract_value = None\n else:\n self.index = batch.add_rows('vals_'+node_type.id, tf.zeros([1, node_type.value_type.representation_shape]))[0]\n node_type.value_type.__init__(abstract_value=abstract_value, representation=representation)\n del self._representation", "def __init__(self):\n self.val = None", "def __init__(self):\n self.root = Node('')", "def rec_default(self):\n pass", "def __init__(self):\n super().__init__()\n self._value = 0", "def default_value(self) -> float:\n return pulumi.get(self, \"default_value\")", "def getDefault():", "def prepare_node(self, node):\n # Every change at the position of node will be recognized\n aexpr(lambda: node.position, globals(), locals())\\\n .on_change(lambda obs, oldv, newv: self.set_node_position(node, *newv))", "def value(self):\n\n if self.state == Node.State.VALID:\n return self._value\n else:\n with _NodeStackFrame(self):\n self.state = Node.State.PENDING\n self.value = self.compute_value(*self.args, **self.kwargs)\n return self._value", "def fillNode(node, grounding, db):\n gn = copy.deepcopy(node)\n gn.val = query(gn, grounding, db)\n return gn", "def _init_node_attributes(self):\n assert False", "def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None", "def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None", "def _update_min(self):\n tmp = self\n while tmp.left is not None:\n tmp = tmp.left\n return tmp.parent.key" ]
[ "0.6917573", "0.6599723", "0.64693916", "0.62434894", "0.6088791", "0.605029", "0.6040265", "0.5898664", "0.58754563", "0.5820543", "0.5805469", "0.5794411", "0.57890224", "0.57560384", "0.5747938", "0.57314616", "0.5730434", "0.5725593", "0.57140493", "0.57140493", "0.56994367", "0.56713814", "0.5666693", "0.56612456", "0.5652481", "0.5645944", "0.56458586", "0.5624305", "0.56189084", "0.5617808", "0.56137997", "0.5607398", "0.5596775", "0.55964184", "0.5596218", "0.55950594", "0.558086", "0.55720526", "0.5569308", "0.55512476", "0.55413604", "0.5540463", "0.5531693", "0.5495316", "0.5485417", "0.5478051", "0.54755235", "0.5470715", "0.5461766", "0.546034", "0.5433562", "0.54317236", "0.54287916", "0.54287916", "0.54279566", "0.54207116", "0.54133886", "0.5410908", "0.5410448", "0.54101855", "0.540541", "0.5404037", "0.5400453", "0.5400453", "0.5394933", "0.5392656", "0.53907937", "0.538711", "0.5385489", "0.5383486", "0.53627026", "0.5347435", "0.5347435", "0.5345404", "0.5340262", "0.532986", "0.5327101", "0.53193027", "0.5317672", "0.5315903", "0.5313174", "0.5310825", "0.53099066", "0.5309453", "0.53000796", "0.5295013", "0.52859294", "0.5282753", "0.52809393", "0.5268559", "0.5235831", "0.52314967", "0.5229555", "0.5224865", "0.52206385", "0.5219064", "0.521509", "0.5213956", "0.52090657", "0.52090657", "0.5209046" ]
0.0
-1