code (stringlengths 13–1.2M) | order_type (stringclasses, 1 value) | original_example (dict) | step_ids (listlengths 1–5) |
---|---|---|---|
# Generated by Django 2.1.5 on 2019-08-03 23:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crm', '0003_auto_20190802_2211'),
]
operations = [
migrations.AlterModelOptions(
name='customerinfo',
options={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'},
),
]
|
normal
|
{
"blob_id": "b90fb1e657d4c7e186a7b889eee586527bec4413",
"index": 2040,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('crm', '0003_auto_20190802_2211')]\n operations = [migrations.AlterModelOptions(name='customerinfo', options\n ={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'})]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('crm', '0003_auto_20190802_2211')]\n operations = [migrations.AlterModelOptions(name='customerinfo', options\n ={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'})]\n",
"step-5": "# Generated by Django 2.1.5 on 2019-08-03 23:15\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('crm', '0003_auto_20190802_2211'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='customerinfo',\n options={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from chalicelib.utilities import *
def Error(app):
@app.route('/errors', cors=True, methods=['POST'])
@printError
def errors():
request = app.current_request
data = request.json_body
print(data)
return data
|
normal
|
{
"blob_id": "f100757fcb1bef334f9f8eacae83af551d2bac5b",
"index": 3239,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Error(app):\n\n @app.route('/errors', cors=True, methods=['POST'])\n @printError\n def errors():\n request = app.current_request\n data = request.json_body\n print(data)\n return data\n",
"step-3": "from chalicelib.utilities import *\n\n\ndef Error(app):\n\n @app.route('/errors', cors=True, methods=['POST'])\n @printError\n def errors():\n request = app.current_request\n data = request.json_body\n print(data)\n return data\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# coding: utf-8
# In[1]:
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from pandas import DataFrame, Series
import os, re
# In[2]:
OUTPUT_EXCEL = '월별원내약품사용현황.xlsx'
# In[3]:
# 데이타셋 준비
data_source_dir = '사용량월별통계/원내'
dfs = []
for fname in os.listdir(data_source_dir):
fn, ext = os.path.splitext(fname)
if ext in ['.xls', '.xlsx']:
df = pd.read_excel(os.path.join(data_source_dir, fname))
df['사용(개시)년월'] = fn
dfs.append(df)
use_amount_df = pd.concat(dfs, ignore_index=True)
# In[4]:
drug_standard_df = pd.read_json('drug.json').T
drug_info_df = pd.read_excel('약품정보.xls')
use_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드', '사용(개시)년월']], on='약품코드', how='left')
use_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명', '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')
use_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])
use_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4]+'-'+str(x)[4:6])
use_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df['사용개시년월'])
use_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])
use_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({1: '원외', 2: '원외/원내', 3: '원내'})
use_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({0: '일반', 1: '마약', 2: '향정약', 3: '독약', 4: '한방약', 5: '고가약'})
# In[5]:
def get_last(s):
try:
return max(s)
except:
return s
# In[6]:
months = use_amount_df['사용(개시)년월'].unique()
months = sorted(months.tolist(), reverse=1)
use_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(get_last)
use_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.index(x) if x in months else -1)
# In[7]:
use_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']
# In[8]:
use_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')
# In[9]:
pat = '(\(([^\d].*?)\)+\s*)|퇴장방지\s*|생산원가보전,*\s*|사용장려(비\s*\d+원|및|비용지급,*\s*)'
use_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)', '약품명(한글)': '약품명(원내)'})
use_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat, '')
# In[10]:
pvt = use_amount_in_df.pivot_table(index = ['EDI코드','약품명(드럭인포)', '성분명','약품코드','약품명(원내)','효능코드명','규격단위', '최근미사용월수'], columns=['사용(개시)년월'], values=['사용량'], aggfunc=sum)
# In[11]:
pvt.to_excel(OUTPUT_EXCEL)
os.startfile(OUTPUT_EXCEL)
# In[ ]:
|
normal
|
{
"blob_id": "16b425d7b8cde1aabe038ccae6922091afb84415",
"index": 411,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\n<mask token>\n",
"step-3": "<mask token>\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\ndrug_standard_df = pd.read_json('drug.json').T\ndrug_info_df = pd.read_excel('약품정보.xls')\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드',\n '사용(개시)년월']], on='약품코드', how='left')\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명',\n '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4] +\n '-' + str(x)[4:6])\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df[\n '사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({(1): '원외', (\n 2): '원외/원내', (3): '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({(0): '일반', (1): '마약',\n (2): '향정약', (3): '독약', (4): '한방약', (5): '고가약'})\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(\n get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.\n index(x) if x in months else -1)\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\nuse_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')\npat = (\n '(\\\\(([^\\\\d].*?)\\\\)+\\\\s*)|퇴장방지\\\\s*|생산원가보전,*\\\\s*|사용장려(비\\\\s*\\\\d+원|및|비용지급,*\\\\s*)'\n )\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)',\n '약품명(한글)': '약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat,\n '')\npvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명',\n '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'],\n values=['사용량'], aggfunc=sum)\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n",
"step-4": "import matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\nimport os, re\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\ndrug_standard_df = pd.read_json('drug.json').T\ndrug_info_df = pd.read_excel('약품정보.xls')\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드',\n '사용(개시)년월']], on='약품코드', how='left')\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명',\n '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4] +\n '-' + str(x)[4:6])\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df[\n '사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({(1): '원외', (\n 2): '원외/원내', (3): '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({(0): '일반', (1): '마약',\n (2): '향정약', (3): '독약', (4): '한방약', (5): '고가약'})\n\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(\n get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.\n index(x) if x in months else -1)\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\nuse_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')\npat = (\n '(\\\\(([^\\\\d].*?)\\\\)+\\\\s*)|퇴장방지\\\\s*|생산원가보전,*\\\\s*|사용장려(비\\\\s*\\\\d+원|및|비용지급,*\\\\s*)'\n )\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)',\n '약품명(한글)': '약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat,\n '')\npvt = use_amount_in_df.pivot_table(index=['EDI코드', '약품명(드럭인포)', '성분명',\n '약품코드', '약품명(원내)', '효능코드명', '규격단위', '최근미사용월수'], columns=['사용(개시)년월'],\n values=['사용량'], aggfunc=sum)\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n",
"step-5": "\n# coding: utf-8\n\n# In[1]:\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame, Series\nimport os, re\n\n\n# In[2]:\n\nOUTPUT_EXCEL = '월별원내약품사용현황.xlsx'\n\n\n# In[3]:\n\n# 데이타셋 준비\ndata_source_dir = '사용량월별통계/원내'\ndfs = []\nfor fname in os.listdir(data_source_dir):\n fn, ext = os.path.splitext(fname)\n if ext in ['.xls', '.xlsx']:\n df = pd.read_excel(os.path.join(data_source_dir, fname))\n df['사용(개시)년월'] = fn\n dfs.append(df)\nuse_amount_df = pd.concat(dfs, ignore_index=True)\n\n\n# In[4]:\n\ndrug_standard_df = pd.read_json('drug.json').T\n\ndrug_info_df = pd.read_excel('약품정보.xls')\n\nuse_amount_df = pd.merge(drug_info_df, use_amount_df[['사용량', '약품코드', '사용(개시)년월']], on='약품코드', how='left')\n\nuse_amount_df = pd.merge(use_amount_df, drug_standard_df[['보험코드', '제품명', '판매사', '성분/함량']], left_on='EDI코드', right_on='보험코드', how='left')\n\nuse_amount_df['제품명'] = use_amount_df['제품명'].fillna(use_amount_df['약품명(한글)'])\n\nuse_amount_df['사용개시년월'] = use_amount_df['수가시작일자'].map(lambda x: str(x)[0:4]+'-'+str(x)[4:6])\n\nuse_amount_df['사용(개시)년월'] = use_amount_df['사용(개시)년월'].fillna(use_amount_df['사용개시년월'])\nuse_amount_df['성분명'] = use_amount_df['성분명'].fillna(use_amount_df['성분/함량'])\n\nuse_amount_df['원내/원외 처방구분'] = use_amount_df['원내/원외 처방구분'].map({1: '원외', 2: '원외/원내', 3: '원내'})\nuse_amount_df['약품법적구분'] = use_amount_df['약품법적구분'].map({0: '일반', 1: '마약', 2: '향정약', 3: '독약', 4: '한방약', 5: '고가약'})\n\n\n# In[5]:\n\ndef get_last(s):\n try:\n return max(s)\n except:\n return s\n\n\n# In[6]:\n\nmonths = use_amount_df['사용(개시)년월'].unique()\nmonths = sorted(months.tolist(), reverse=1)\nuse_amount_df['최후사용월'] = use_amount_df.groupby(['제품명'])['사용(개시)년월'].transform(get_last)\nuse_amount_df['최근미사용월수'] = use_amount_df['최후사용월'].map(lambda x: months.index(x) if x in months else -1)\n\n\n# In[7]:\n\nuse_amount_in_df = use_amount_df[use_amount_df['원내/원외 처방구분'] != '원외']\n\n\n# In[8]:\n\nuse_amount_in_df['사용량'] = use_amount_in_df['사용량'].fillna('오픈후미사용')\n\n\n# In[9]:\n\npat = '(\\(([^\\d].*?)\\)+\\s*)|퇴장방지\\s*|생산원가보전,*\\s*|사용장려(비\\s*\\d+원|및|비용지급,*\\s*)'\nuse_amount_in_df = use_amount_in_df.rename(columns={'제품명': '약품명(드럭인포)', '약품명(한글)': '약품명(원내)'})\nuse_amount_in_df['약품명(드럭인포)'] = use_amount_in_df['약품명(드럭인포)'].str.replace(pat, '')\n\n\n# In[10]:\n\npvt = use_amount_in_df.pivot_table(index = ['EDI코드','약품명(드럭인포)', '성분명','약품코드','약품명(원내)','효능코드명','규격단위', '최근미사용월수'], columns=['사용(개시)년월'], values=['사용량'], aggfunc=sum)\n\n\n# In[11]:\n\npvt.to_excel(OUTPUT_EXCEL)\nos.startfile(OUTPUT_EXCEL)\n\n\n# In[ ]:\n\n\n\n",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
from typing import *
class Solution:
def uniquePaths(self, m: int, n: int) -> int:
map_: List[List[int]] = [[0 if (i > 0 and j > 0) else 1 for j in range(m)] for i in range(n)]
for row in range(1, n):
for col in range(1, m):
map_[row][col] = map_[row][col - 1] + map_[row - 1][col]
# [map_[row][col] := map_[row][col - 1] + map_[row - 1][col] for col in range(1, m) for row in range(1, n)]
return map_[-1][-1]
print(Solution().uniquePaths(7, 3))
|
normal
|
{
"blob_id": "e2a38d38d2ab750cf775ed0fbdb56bc6fc7300c4",
"index": 8934,
"step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-4": "from typing import *\n\n\nclass Solution:\n\n def uniquePaths(self, m: int, n: int) ->int:\n map_: List[List[int]] = [[(0 if i > 0 and j > 0 else 1) for j in\n range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-5": "from typing import *\n\n\nclass Solution:\n def uniquePaths(self, m: int, n: int) -> int:\n map_: List[List[int]] = [[0 if (i > 0 and j > 0) else 1 for j in range(m)] for i in range(n)]\n for row in range(1, n):\n for col in range(1, m):\n map_[row][col] = map_[row][col - 1] + map_[row - 1][col]\n\n # [map_[row][col] := map_[row][col - 1] + map_[row - 1][col] for col in range(1, m) for row in range(1, n)]\n return map_[-1][-1]\n\n\nprint(Solution().uniquePaths(7, 3))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from nonebot_plugin_datastore import get_plugin_data
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import Mapped, MappedAsDataclass, mapped_column
Model = get_plugin_data().Model
class MorningGreeting(MappedAsDataclass, Model):
__table_args__ = (
UniqueConstraint(
"platform",
"bot_id",
"group_id",
"guild_id",
"channel_id",
name="unique_morning_greeting",
),
)
id: Mapped[int] = mapped_column(init=False, primary_key=True)
platform: Mapped[str]
bot_id: Mapped[str]
group_id: Mapped[str] = mapped_column(default="")
guild_id: Mapped[str] = mapped_column(default="")
channel_id: Mapped[str] = mapped_column(default="")
|
normal
|
{
"blob_id": "28e5667db4a620ec627cd94154a024b4c8dbc5f7",
"index": 6171,
"step-1": "<mask token>\n\n\nclass MorningGreeting(MappedAsDataclass, Model):\n <mask token>\n id: Mapped[int] = mapped_column(init=False, primary_key=True)\n platform: Mapped[str]\n bot_id: Mapped[str]\n group_id: Mapped[str] = mapped_column(default='')\n guild_id: Mapped[str] = mapped_column(default='')\n channel_id: Mapped[str] = mapped_column(default='')\n",
"step-2": "<mask token>\n\n\nclass MorningGreeting(MappedAsDataclass, Model):\n __table_args__ = UniqueConstraint('platform', 'bot_id', 'group_id',\n 'guild_id', 'channel_id', name='unique_morning_greeting'),\n id: Mapped[int] = mapped_column(init=False, primary_key=True)\n platform: Mapped[str]\n bot_id: Mapped[str]\n group_id: Mapped[str] = mapped_column(default='')\n guild_id: Mapped[str] = mapped_column(default='')\n channel_id: Mapped[str] = mapped_column(default='')\n",
"step-3": "<mask token>\nModel = get_plugin_data().Model\n\n\nclass MorningGreeting(MappedAsDataclass, Model):\n __table_args__ = UniqueConstraint('platform', 'bot_id', 'group_id',\n 'guild_id', 'channel_id', name='unique_morning_greeting'),\n id: Mapped[int] = mapped_column(init=False, primary_key=True)\n platform: Mapped[str]\n bot_id: Mapped[str]\n group_id: Mapped[str] = mapped_column(default='')\n guild_id: Mapped[str] = mapped_column(default='')\n channel_id: Mapped[str] = mapped_column(default='')\n",
"step-4": "from nonebot_plugin_datastore import get_plugin_data\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy.orm import Mapped, MappedAsDataclass, mapped_column\nModel = get_plugin_data().Model\n\n\nclass MorningGreeting(MappedAsDataclass, Model):\n __table_args__ = UniqueConstraint('platform', 'bot_id', 'group_id',\n 'guild_id', 'channel_id', name='unique_morning_greeting'),\n id: Mapped[int] = mapped_column(init=False, primary_key=True)\n platform: Mapped[str]\n bot_id: Mapped[str]\n group_id: Mapped[str] = mapped_column(default='')\n guild_id: Mapped[str] = mapped_column(default='')\n channel_id: Mapped[str] = mapped_column(default='')\n",
"step-5": "from nonebot_plugin_datastore import get_plugin_data\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy.orm import Mapped, MappedAsDataclass, mapped_column\n\nModel = get_plugin_data().Model\n\n\nclass MorningGreeting(MappedAsDataclass, Model):\n __table_args__ = (\n UniqueConstraint(\n \"platform\",\n \"bot_id\",\n \"group_id\",\n \"guild_id\",\n \"channel_id\",\n name=\"unique_morning_greeting\",\n ),\n )\n\n id: Mapped[int] = mapped_column(init=False, primary_key=True)\n platform: Mapped[str]\n bot_id: Mapped[str]\n group_id: Mapped[str] = mapped_column(default=\"\")\n guild_id: Mapped[str] = mapped_column(default=\"\")\n channel_id: Mapped[str] = mapped_column(default=\"\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
from pathlib import Path
import Algorithmia
API_KEY = os.environ.get('ALGO_API_KEY')
DATA_DIR_BASE = os.environ.get('DATA_DIR')
ORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'
TRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'
def upload(client, fnames):
for im in fnames:
im = Path(im)
client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())
def download(client, folder):
folder = Path(folder)
transfered = client.dir(TRANSFERD_DATA_DIR)
for im in transfered.files():
(folder / Path(im.url).name).write_bytes(im.getBytes())
def style_transfer(fnames, out_folder, filter_name):
client = Algorithmia.client(API_KEY)
client.dir(ORIGINAL_DATA_DIR).create()
client.dir(TRANSFERD_DATA_DIR).create()
upload(client, fnames)
inputs = {
"images": [ORIGINAL_DATA_DIR + Path(im).name for im in fnames],
"savePaths": [TRANSFERD_DATA_DIR + Path(im).name for im in fnames],
"filterName": filter_name
}
algorithm_name = 'deeplearning/DeepFilter/0.6.0'
algo = client.algo(algorithm_name)
result = algo.pipe(inputs).result
download(client, out_folder)
return result
|
normal
|
{
"blob_id": "2536b22c2d154e87bdecb72cc967d8c56ddb73fb",
"index": 609,
"step-1": "<mask token>\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\n<mask token>\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n upload(client, fnames)\n inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in\n fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in\n fnames], 'filterName': filter_name}\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n download(client, out_folder)\n return result\n",
"step-2": "<mask token>\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\ndef download(client, folder):\n folder = Path(folder)\n transfered = client.dir(TRANSFERD_DATA_DIR)\n for im in transfered.files():\n (folder / Path(im.url).name).write_bytes(im.getBytes())\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n upload(client, fnames)\n inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in\n fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in\n fnames], 'filterName': filter_name}\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n download(client, out_folder)\n return result\n",
"step-3": "<mask token>\nAPI_KEY = os.environ.get('ALGO_API_KEY')\nDATA_DIR_BASE = os.environ.get('DATA_DIR')\nORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'\nTRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\ndef download(client, folder):\n folder = Path(folder)\n transfered = client.dir(TRANSFERD_DATA_DIR)\n for im in transfered.files():\n (folder / Path(im.url).name).write_bytes(im.getBytes())\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n upload(client, fnames)\n inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in\n fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in\n fnames], 'filterName': filter_name}\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n download(client, out_folder)\n return result\n",
"step-4": "import os\nfrom pathlib import Path\nimport Algorithmia\nAPI_KEY = os.environ.get('ALGO_API_KEY')\nDATA_DIR_BASE = os.environ.get('DATA_DIR')\nORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'\nTRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\ndef download(client, folder):\n folder = Path(folder)\n transfered = client.dir(TRANSFERD_DATA_DIR)\n for im in transfered.files():\n (folder / Path(im.url).name).write_bytes(im.getBytes())\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n upload(client, fnames)\n inputs = {'images': [(ORIGINAL_DATA_DIR + Path(im).name) for im in\n fnames], 'savePaths': [(TRANSFERD_DATA_DIR + Path(im).name) for im in\n fnames], 'filterName': filter_name}\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n download(client, out_folder)\n return result\n",
"step-5": "import os\nfrom pathlib import Path\n\nimport Algorithmia\n\n\nAPI_KEY = os.environ.get('ALGO_API_KEY')\nDATA_DIR_BASE = os.environ.get('DATA_DIR')\nORIGINAL_DATA_DIR = DATA_DIR_BASE + 'original/'\nTRANSFERD_DATA_DIR = DATA_DIR_BASE + 'transferd/'\n\n\ndef upload(client, fnames):\n for im in fnames:\n im = Path(im)\n client.file(ORIGINAL_DATA_DIR + str(im.name)).put(im.read_bytes())\n\n\ndef download(client, folder):\n folder = Path(folder)\n transfered = client.dir(TRANSFERD_DATA_DIR)\n for im in transfered.files():\n (folder / Path(im.url).name).write_bytes(im.getBytes())\n\n\ndef style_transfer(fnames, out_folder, filter_name):\n client = Algorithmia.client(API_KEY)\n client.dir(ORIGINAL_DATA_DIR).create()\n client.dir(TRANSFERD_DATA_DIR).create()\n\n upload(client, fnames)\n inputs = {\n \"images\": [ORIGINAL_DATA_DIR + Path(im).name for im in fnames],\n \"savePaths\": [TRANSFERD_DATA_DIR + Path(im).name for im in fnames],\n \"filterName\": filter_name\n }\n\n algorithm_name = 'deeplearning/DeepFilter/0.6.0'\n algo = client.algo(algorithm_name)\n result = algo.pipe(inputs).result\n\n download(client, out_folder)\n return result\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
import re
pdfs_file = './pdf_names_2017.txt'
sessions_file = './session_names_2017.txt'
with open(pdfs_file) as f:
pdf_names = f.read().splitlines()
with open(sessions_file) as f:
session_names = f.read().splitlines()
#for i in xrange(0,len(pdf_names)):
# print str(i+1).zfill(3) + '_-_' + pdf_names[i][:-4] + '_-_' + session_names[i] + pdf_names[i][-4:]
card_pre = """
<section class="section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp">
<header class="section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white">
<i class="material-icons">record_voice_over</i>
</header>
<div class="mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone">
<div class="mdl-card__supporting-text">
"""
card_content = """
<h4>Incidental_Findings_-_Introduction_and_Overview</h4>
Monday_0700_LBerland
"""
card_post_1 = """
</div>
<div class="mdl-card__actions">
<a href="pdf/"""
card_post_2 = """" target="_blank" class="mdl-button">Handout</a>
</div>
</div>
</section>
"""
"""
<section class="section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp">
<header class="section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white">
<i class="material-icons">record_voice_over</i>
</header>
<div class="mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone">
<div class="mdl-card__supporting-text">
<h4>Incidental_Findings_-_Introduction_and_Overview</h4>
Monday_0700_LBerland
</div>
<div class="mdl-card__actions">
<a href="#" class="mdl-button">Handout</a>
</div>
</div>
</section>
"""
for i in xrange(0,len(pdf_names)):
print card_pre + "<h4>" + session_names[i] + "</h4>" + pdf_names[i][:-4].replace("_"," ") + card_post_1 + pdf_names[i] + card_post_2
|
normal
|
{
"blob_id": "e686d8617360c5a3ce35bd4d2bdeb2376b33f53a",
"index": 9726,
"step-1": "#!/usr/bin/env python\n\nimport re\n\n\npdfs_file = './pdf_names_2017.txt'\nsessions_file = './session_names_2017.txt'\n\nwith open(pdfs_file) as f:\n pdf_names = f.read().splitlines()\n\nwith open(sessions_file) as f:\n session_names = f.read().splitlines()\n\n#for i in xrange(0,len(pdf_names)):\n# print str(i+1).zfill(3) + '_-_' + pdf_names[i][:-4] + '_-_' + session_names[i] + pdf_names[i][-4:]\n\n\ncard_pre = \"\"\"\n<section class=\"section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp\">\n <header class=\"section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white\">\n <i class=\"material-icons\">record_voice_over</i>\n </header>\n <div class=\"mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone\">\n <div class=\"mdl-card__supporting-text\">\n\"\"\"\n\ncard_content = \"\"\" \n<h4>Incidental_Findings_-_Introduction_and_Overview</h4>\n Monday_0700_LBerland\n\"\"\"\n\ncard_post_1 = \"\"\"\n </div>\n <div class=\"mdl-card__actions\">\n <a href=\"pdf/\"\"\"\n\n\ncard_post_2 = \"\"\"\" target=\"_blank\" class=\"mdl-button\">Handout</a>\n </div>\n </div>\n</section>\n\"\"\"\n\n\"\"\"\n<section class=\"section--center mdl-grid mdl-grid--no-spacing mdl-shadow--2dp\">\n <header class=\"section__play-btn mdl-cell mdl-cell--3-col-desktop mdl-cell--2-col-tablet mdl-cell--4-col-phone mdl-color--teal-100 mdl-color-text--white\">\n <i class=\"material-icons\">record_voice_over</i>\n </header>\n <div class=\"mdl-card mdl-cell mdl-cell--9-col-desktop mdl-cell--6-col-tablet mdl-cell--4-col-phone\">\n <div class=\"mdl-card__supporting-text\">\n <h4>Incidental_Findings_-_Introduction_and_Overview</h4>\n Monday_0700_LBerland\n </div>\n <div class=\"mdl-card__actions\">\n <a href=\"#\" class=\"mdl-button\">Handout</a>\n </div>\n </div>\n</section>\n\"\"\"\n\nfor i in xrange(0,len(pdf_names)):\n print card_pre + \"<h4>\" + session_names[i] + \"</h4>\" + pdf_names[i][:-4].replace(\"_\",\" \") + card_post_1 + pdf_names[i] + card_post_2\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from time import sleep
import pytest
import allure
from app.debug_api import DebugAPI
from app.check_api import HandlersAPI
from locators.movies_details_locators import MoviesDetailsPageLocators
from locators.movies_locators import MoviesPageLocators
from locators.shedule_locators import ShedulePageLocators
from screens.MoviesPage import MoviesPage
from screens.MoviesDetailsPage import MoviesDetailsPage
from screens.ShedulePage import ShedulePage
from utils.internet import enable_proxy
@pytest.mark.usefixtures('driver')
class Test_001_ShedulePage:
@classmethod
def setup_class(cls):
cls.movies_locators = MoviesPageLocators()
cls.shedule_locators = ShedulePageLocators()
cls.event_detail_page_locators = MoviesDetailsPageLocators()
@staticmethod
def teardown_class(cls):
enable_proxy(mode=False)
def test_001_elements_exists(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
найти кнопку отмены, кнопку карты, поле поиска"""
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.event_detail_page_locators.
btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
self.shedule_page.find_element(*self.shedule_locators.btn_back)
self.shedule_page.find_element(*self.shedule_locators.btn_map)
self.shedule_page.find_element(*self.shedule_locators.search_field)
def test_002_valid_filters(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверить соответствие фильтров и ответа сервера
проверить порядок фильтров"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(5)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
sleep(5)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(5)
self.shedule_page.check_rows_filters(dbg_api)
finally:
dbg_api.kill()
def test_003_check_time_ticket_filter(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверять соответствие времени на билетах с выставленными фильтрами"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.
url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.
event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(2)
self.shedule_page.compare_tickets_datetime_options_second_filter(
dbg_api)
finally:
dbg_api.kill()
|
normal
|
{
"blob_id": "c7c412fe4e2d53af1b4f2a55bd3453496767890d",
"index": 975,
"step-1": "<mask token>\n\n\n@pytest.mark.usefixtures('driver')\nclass Test_001_ShedulePage:\n <mask token>\n <mask token>\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-2": "<mask token>\n\n\n@pytest.mark.usefixtures('driver')\nclass Test_001_ShedulePage:\n <mask token>\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-3": "<mask token>\n\n\n@pytest.mark.usefixtures('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"step-4": "from time import sleep\nimport pytest\nimport allure\nfrom app.debug_api import DebugAPI\nfrom app.check_api import HandlersAPI\nfrom locators.movies_details_locators import MoviesDetailsPageLocators\nfrom locators.movies_locators import MoviesPageLocators\nfrom locators.shedule_locators import ShedulePageLocators\nfrom screens.MoviesPage import MoviesPage\nfrom screens.MoviesDetailsPage import MoviesDetailsPage\nfrom screens.ShedulePage import ShedulePage\nfrom utils.internet import enable_proxy\n\n\n@pytest.mark.usefixtures('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n 
dbg_api.kill()\n",
"step-5": null,
"step-ids": [
4,
5,
6,
7
]
}
|
[
4,
5,
6,
7
] |
import logging
formatter = logging.Formatter("%(asctime)s [%(levelname)s] : %(message)s")
log = logging.getLogger("othello")
log.setLevel(logging.DEBUG)
stream_hander = logging.StreamHandler()
stream_hander.setFormatter(formatter)
log.addHandler(stream_hander)
|
normal
|
{
"blob_id": "675fbdfd519d00ab10bf613e8abb7338e484fe65",
"index": 57,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlog.setLevel(logging.DEBUG)\n<mask token>\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n",
"step-3": "<mask token>\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')\nlog = logging.getLogger('othello')\nlog.setLevel(logging.DEBUG)\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n",
"step-4": "import logging\nformatter = logging.Formatter('%(asctime)s [%(levelname)s] : %(message)s')\nlog = logging.getLogger('othello')\nlog.setLevel(logging.DEBUG)\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n",
"step-5": "import logging\n\n\nformatter = logging.Formatter(\"%(asctime)s [%(levelname)s] : %(message)s\")\n\nlog = logging.getLogger(\"othello\")\nlog.setLevel(logging.DEBUG)\n\nstream_hander = logging.StreamHandler()\nstream_hander.setFormatter(formatter)\nlog.addHandler(stream_hander)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import logging
from unittest.mock import patch, Mock
from intake.tests.base_testcases import ExternalNotificationsPatchTestCase
from intake.tests import mock, factories
from intake.tests.mock_org_answers import get_answers_for_orgs
from intake.management.commands import send_followups
from user_accounts.models import Organization
from project.tests.assertions import assertInLogsCount
class TestCommand(ExternalNotificationsPatchTestCase):
fixtures = [
'counties', 'organizations']
@patch('intake.management.commands.send_followups.is_the_weekend')
@patch('intake.management.commands.send_followups.FollowupsService')
def test_doesnt_do_anything_on_the_weekend(
self, FollowupsService, is_the_weekend):
is_the_weekend.return_value = True
command = send_followups.Command()
command.stdout = Mock()
command.handle()
FollowupsService.assert_not_called()
@patch('intake.management.commands.send_followups.is_the_weekend')
def test_expected_weekday_run(self, is_the_weekend):
is_the_weekend.return_value = False
org = Organization.objects.get(slug='ebclc')
dates = sorted([mock.get_old_date() for i in range(464, 469)])
for date, pk in zip(dates, range(464, 469)):
factories.FormSubmissionWithOrgsFactory.create(
id=pk,
date_received=date,
organizations=[org],
answers=get_answers_for_orgs(
[org],
contact_preferences=[
'prefers_email',
'prefers_sms'],
phone='4445551111',
email='test@test.com',
))
command = send_followups.Command()
command.stdout = Mock()
with self.assertLogs(
'project.services.logging_service', logging.INFO) as logs:
command.handle()
self.assertEqual(
len(self.notifications.email_followup.send.mock_calls), 4)
assertInLogsCount(logs, {'event_name=app_followup_sent': 4})
|
normal
|
{
"blob_id": "5cb67e5fcedafca4ce124e4094cbd8e1e9d95bb4",
"index": 3740,
"step-1": "<mask token>\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n <mask token>\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(self, FollowupsService,\n is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n <mask token>\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(self, FollowupsService,\n is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n def test_expected_weekday_run(self, is_the_weekend):\n is_the_weekend.return_value = False\n org = Organization.objects.get(slug='ebclc')\n dates = sorted([mock.get_old_date() for i in range(464, 469)])\n for date, pk in zip(dates, range(464, 469)):\n factories.FormSubmissionWithOrgsFactory.create(id=pk,\n date_received=date, organizations=[org], answers=\n get_answers_for_orgs([org], contact_preferences=[\n 'prefers_email', 'prefers_sms'], phone='4445551111', email=\n 'test@test.com'))\n command = send_followups.Command()\n command.stdout = Mock()\n with self.assertLogs('project.services.logging_service', logging.INFO\n ) as logs:\n command.handle()\n self.assertEqual(len(self.notifications.email_followup.send.\n mock_calls), 4)\n assertInLogsCount(logs, {'event_name=app_followup_sent': 4})\n",
"step-3": "<mask token>\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n fixtures = ['counties', 'organizations']\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(self, FollowupsService,\n is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n def test_expected_weekday_run(self, is_the_weekend):\n is_the_weekend.return_value = False\n org = Organization.objects.get(slug='ebclc')\n dates = sorted([mock.get_old_date() for i in range(464, 469)])\n for date, pk in zip(dates, range(464, 469)):\n factories.FormSubmissionWithOrgsFactory.create(id=pk,\n date_received=date, organizations=[org], answers=\n get_answers_for_orgs([org], contact_preferences=[\n 'prefers_email', 'prefers_sms'], phone='4445551111', email=\n 'test@test.com'))\n command = send_followups.Command()\n command.stdout = Mock()\n with self.assertLogs('project.services.logging_service', logging.INFO\n ) as logs:\n command.handle()\n self.assertEqual(len(self.notifications.email_followup.send.\n mock_calls), 4)\n assertInLogsCount(logs, {'event_name=app_followup_sent': 4})\n",
"step-4": "import logging\nfrom unittest.mock import patch, Mock\nfrom intake.tests.base_testcases import ExternalNotificationsPatchTestCase\nfrom intake.tests import mock, factories\nfrom intake.tests.mock_org_answers import get_answers_for_orgs\nfrom intake.management.commands import send_followups\nfrom user_accounts.models import Organization\nfrom project.tests.assertions import assertInLogsCount\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n fixtures = ['counties', 'organizations']\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(self, FollowupsService,\n is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n def test_expected_weekday_run(self, is_the_weekend):\n is_the_weekend.return_value = False\n org = Organization.objects.get(slug='ebclc')\n dates = sorted([mock.get_old_date() for i in range(464, 469)])\n for date, pk in zip(dates, range(464, 469)):\n factories.FormSubmissionWithOrgsFactory.create(id=pk,\n date_received=date, organizations=[org], answers=\n get_answers_for_orgs([org], contact_preferences=[\n 'prefers_email', 'prefers_sms'], phone='4445551111', email=\n 'test@test.com'))\n command = send_followups.Command()\n command.stdout = Mock()\n with self.assertLogs('project.services.logging_service', logging.INFO\n ) as logs:\n command.handle()\n self.assertEqual(len(self.notifications.email_followup.send.\n mock_calls), 4)\n assertInLogsCount(logs, {'event_name=app_followup_sent': 4})\n",
"step-5": "import logging\nfrom unittest.mock import patch, Mock\nfrom intake.tests.base_testcases import ExternalNotificationsPatchTestCase\nfrom intake.tests import mock, factories\nfrom intake.tests.mock_org_answers import get_answers_for_orgs\nfrom intake.management.commands import send_followups\nfrom user_accounts.models import Organization\nfrom project.tests.assertions import assertInLogsCount\n\n\nclass TestCommand(ExternalNotificationsPatchTestCase):\n\n fixtures = [\n 'counties', 'organizations']\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n @patch('intake.management.commands.send_followups.FollowupsService')\n def test_doesnt_do_anything_on_the_weekend(\n self, FollowupsService, is_the_weekend):\n is_the_weekend.return_value = True\n command = send_followups.Command()\n command.stdout = Mock()\n command.handle()\n FollowupsService.assert_not_called()\n\n @patch('intake.management.commands.send_followups.is_the_weekend')\n def test_expected_weekday_run(self, is_the_weekend):\n is_the_weekend.return_value = False\n org = Organization.objects.get(slug='ebclc')\n dates = sorted([mock.get_old_date() for i in range(464, 469)])\n for date, pk in zip(dates, range(464, 469)):\n factories.FormSubmissionWithOrgsFactory.create(\n id=pk,\n date_received=date,\n organizations=[org],\n answers=get_answers_for_orgs(\n [org],\n contact_preferences=[\n 'prefers_email',\n 'prefers_sms'],\n phone='4445551111',\n email='test@test.com',\n ))\n command = send_followups.Command()\n command.stdout = Mock()\n with self.assertLogs(\n 'project.services.logging_service', logging.INFO) as logs:\n command.handle()\n self.assertEqual(\n len(self.notifications.email_followup.send.mock_calls), 4)\n assertInLogsCount(logs, {'event_name=app_followup_sent': 4})\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import csv
import sqlite3
import time
from datetime import datetime, timedelta
import pandas as pd
import pytz
import json
import urllib
import numpy as np
DATABASE = '/var/www/html/citibikeapp/citibikeapp/citibike_change.db'
def execute_query(cur,query, args=()):
cur = cur.execute(query, args)
rows = cur.fetchall()
# cur.close()
return rows
def convertTime(et):
"""'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' """
hour = int(et[11:13])
if et.find('PM') != -1 and hour != 12:
dateString = et[:10]
hour = hour + 12
et = dateString + ' ' + str(hour) + et[13:19]
elif et.find('AM') != -1 and hour == 12:
dateString = et[:10]
hour = 0
et = dateString + ' ' + '0'+str(hour) + et[13:19]
else:
et = et[:19]
return et
def getNYtimenow():
tz = pytz.timezone('America/New_York')
time = str(datetime.now(tz))[:19]
return time
def datetimeStringToObject(timeString):
"""convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object"""
try:
year = int(timeString[:4])
month = int(timeString[5:7])
day = int(timeString[8:10])
hour = int(timeString[11:13])
minute = int(timeString[14:16])
result = datetime(year, month, day, hour, minute)
return result
except:
return None
def timeStringToObject(timeString):
"""convert a string in format hh:mm:ss to a datetime object with current date"""
try:
# year = datetime.now().year
# month = datetime.now().month
# day = datetime.now().day
hour = int(timeString[:2])
minute = int(timeString[3:5])
result = datetime.today().replace(hour=hour, minute=minute, second=0, microsecond=0)
return result
except:
return None
def notSignedIn(vID):
"""Return true is the drivers did not enter vehicle ID,
return False if the drivers have entered the vehicle ID"""
if str(vID) == '0':
return True
return False
def resetEstComp(cur, vID):
"""estimate completion time goes to 0"""
cur.execute("""UPDATE OpenTasks SET estComplete = null WHERE vID = ? """,[vID])
def getNextFixOrderNum(cur,vID):
"""return the integer which is one larger than the order number of the last fixed task"""
orderNum = execute_query(cur, """SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1""", [vID])[0][0]
orderNum = int(orderNum) + 1
return orderNum
def getNextOrderNum(cur,vID):
"""return the integer which is one larger than the order number of the last task"""
orderNum = execute_query(cur,"""SELECT Count(*) FROM OpenTasks where vID = ?""", [vID])[0][0]
orderNum = int(orderNum) + 1
return orderNum
def fixOrderBeforeInsert(cur,vID,orderNum):
"""Increment later tasks' order number by 1, orderNum is the order of the inserted task
should be called before inserting the task """
cur.execute("""UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?""",[vID, orderNum])
|
normal
|
{
"blob_id": "9b8b196e1ad845ab745dabe5abe3be7bea0d5695",
"index": 4835,
"step-1": "<mask token>\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\"\n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0' + str(hour) + et[13:19]\n else:\n et = et[:19]\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\n\n<mask token>\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\"\n cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]\n )\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef execute_query(cur, query, args=()):\n cur = cur.execute(query, args)\n rows = cur.fetchall()\n return rows\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\"\n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0' + str(hour) + et[13:19]\n else:\n et = et[:19]\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\n\n<mask token>\n\n\ndef notSignedIn(vID):\n \"\"\"Return true is the drivers did not enter vehicle ID, \n return False if the drivers have entered the vehicle ID\"\"\"\n if str(vID) == '0':\n return True\n return False\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\"\n cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]\n )\n\n\ndef getNextFixOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last fixed task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1', [vID])[\n 0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef execute_query(cur, query, args=()):\n cur = cur.execute(query, args)\n rows = cur.fetchall()\n return rows\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\"\n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0' + str(hour) + et[13:19]\n else:\n et = et[:19]\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\n\ndef timeStringToObject(timeString):\n \"\"\"convert a string in format hh:mm:ss to a datetime object with current date\"\"\"\n try:\n hour = int(timeString[:2])\n minute = int(timeString[3:5])\n result = datetime.today().replace(hour=hour, minute=minute, second=\n 0, microsecond=0)\n return result\n except:\n return None\n\n\ndef notSignedIn(vID):\n \"\"\"Return true is the drivers did not enter vehicle ID, \n return False if the drivers have entered the vehicle ID\"\"\"\n if str(vID) == '0':\n return True\n return False\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\"\n cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]\n )\n\n\ndef getNextFixOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last fixed task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1', [vID])[\n 0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\ndef getNextOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ?', [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\ndef fixOrderBeforeInsert(cur, vID, orderNum):\n \"\"\"Increment later tasks' order number by 1, orderNum is the order of the inserted task\n should be called before inserting the task \"\"\"\n cur.execute(\n 'UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?'\n , [vID, orderNum])\n",
"step-4": "import csv\nimport sqlite3\nimport time\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport pytz\nimport json\nimport urllib\nimport numpy as np\nDATABASE = '/var/www/html/citibikeapp/citibikeapp/citibike_change.db'\n\n\ndef execute_query(cur, query, args=()):\n cur = cur.execute(query, args)\n rows = cur.fetchall()\n return rows\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\"\n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0' + str(hour) + et[13:19]\n else:\n et = et[:19]\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\n\ndef timeStringToObject(timeString):\n \"\"\"convert a string in format hh:mm:ss to a datetime object with current date\"\"\"\n try:\n hour = int(timeString[:2])\n minute = int(timeString[3:5])\n result = datetime.today().replace(hour=hour, minute=minute, second=\n 0, microsecond=0)\n return result\n except:\n return None\n\n\ndef notSignedIn(vID):\n \"\"\"Return true is the drivers did not enter vehicle ID, \n return False if the drivers have entered the vehicle ID\"\"\"\n if str(vID) == '0':\n return True\n return False\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\"\n cur.execute('UPDATE OpenTasks SET estComplete = null WHERE vID = ? ', [vID]\n )\n\n\ndef getNextFixOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last fixed task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1', [vID])[\n 0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\ndef getNextOrderNum(cur, vID):\n \"\"\"return the integer which is one larger than the order number of the last task\"\"\"\n orderNum = execute_query(cur,\n 'SELECT Count(*) FROM OpenTasks where vID = ?', [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\n\ndef fixOrderBeforeInsert(cur, vID, orderNum):\n \"\"\"Increment later tasks' order number by 1, orderNum is the order of the inserted task\n should be called before inserting the task \"\"\"\n cur.execute(\n 'UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?'\n , [vID, orderNum])\n",
"step-5": "import csv\nimport sqlite3\nimport time\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport pytz\nimport json\nimport urllib\nimport numpy as np\n\nDATABASE = '/var/www/html/citibikeapp/citibikeapp/citibike_change.db'\n\ndef execute_query(cur,query, args=()):\n cur = cur.execute(query, args)\n rows = cur.fetchall()\n # cur.close()\n return rows\n\n\ndef convertTime(et):\n \"\"\"'2017-06-01 11:41:53 AM' to '2017-06-01 11:41:53' \"\"\" \n hour = int(et[11:13])\n if et.find('PM') != -1 and hour != 12:\n dateString = et[:10]\n hour = hour + 12\n et = dateString + ' ' + str(hour) + et[13:19]\n elif et.find('AM') != -1 and hour == 12:\n dateString = et[:10]\n hour = 0\n et = dateString + ' ' + '0'+str(hour) + et[13:19]\n else:\n et = et[:19]\n\n return et\n\n\ndef getNYtimenow():\n tz = pytz.timezone('America/New_York')\n time = str(datetime.now(tz))[:19]\n return time\n\ndef datetimeStringToObject(timeString):\n \"\"\"convert a string in format YYYY-MM-DD hh:mm:ss to a datetime object\"\"\"\n try:\n year = int(timeString[:4])\n month = int(timeString[5:7])\n day = int(timeString[8:10])\n hour = int(timeString[11:13])\n minute = int(timeString[14:16])\n result = datetime(year, month, day, hour, minute)\n return result\n except:\n return None\n\ndef timeStringToObject(timeString):\n \"\"\"convert a string in format hh:mm:ss to a datetime object with current date\"\"\"\n try:\n # year = datetime.now().year\n # month = datetime.now().month\n # day = datetime.now().day\n hour = int(timeString[:2])\n minute = int(timeString[3:5])\n result = datetime.today().replace(hour=hour, minute=minute, second=0, microsecond=0)\n return result\n except:\n return None\n\ndef notSignedIn(vID):\n \"\"\"Return true is the drivers did not enter vehicle ID, \n return False if the drivers have entered the vehicle ID\"\"\"\n if str(vID) == '0':\n return True\n return False\n\n\ndef resetEstComp(cur, vID):\n \"\"\"estimate completion time goes to 0\"\"\" \n cur.execute(\"\"\"UPDATE OpenTasks SET estComplete = null WHERE vID = ? \"\"\",[vID])\n\ndef getNextFixOrderNum(cur,vID):\n \"\"\"return the integer which is one larger than the order number of the last fixed task\"\"\"\n orderNum = execute_query(cur, \"\"\"SELECT Count(*) FROM OpenTasks where vID = ? and fixTask = 1\"\"\", [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\ndef getNextOrderNum(cur,vID):\n \"\"\"return the integer which is one larger than the order number of the last task\"\"\"\n orderNum = execute_query(cur,\"\"\"SELECT Count(*) FROM OpenTasks where vID = ?\"\"\", [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum\n\ndef fixOrderBeforeInsert(cur,vID,orderNum):\n \"\"\"Increment later tasks' order number by 1, orderNum is the order of the inserted task\n should be called before inserting the task \"\"\"\n cur.execute(\"\"\"UPDATE OpenTasks SET orderNum = orderNum + 1 WHERE vID = ? and orderNum >= ?\"\"\",[vID, orderNum])",
"step-ids": [
4,
7,
10,
12,
13
]
}
|
[
4,
7,
10,
12,
13
] |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import config
import os
db = SQLAlchemy()
static_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'static')
def create_app(config_name):
app = Flask(__name__, static_folder=static_file_dir)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
db.init_app(app)
return app
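
# Hedged usage sketch (added for illustration): the factory is normally called
# with one of the keys defined in config.py; 'default' is an assumption here.
#   app = create_app('default')
#   app.run(debug=True)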
|
normal
|
{
"blob_id": "bee6ba1db608c1d9c8114f89d4b3abab795a6b86",
"index": 3843,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-3": "<mask token>\ndb = SQLAlchemy()\nstatic_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'static')\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-4": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config import config\nimport os\ndb = SQLAlchemy()\nstatic_file_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'static')\n\n\ndef create_app(config_name):\n app = Flask(__name__, static_folder=static_file_dir)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n db.init_app(app)\n return app\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.7 on 2021-03-24 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Products_Table',
fields=[
('product_id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),
('product_name', models.CharField(max_length=50)),
('product_details', models.TextField()),
('product_price', models.IntegerField()),
('product_release_date', models.DateTimeField()),
],
),
]
|
normal
|
{
"blob_id": "90b9dcd2dfc28446d1979d58ed49a12a85ce5b98",
"index": 7429,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Products_Table', fields=[(\n 'product_id', models.IntegerField(auto_created=True, primary_key=\n True, serialize=False)), ('product_name', models.CharField(\n max_length=50)), ('product_details', models.TextField()), (\n 'product_price', models.IntegerField()), ('product_release_date',\n models.DateTimeField())])]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Products_Table', fields=[(\n 'product_id', models.IntegerField(auto_created=True, primary_key=\n True, serialize=False)), ('product_name', models.CharField(\n max_length=50)), ('product_details', models.TextField()), (\n 'product_price', models.IntegerField()), ('product_release_date',\n models.DateTimeField())])]\n",
"step-5": "# Generated by Django 3.1.7 on 2021-03-24 14:51\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Products_Table',\n fields=[\n ('product_id', models.IntegerField(auto_created=True, primary_key=True, serialize=False)),\n ('product_name', models.CharField(max_length=50)),\n ('product_details', models.TextField()),\n ('product_price', models.IntegerField()),\n ('product_release_date', models.DateTimeField()),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 18 13:39:05 2017
@author: jaredhaeme15
"""
import cv2
import numpy as np
from collections import deque
import imutils
import misc_image_tools
frameFileName = r"H:\Summer Research 2017\Whirligig Beetle pictures and videos\large1.mp4"
cap = cv2.VideoCapture(r"H:\Summer Research 2017\Whirligig Beetle pictures and videos\large1.mp4")
while True:
successFlag, frame = cap.read()
if not successFlag:
cv2.waitKey(0)
break
lower_hsv_thresholdcr = np.array([0,250,250])
upper_hsv_thresholdcr = np.array([10,255,255])
gray = np.float32(cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY))
dst = cv2.cornerHarris(gray,2,3,0.04)
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst,None)
frameWithRedCorners = np.copy(frame)
# Threshold for an optimal value, it may vary depending on the image.
frameWithRedCorners[dst>0.005*dst.max()]=[0,0,255]
hsv = cv2.cvtColor(frameWithRedCorners, cv2.COLOR_BGR2HSV)
#construct a mask for the color "green", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
crmask = cv2.inRange(hsv, lower_hsv_thresholdcr, upper_hsv_thresholdcr)
cntscr = cv2.findContours(crmask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
cv2.imshow("Frame", frameWithRedCorners)
k = cv2.waitKey(10000) & 0xFF
if k == 27: # esc key
break
cv2.destroyAllWindows()
cap.release()
|
normal
|
{
"blob_id": "5ccfad17ede9f685ea9ef9c514c0108a61c2dfd6",
"index": 8699,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile 1:\n successFlag, frame = cap.read()\n if not successFlag:\n cv2.waitKey(0)\n break\n lower_hsv_thresholdcr = np.array([0, 250, 250])\n upper_hsv_thresholdcr = np.array([10, 255, 255])\n gray = np.float32(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))\n dst = cv2.cornerHarris(gray, 2, 3, 0.04)\n dst = cv2.dilate(dst, None)\n frameWithRedCorners = np.copy(frame)\n frameWithRedCorners[dst > 0.005 * dst.max()] = [0, 0, 255]\n hsv = cv2.cvtColor(frameWithRedCorners, cv2.COLOR_BGR2HSV)\n crmask = cv2.inRange(hsv, lower_hsv_thresholdcr, upper_hsv_thresholdcr)\n cntscr = cv2.findContours(crmask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n cv2.imshow('Frame', frameWithRedCorners)\n k = cv2.waitKey(10000) & 255\n if k == 27:\n break\ncv2.destroyAllWindows()\ncap.release()\n",
"step-3": "<mask token>\nframeFileName = (\n 'H:\\\\Summer Research 2017\\\\Whirligig Beetle pictures and videos\\\\large1.mp4'\n )\ncap = cv2.VideoCapture(\n 'H:\\\\Summer Research 2017\\\\Whirligig Beetle pictures and videos\\\\large1.mp4'\n )\nwhile 1:\n successFlag, frame = cap.read()\n if not successFlag:\n cv2.waitKey(0)\n break\n lower_hsv_thresholdcr = np.array([0, 250, 250])\n upper_hsv_thresholdcr = np.array([10, 255, 255])\n gray = np.float32(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))\n dst = cv2.cornerHarris(gray, 2, 3, 0.04)\n dst = cv2.dilate(dst, None)\n frameWithRedCorners = np.copy(frame)\n frameWithRedCorners[dst > 0.005 * dst.max()] = [0, 0, 255]\n hsv = cv2.cvtColor(frameWithRedCorners, cv2.COLOR_BGR2HSV)\n crmask = cv2.inRange(hsv, lower_hsv_thresholdcr, upper_hsv_thresholdcr)\n cntscr = cv2.findContours(crmask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n cv2.imshow('Frame', frameWithRedCorners)\n k = cv2.waitKey(10000) & 255\n if k == 27:\n break\ncv2.destroyAllWindows()\ncap.release()\n",
"step-4": "<mask token>\nimport cv2\nimport numpy as np\nfrom collections import deque\nimport imutils\nimport misc_image_tools\nframeFileName = (\n 'H:\\\\Summer Research 2017\\\\Whirligig Beetle pictures and videos\\\\large1.mp4'\n )\ncap = cv2.VideoCapture(\n 'H:\\\\Summer Research 2017\\\\Whirligig Beetle pictures and videos\\\\large1.mp4'\n )\nwhile 1:\n successFlag, frame = cap.read()\n if not successFlag:\n cv2.waitKey(0)\n break\n lower_hsv_thresholdcr = np.array([0, 250, 250])\n upper_hsv_thresholdcr = np.array([10, 255, 255])\n gray = np.float32(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))\n dst = cv2.cornerHarris(gray, 2, 3, 0.04)\n dst = cv2.dilate(dst, None)\n frameWithRedCorners = np.copy(frame)\n frameWithRedCorners[dst > 0.005 * dst.max()] = [0, 0, 255]\n hsv = cv2.cvtColor(frameWithRedCorners, cv2.COLOR_BGR2HSV)\n crmask = cv2.inRange(hsv, lower_hsv_thresholdcr, upper_hsv_thresholdcr)\n cntscr = cv2.findContours(crmask.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)[-2]\n cv2.imshow('Frame', frameWithRedCorners)\n k = cv2.waitKey(10000) & 255\n if k == 27:\n break\ncv2.destroyAllWindows()\ncap.release()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 18 13:39:05 2017\n\n@author: jaredhaeme15\n\"\"\"\n\n\nimport cv2\nimport numpy as np\nfrom collections import deque\nimport imutils\nimport misc_image_tools \n\nframeFileName = r\"H:\\Summer Research 2017\\Whirligig Beetle pictures and videos\\large1.mp4\"\ncap = cv2.VideoCapture(r\"H:\\Summer Research 2017\\Whirligig Beetle pictures and videos\\large1.mp4\")\n \nwhile(1): \n \n successFlag, frame = cap.read()\n if not successFlag:\n cv2.waitKey(0)\n break \n lower_hsv_thresholdcr = np.array([0,250,250])\n upper_hsv_thresholdcr = np.array([10,255,255])\n gray = np.float32(cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY))\n dst = cv2.cornerHarris(gray,2,3,0.04)\n #result is dilated for marking the corners, not important\n dst = cv2.dilate(dst,None)\n frameWithRedCorners = np.copy(frame)\n # Threshold for an optimal value, it may vary depending on the image.\n frameWithRedCorners[dst>0.005*dst.max()]=[0,0,255]\n hsv = cv2.cvtColor(frameWithRedCorners, cv2.COLOR_BGR2HSV)\n #construct a mask for the color \"green\", then perform\n # a series of dilations and erosions to remove any small\n # blobs left in the mask\n crmask = cv2.inRange(hsv, lower_hsv_thresholdcr, upper_hsv_thresholdcr)\n cntscr = cv2.findContours(crmask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n cv2.imshow(\"Frame\", frameWithRedCorners)\n k = cv2.waitKey(10000) & 0xFF\n if k == 27: # esc key\n break\ncv2.destroyAllWindows()\ncap.release()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# put your python code here
a = int(input())
b = int(input())
# and
i = 1
if a == b:
print(a)
else:
while True:
if i // a > 0 and i % a == 0 and i // b > 0 and i % b == 0:
print(i)
break
else:
i += 1
|
normal
|
{
"blob_id": "af5ebdcd818fdf9c607240733b7b5dbb793cf55e",
"index": 7328,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif a == b:\n print(a)\nelse:\n while True:\n if i // a > 0 and i % a == 0 and i // b > 0 and i % b == 0:\n print(i)\n break\n else:\n i += 1\n",
"step-3": "a = int(input())\nb = int(input())\ni = 1\nif a == b:\n print(a)\nelse:\n while True:\n if i // a > 0 and i % a == 0 and i // b > 0 and i % b == 0:\n print(i)\n break\n else:\n i += 1\n",
"step-4": "# put your python code here\na = int(input())\nb = int(input())\n\n# and\ni = 1\nif a == b:\n print(a)\nelse:\n while True:\n if i // a > 0 and i % a == 0 and i // b > 0 and i % b == 0:\n print(i)\n break\n else:\n i += 1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'KEY.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_KEY(object):
def setupUi(self, KEY):
KEY.setObjectName("KEY")
KEY.resize(419, 106)
self.Key1 = QtWidgets.QLineEdit(KEY)
self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))
self.Key1.setText("")
self.Key1.setObjectName("Key1")
self.Key2 = QtWidgets.QLineEdit(KEY)
self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))
self.Key2.setObjectName("Key2")
self.layoutWidget = QtWidgets.QWidget(KEY)
self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.layoutWidget)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.label_2 = QtWidgets.QLabel(self.layoutWidget)
self.label_2.setObjectName("label_2")
self.verticalLayout.addWidget(self.label_2)
self.enter = QtWidgets.QPushButton(KEY)
self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))
self.enter.setObjectName("enter")
self.quxiao = QtWidgets.QPushButton(KEY)
self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))
self.quxiao.setObjectName("quxiao")
self.retranslateUi(KEY)
self.quxiao.clicked.connect(KEY.close)
QtCore.QMetaObject.connectSlotsByName(KEY)
def retranslateUi(self, KEY):
_translate = QtCore.QCoreApplication.translate
KEY.setWindowTitle(_translate("KEY", "KEY"))
self.label.setText(_translate("KEY", "Keys 1"))
self.label_2.setText(_translate("KEY", "Keys 2"))
self.enter.setText(_translate("KEY", "确定"))
self.quxiao.setText(_translate("KEY", "取消"))
|
normal
|
{
"blob_id": "1dab0084666588f61d0f9f95f88f06ed9d884e5b",
"index": 3892,
"step-1": "<mask token>\n\n\nclass Ui_KEY(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_KEY(object):\n\n def setupUi(self, KEY):\n KEY.setObjectName('KEY')\n KEY.resize(419, 106)\n self.Key1 = QtWidgets.QLineEdit(KEY)\n self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))\n self.Key1.setText('')\n self.Key1.setObjectName('Key1')\n self.Key2 = QtWidgets.QLineEdit(KEY)\n self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))\n self.Key2.setObjectName('Key2')\n self.layoutWidget = QtWidgets.QWidget(KEY)\n self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))\n self.layoutWidget.setObjectName('layoutWidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName('verticalLayout')\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName('label')\n self.verticalLayout.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.layoutWidget)\n self.label_2.setObjectName('label_2')\n self.verticalLayout.addWidget(self.label_2)\n self.enter = QtWidgets.QPushButton(KEY)\n self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))\n self.enter.setObjectName('enter')\n self.quxiao = QtWidgets.QPushButton(KEY)\n self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))\n self.quxiao.setObjectName('quxiao')\n self.retranslateUi(KEY)\n self.quxiao.clicked.connect(KEY.close)\n QtCore.QMetaObject.connectSlotsByName(KEY)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Ui_KEY(object):\n\n def setupUi(self, KEY):\n KEY.setObjectName('KEY')\n KEY.resize(419, 106)\n self.Key1 = QtWidgets.QLineEdit(KEY)\n self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))\n self.Key1.setText('')\n self.Key1.setObjectName('Key1')\n self.Key2 = QtWidgets.QLineEdit(KEY)\n self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))\n self.Key2.setObjectName('Key2')\n self.layoutWidget = QtWidgets.QWidget(KEY)\n self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))\n self.layoutWidget.setObjectName('layoutWidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName('verticalLayout')\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName('label')\n self.verticalLayout.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.layoutWidget)\n self.label_2.setObjectName('label_2')\n self.verticalLayout.addWidget(self.label_2)\n self.enter = QtWidgets.QPushButton(KEY)\n self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))\n self.enter.setObjectName('enter')\n self.quxiao = QtWidgets.QPushButton(KEY)\n self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))\n self.quxiao.setObjectName('quxiao')\n self.retranslateUi(KEY)\n self.quxiao.clicked.connect(KEY.close)\n QtCore.QMetaObject.connectSlotsByName(KEY)\n\n def retranslateUi(self, KEY):\n _translate = QtCore.QCoreApplication.translate\n KEY.setWindowTitle(_translate('KEY', 'KEY'))\n self.label.setText(_translate('KEY', 'Keys 1'))\n self.label_2.setText(_translate('KEY', 'Keys 2'))\n self.enter.setText(_translate('KEY', '确定'))\n self.quxiao.setText(_translate('KEY', '取消'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_KEY(object):\n\n def setupUi(self, KEY):\n KEY.setObjectName('KEY')\n KEY.resize(419, 106)\n self.Key1 = QtWidgets.QLineEdit(KEY)\n self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))\n self.Key1.setText('')\n self.Key1.setObjectName('Key1')\n self.Key2 = QtWidgets.QLineEdit(KEY)\n self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))\n self.Key2.setObjectName('Key2')\n self.layoutWidget = QtWidgets.QWidget(KEY)\n self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))\n self.layoutWidget.setObjectName('layoutWidget')\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName('verticalLayout')\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName('label')\n self.verticalLayout.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.layoutWidget)\n self.label_2.setObjectName('label_2')\n self.verticalLayout.addWidget(self.label_2)\n self.enter = QtWidgets.QPushButton(KEY)\n self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))\n self.enter.setObjectName('enter')\n self.quxiao = QtWidgets.QPushButton(KEY)\n self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))\n self.quxiao.setObjectName('quxiao')\n self.retranslateUi(KEY)\n self.quxiao.clicked.connect(KEY.close)\n QtCore.QMetaObject.connectSlotsByName(KEY)\n\n def retranslateUi(self, KEY):\n _translate = QtCore.QCoreApplication.translate\n KEY.setWindowTitle(_translate('KEY', 'KEY'))\n self.label.setText(_translate('KEY', 'Keys 1'))\n self.label_2.setText(_translate('KEY', 'Keys 2'))\n self.enter.setText(_translate('KEY', '确定'))\n self.quxiao.setText(_translate('KEY', '取消'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'KEY.ui'\n#\n# Created by: PyQt5 UI code generator 5.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_KEY(object):\n def setupUi(self, KEY):\n KEY.setObjectName(\"KEY\")\n KEY.resize(419, 106)\n self.Key1 = QtWidgets.QLineEdit(KEY)\n self.Key1.setGeometry(QtCore.QRect(76, 20, 241, 31))\n self.Key1.setText(\"\")\n self.Key1.setObjectName(\"Key1\")\n self.Key2 = QtWidgets.QLineEdit(KEY)\n self.Key2.setGeometry(QtCore.QRect(76, 60, 241, 31))\n self.Key2.setObjectName(\"Key2\")\n self.layoutWidget = QtWidgets.QWidget(KEY)\n self.layoutWidget.setGeometry(QtCore.QRect(16, 20, 50, 71))\n self.layoutWidget.setObjectName(\"layoutWidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.label = QtWidgets.QLabel(self.layoutWidget)\n self.label.setObjectName(\"label\")\n self.verticalLayout.addWidget(self.label)\n self.label_2 = QtWidgets.QLabel(self.layoutWidget)\n self.label_2.setObjectName(\"label_2\")\n self.verticalLayout.addWidget(self.label_2)\n self.enter = QtWidgets.QPushButton(KEY)\n self.enter.setGeometry(QtCore.QRect(330, 20, 71, 31))\n self.enter.setObjectName(\"enter\")\n self.quxiao = QtWidgets.QPushButton(KEY)\n self.quxiao.setGeometry(QtCore.QRect(330, 60, 71, 31))\n self.quxiao.setObjectName(\"quxiao\")\n\n self.retranslateUi(KEY)\n self.quxiao.clicked.connect(KEY.close)\n QtCore.QMetaObject.connectSlotsByName(KEY)\n\n def retranslateUi(self, KEY):\n _translate = QtCore.QCoreApplication.translate\n KEY.setWindowTitle(_translate(\"KEY\", \"KEY\"))\n self.label.setText(_translate(\"KEY\", \"Keys 1\"))\n self.label_2.setText(_translate(\"KEY\", \"Keys 2\"))\n self.enter.setText(_translate(\"KEY\", \"确定\"))\n self.quxiao.setText(_translate(\"KEY\", \"取消\"))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import time as t
class Record:
def __init__(self, value=10, name='name'):
self.id = name
self.value = value
def __get__(self, instance, owner):
with open('record.txt', 'a') as f:
msg = '读取变量%s ' % self.id
tmp = t.localtime()[:6]
form = ['年', '月', '日 ', ':', ':', '']
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
return self.value
def __set__(self, instance, value):
with open('record.txt', 'a') as f:
msg = '更改变量%s为%s ' % (self.id, str(value))
tmp = t.localtime()[:6]
form = ['年', '月', '日 ', ':', ':', '']
for i in range(6):
msg = msg + str(tmp[i]) + form[i]
f.write('\n')
f.write(msg)
self.value = value
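
# Hedged usage sketch (added for illustration): a descriptor only triggers when
# it is stored as a *class* attribute, so Record is attached to a class:
#   class Test:
#       x = Record(10, 'x')
#   t = Test()
#   t.x        # appends a "read x" entry to record.txt
#   t.x = 8    # appends a "changed x to 8" entry to record.txt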
|
normal
|
{
"blob_id": "3e1540a06c478d471f6e6a190cadc44d5c4c2467",
"index": 665,
"step-1": "<mask token>\n\n\nclass Record:\n\n def __init__(self, value=10, name='name'):\n self.id = name\n self.value = value\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Record:\n\n def __init__(self, value=10, name='name'):\n self.id = name\n self.value = value\n <mask token>\n\n def __set__(self, instance, value):\n with open('record.txt', 'a') as f:\n msg = '更改变量%s为%s ' % (self.id, str(value))\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n self.value = value\n",
"step-3": "<mask token>\n\n\nclass Record:\n\n def __init__(self, value=10, name='name'):\n self.id = name\n self.value = value\n\n def __get__(self, instance, owner):\n with open('record.txt', 'a') as f:\n msg = '读取变量%s ' % self.id\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n return self.value\n\n def __set__(self, instance, value):\n with open('record.txt', 'a') as f:\n msg = '更改变量%s为%s ' % (self.id, str(value))\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n self.value = value\n",
"step-4": "import time as t\n\n\nclass Record:\n\n def __init__(self, value=10, name='name'):\n self.id = name\n self.value = value\n\n def __get__(self, instance, owner):\n with open('record.txt', 'a') as f:\n msg = '读取变量%s ' % self.id\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n return self.value\n\n def __set__(self, instance, value):\n with open('record.txt', 'a') as f:\n msg = '更改变量%s为%s ' % (self.id, str(value))\n tmp = t.localtime()[:6]\n form = ['年', '月', '日 ', ':', ':', '']\n for i in range(6):\n msg = msg + str(tmp[i]) + form[i]\n f.write('\\n')\n f.write(msg)\n self.value = value\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
#!/usr/bin/python
#encoding=utf-8
import os, sys
rules = {
'E': ['A'],
'A': ['A+M', 'M'],
'M': ['M*P', 'P'],
'P': ['(E)', 'N'],
'N': [str(i) for i in range(10)],
}
#st为要扫描的字符串
#target为终止状态,即最后的可接受状态
def back(st, target):
reduced_sets = set()
#cur为当前规约后的字符串,hist为记录的规约规则
def _back(cur, hist, idx):
print "----Enter _back----\n"
print "cur:%s\n" % cur
print "hist:%s\n" % hist
print "idx:%d\n" % idx
ans = []
if cur in reduced_sets:
return []
reduced_sets.add(cur)
hist.append(cur)
if cur == target:
ans.append(hist)
#遍历字符串中每个字符,当根据sub[i:j+1]获取栈中字符串在规约规则中未找到满足条件的规则则递增i,根据子串再次查找规约规则
for i in range(len(cur)):
#从位置j向后扫描token,因为例子中的token都是单字符,所以此处实际省略了词法分析获取token的过程。idx即代表当前栈中的最后一个token的索引。如果当轮查找未找到可以规约的规则则不会递归进入_back函数,递增j,相当于向前看一个token,继续查找规则。如果j遍历到结尾也没有查找到规则则递增i,使用栈中字符串后缀继续查找
for j in range(max(i, idx), len(cur)):
#读取已规约字符串,相当于栈中存在的字符串,j+1位置为lookahead token,sub获取的字符串不包含j+1指向的字符
sub = cur[i:j+1]
print "sub:%s\n" % sub
#遍历每条规则,根据栈中token字符串查找移动或者规约规则
for r in rules:
print "r:%s\n" % r
#查找用于规约的规则,rr为规约规则
for rr in rules[r]:
print "rules[r]:%s rr:%s\n" % (rules[r], rr)
work = False
if i == 0:
#栈中字符串为规约规则的后缀,则shift
work = (work or rr[-(j-i+1):] == sub)
if work:
print "%d|%d|%s|%s|rr[-(j-i+1):] == sub\n" % (i, j, rr, sub)
if j == len(cur) - 1:
#当前栈中已规约字符串是rr规约规则的前缀,则可以进行规约
work = (work or rr[:(j-i+1)] == sub)
if work:
print "%d|%d|%s|%s|rr[:(j-i+1)] == sub\n" % (i, j, rr, sub)
#整个栈中的字符串被包含在某条规约规则中,相当于是一个完整语句中的中间片段,没有头部和尾部,只有整个字符串扫描完毕这种情况才成立,如果字符串还未扫描完,则包含情况不能规约,只有是后缀时才能规约。
if i == 0 and j == len(cur) - 1:
#当前规约规则包含栈中
work = (work or (sub in rr))
if work:
print "%d|%d|%s|%s|sub in rr\n" % (i,j, rr, sub)
#规约规则右边字符串等于栈中字符串,可以规约
work = (work or (sub == rr))
if work:
#cur中需要按照规约规则把规约后字符替换规则右边字符,第三个参数i+len(r)-1决定了_back函数中内层迭代j的起始值。i为当前栈中的token个数,len(r)为规约后的token的长度,i+len(r)即为当前栈中token数目,i+len(r)-1为栈中索引
print "%d|%d|%s|%s|sub == rr\n" % (i,j, rr, sub)
vv = _back(
cur[:i] + r + cur[j+1:]
, hist + ['(%d, %d) %s => %s\n' % (i, j, r,
rr)], i + len(r) -1)
ans += vv
print "\n"
return ans
return _back(st, [], 0)
#1+1) is valid, it can be reduced to E
# 1+1) (0, 0) N => 1
# N+1) (0, 0) P => N
# P+1) (0, 0) M => M*P
# M+1) (0, 0) A => A+M
# A+1) (2, 2) N => 1
# A+N) (2, 2) P => N
# A+P) (2, 2) M => P
# A+M) (0, 2) A => A+M
# A) (0, 0) E => A
# E) (0, 1) P => (E)
# P (0, 0) M => M*P
# M (0, 0) A => A+M
# A (0, 0) E => A
# E
if __name__ == '__main__':
if len(sys.argv) < 2:
print "examples: "
print " %s '1+1)'" % sys.argv[0]
sys.exit()
s = sys.argv[1]
vv = back(s, 'E')
if vv:
print s + ' is valid, it can be reduced to E'
for i in vv:
print '\t' + '\t'.join(map(str, i))
else:
print s + ' is invalid, and cannot be reduced to E'
|
normal
|
{
"blob_id": "93953f025fed2bcabf29433591689c0a7adf9569",
"index": 8757,
"step-1": "#!/usr/bin/python\n#encoding=utf-8\n\nimport os, sys\n\nrules = {\n 'E': ['A'],\n 'A': ['A+M', 'M'],\n 'M': ['M*P', 'P'],\n 'P': ['(E)', 'N'],\n 'N': [str(i) for i in range(10)],\n}\n\n#st为要扫描的字符串\n#target为终止状态,即最后的可接受状态\ndef back(st, target):\n reduced_sets = set()\n #cur为当前规约后的字符串,hist为记录的规约规则\n def _back(cur, hist, idx):\n print \"----Enter _back----\\n\"\n print \"cur:%s\\n\" % cur\n print \"hist:%s\\n\" % hist\n print \"idx:%d\\n\" % idx\n ans = []\n if cur in reduced_sets:\n return []\n reduced_sets.add(cur)\n hist.append(cur)\n if cur == target:\n ans.append(hist)\n #遍历字符串中每个字符,当根据sub[i:j+1]获取栈中字符串在规约规则中未找到满足条件的规则则递增i,根据子串再次查找规约规则\n for i in range(len(cur)):\n #从位置j向后扫描token,因为例子中的token都是单字符,所以此处实际省略了词法分析获取token的过程。idx即代表当前栈中的最后一个token的索引。如果当轮查找未找到可以规约的规则则不会递归进入_back函数,递增j,相当于向前看一个token,继续查找规则。如果j遍历到结尾也没有查找到规则则递增i,使用栈中字符串后缀继续查找\n for j in range(max(i, idx), len(cur)):\n #读取已规约字符串,相当于栈中存在的字符串,j+1位置为lookahead token,sub获取的字符串不包含j+1指向的字符\n sub = cur[i:j+1]\n print \"sub:%s\\n\" % sub\n #遍历每条规则,根据栈中token字符串查找移动或者规约规则\n for r in rules:\n print \"r:%s\\n\" % r\n #查找用于规约的规则,rr为规约规则\n for rr in rules[r]:\n print \"rules[r]:%s rr:%s\\n\" % (rules[r], rr)\n work = False\n if i == 0:\n #栈中字符串为规约规则的后缀,则shift\n work = (work or rr[-(j-i+1):] == sub)\n if work:\n print \"%d|%d|%s|%s|rr[-(j-i+1):] == sub\\n\" % (i, j, rr, sub)\n if j == len(cur) - 1:\n #当前栈中已规约字符串是rr规约规则的前缀,则可以进行规约\n work = (work or rr[:(j-i+1)] == sub)\n if work:\n print \"%d|%d|%s|%s|rr[:(j-i+1)] == sub\\n\" % (i, j, rr, sub)\n #整个栈中的字符串被包含在某条规约规则中,相当于是一个完整语句中的中间片段,没有头部和尾部,只有整个字符串扫描完毕这种情况才成立,如果字符串还未扫描完,则包含情况不能规约,只有是后缀时才能规约。\n if i == 0 and j == len(cur) - 1:\n #当前规约规则包含栈中\n work = (work or (sub in rr))\n if work:\n print \"%d|%d|%s|%s|sub in rr\\n\" % (i,j, rr, sub)\n #规约规则右边字符串等于栈中字符串,可以规约\n work = (work or (sub == rr))\n if work:\n #cur中需要按照规约规则把规约后字符替换规则右边字符,第三个参数i+len(r)-1决定了_back函数中内层迭代j的起始值。i为当前栈中的token个数,len(r)为规约后的token的长度,i+len(r)即为当前栈中token数目,i+len(r)-1为栈中索引\n print \"%d|%d|%s|%s|sub == rr\\n\" % (i,j, rr, sub)\n vv = _back(\n cur[:i] + r + cur[j+1:]\n , hist + ['(%d, %d) %s => %s\\n' % (i, j, r,\nrr)], i + len(r) -1)\n ans += vv\n\n print \"\\n\"\n return ans\n return _back(st, [], 0)\n\n\n#1+1) is valid, it can be reduced to E\n#\t1+1)\t(0, 0) N => 1\n#\tN+1)\t(0, 0) P => N\n#\tP+1)\t(0, 0) M => M*P\n#\tM+1)\t(0, 0) A => A+M\n#\tA+1)\t(2, 2) N => 1\n#\tA+N)\t(2, 2) P => N\n#\tA+P)\t(2, 2) M => P\n#\tA+M)\t(0, 2) A => A+M\n#\tA)\t(0, 0) E => A\n#\tE)\t(0, 1) P => (E)\n#\tP\t(0, 0) M => M*P\n#\tM\t(0, 0) A => A+M\n#\tA\t(0, 0) E => A\n#\tE\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print \"examples: \"\n print \" %s '1+1)'\" % sys.argv[0]\n sys.exit()\n s = sys.argv[1]\n vv = back(s, 'E')\n if vv:\n print s + ' is valid, it can be reduced to E'\n for i in vv:\n print '\\t' + '\\t'.join(map(str, i))\n else:\n print s + ' is invalid, and cannot be reduced to E'\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from selenium import webdriver
driver = webdriver.Chrome()
driver.get("http://192.168.1.248:9079/#/")
lanuage = driver.find_element_by_class_name("el-dropdown-trigger-text")
print(type(lanuage))
print(lanuage.text)
try:
    assert driver.find_element_by_class_name("el-dropdown-trigger-text").text == "中文"
    print("符合要求")
except AssertionError:
print("不是中文")
# driver.find_element_by_link_text("简体中文")
|
normal
|
{
"blob_id": "6a1f58af26bbc4d584ffd699c512ef433ffb80d8",
"index": 7206,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndriver.get('http://192.168.1.248:9079/#/')\n<mask token>\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-3": "<mask token>\ndriver = webdriver.Chrome()\ndriver.get('http://192.168.1.248:9079/#/')\nlanuage = driver.find_element_by_class_name('el-dropdown-trigger-text')\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-4": "from selenium import webdriver\ndriver = webdriver.Chrome()\ndriver.get('http://192.168.1.248:9079/#/')\nlanuage = driver.find_element_by_class_name('el-dropdown-trigger-text')\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name('el-dropdown-trigger-text').text == '中文'\n print('符合要求')\nexcept EOFError:\n print('不是中文')\n",
"step-5": "from selenium import webdriver\n\n\ndriver = webdriver.Chrome()\ndriver.get(\"http://192.168.1.248:9079/#/\")\n\n\nlanuage = driver.find_element_by_class_name(\"el-dropdown-trigger-text\")\nprint(type(lanuage))\nprint(lanuage.text)\ntry:\n driver.find_element_by_class_name(\"el-dropdown-trigger-text\").text ==\"中文\"\n print(\"符合要求\")\nexcept EOFError:\n print(\"不是中文\") \n# driver.find_element_by_link_text(\"简体中文\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#
# @lc app=leetcode id=267 lang=python3
#
# [267] Palindrome Permutation II
#
# https://leetcode.com/problems/palindrome-permutation-ii/description/
#
# algorithms
# Medium (33.28%)
# Total Accepted: 24.8K
# Total Submissions: 74.4K
# Testcase Example: '"aabb"'
#
# Given a string s, return all the palindromic permutations (without
# duplicates) of it. Return an empty list if no palindromic permutation could
# be form.
#
# Example 1:
#
#
# Input: "aabb"
# Output: ["abba", "baab"]
#
# Example 2:
#
#
# Input: "abc"
# Output: []
#
#
class Solution:
def generatePalindromes(self, s: str) -> List[str]:
|
normal
|
{
"blob_id": "4e538251dedfe0b9ffb68de2de7dc50681320f1f",
"index": 8619,
"step-1": "#\n# @lc app=leetcode id=267 lang=python3\n#\n# [267] Palindrome Permutation II\n#\n# https://leetcode.com/problems/palindrome-permutation-ii/description/\n#\n# algorithms\n# Medium (33.28%)\n# Total Accepted: 24.8K\n# Total Submissions: 74.4K\n# Testcase Example: '\"aabb\"'\n#\n# Given a string s, return all the palindromic permutations (without\n# duplicates) of it. Return an empty list if no palindromic permutation could\n# be form.\n# \n# Example 1:\n# \n# \n# Input: \"aabb\"\n# Output: [\"abba\", \"baab\"]\n# \n# Example 2:\n# \n# \n# Input: \"abc\"\n# Output: []\n# \n#\nclass Solution:\n def generatePalindromes(self, s: str) -> List[str]:\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 3.2.6 on 2021-10-10 17:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reward', '0002_delete_user'),
]
operations = [
migrations.CreateModel(
name='order',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('created_date', models.DateField(auto_now_add=True)),
('points', models.IntegerField(blank=True, default=0, null=True)),
('green_rating', models.CharField(choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), ('4', 'rating 4'), ('5', 'rating 5')], max_length=200)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
normal
|
{
"blob_id": "8cec6778f530cb06e4f6cb2e6e9b6cb192d20f97",
"index": 3280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('reward', '0002_delete_user')]\n operations = [migrations.CreateModel(name='order', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False, unique=True)), ('created_date', models.\n DateField(auto_now_add=True)), ('points', models.IntegerField(blank\n =True, default=0, null=True)), ('green_rating', models.CharField(\n choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), (\n '4', 'rating 4'), ('5', 'rating 5')], max_length=200)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('reward', '0002_delete_user')]\n operations = [migrations.CreateModel(name='order', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False, unique=True)), ('created_date', models.\n DateField(auto_now_add=True)), ('points', models.IntegerField(blank\n =True, default=0, null=True)), ('green_rating', models.CharField(\n choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), (\n '4', 'rating 4'), ('5', 'rating 5')], max_length=200)), ('user',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 3.2.6 on 2021-10-10 17:17\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('reward', '0002_delete_user'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='order',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),\n ('created_date', models.DateField(auto_now_add=True)),\n ('points', models.IntegerField(blank=True, default=0, null=True)),\n ('green_rating', models.CharField(choices=[('1', 'rating 1'), ('2', 'rating 2'), ('3', 'rating 3'), ('4', 'rating 4'), ('5', 'rating 5')], max_length=200)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
"""
demo_mininet_topo.py
Sample topology class with Mininet.
G = {V, E}
V = {h1, h2, h3, h4, h51, h52, s0, s1, s4, s5}
# of hosts = 6
# of switches = 4
E = {
(h1, s1), (h2, s1), (h3, s1),
(h4, s4),
(h51, s5), (h52, s5),
(s0, s1), (s0, s4), (s5, s4)
}
"""
from mininet.topo import Topo
class DemoTopology(Topo):
def __init__(self):
Topo.__init__(self)
# Add some hosts
h1 = self.h1 = self.addHost('h1')
h2 = self.h2 = self.addHost('h2')
h3 = self.h3 = self.addHost('h3')
h4 = self.h4 = self.addHost('h4')
h51 = self.h51 = self.addHost('h51')
h52 = self.h52 = self.addHost('h52')
# Add switches
s0 = self.s0 = self.addSwitch('s0')
s1 = self.s1 = self.addSwitch('s1')
s4 = self.s4 = self.addSwitch('s4')
s5 = self.s5 = self.addSwitch('s5')
# Link hosts with switches
self.addLink(h1, s1)
self.addLink(h2, s1)
self.addLink(h3, s1)
self.addLink(h4, s4)
self.addLink(h51, s5)
self.addLink(h52, s5)
# Link switches with switches
self.addLink(s0, s1)
self.addLink(s0, s4)
self.addLink(s5, s4)
topos = {
'demo': lambda: DemoTopology()
}
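
# Hedged usage sketch (added for illustration): with the `topos` dict above,
# Mininet can load this topology from the command line, e.g.
#   sudo mn --custom demo_mininet_topo.py --topo demo
# (the filename is taken from the docstring; adjust the path as needed.)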
|
normal
|
{
"blob_id": "8c69813bc576a56c25c828fe24e2707e65ac0d0d",
"index": 5628,
"step-1": "<mask token>\n\n\nclass DemoTopology(Topo):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DemoTopology(Topo):\n\n def __init__(self):\n Topo.__init__(self)\n h1 = self.h1 = self.addHost('h1')\n h2 = self.h2 = self.addHost('h2')\n h3 = self.h3 = self.addHost('h3')\n h4 = self.h4 = self.addHost('h4')\n h51 = self.h51 = self.addHost('h51')\n h52 = self.h52 = self.addHost('h52')\n s0 = self.s0 = self.addSwitch('s0')\n s1 = self.s1 = self.addSwitch('s1')\n s4 = self.s4 = self.addSwitch('s4')\n s5 = self.s5 = self.addSwitch('s5')\n self.addLink(h1, s1)\n self.addLink(h2, s1)\n self.addLink(h3, s1)\n self.addLink(h4, s4)\n self.addLink(h51, s5)\n self.addLink(h52, s5)\n self.addLink(s0, s1)\n self.addLink(s0, s4)\n self.addLink(s5, s4)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DemoTopology(Topo):\n\n def __init__(self):\n Topo.__init__(self)\n h1 = self.h1 = self.addHost('h1')\n h2 = self.h2 = self.addHost('h2')\n h3 = self.h3 = self.addHost('h3')\n h4 = self.h4 = self.addHost('h4')\n h51 = self.h51 = self.addHost('h51')\n h52 = self.h52 = self.addHost('h52')\n s0 = self.s0 = self.addSwitch('s0')\n s1 = self.s1 = self.addSwitch('s1')\n s4 = self.s4 = self.addSwitch('s4')\n s5 = self.s5 = self.addSwitch('s5')\n self.addLink(h1, s1)\n self.addLink(h2, s1)\n self.addLink(h3, s1)\n self.addLink(h4, s4)\n self.addLink(h51, s5)\n self.addLink(h52, s5)\n self.addLink(s0, s1)\n self.addLink(s0, s4)\n self.addLink(s5, s4)\n\n\ntopos = {'demo': lambda : DemoTopology()}\n",
"step-4": "<mask token>\nfrom mininet.topo import Topo\n\n\nclass DemoTopology(Topo):\n\n def __init__(self):\n Topo.__init__(self)\n h1 = self.h1 = self.addHost('h1')\n h2 = self.h2 = self.addHost('h2')\n h3 = self.h3 = self.addHost('h3')\n h4 = self.h4 = self.addHost('h4')\n h51 = self.h51 = self.addHost('h51')\n h52 = self.h52 = self.addHost('h52')\n s0 = self.s0 = self.addSwitch('s0')\n s1 = self.s1 = self.addSwitch('s1')\n s4 = self.s4 = self.addSwitch('s4')\n s5 = self.s5 = self.addSwitch('s5')\n self.addLink(h1, s1)\n self.addLink(h2, s1)\n self.addLink(h3, s1)\n self.addLink(h4, s4)\n self.addLink(h51, s5)\n self.addLink(h52, s5)\n self.addLink(s0, s1)\n self.addLink(s0, s4)\n self.addLink(s5, s4)\n\n\ntopos = {'demo': lambda : DemoTopology()}\n",
"step-5": "#!/usr/bin/python\n\n\"\"\"\ndemo_mininet_topo.py\n\nSample topology class with Mininet.\n\nG = {V, E}\nV = {h1, h2, h3, h4, h51, h52, s0, s1, s4, s5}\n\t# of hosts = 6\n\t# of switches = 4\nE = {\n\t\t(h1, s1), (h2, s1), (h3, s1), \n\t \t(h4, s4), \n\t\t(h51, s5), (h52, s5), \n\t\t(s0, s1), (s0, s4), (s5, s4)\n\t}\n\"\"\"\n\nfrom mininet.topo import Topo\n\nclass DemoTopology(Topo):\n\t\n\tdef __init__(self):\n\t\t\n\t\tTopo.__init__(self)\n\t\t\n\t\t# Add some hosts\n\t\th1 = self.h1 = self.addHost('h1')\n\t\th2 = self.h2 = self.addHost('h2')\n\t\th3 = self.h3 = self.addHost('h3')\n\t\th4 = self.h4 = self.addHost('h4')\n\t\th51 = self.h51 = self.addHost('h51')\n\t\th52 = self.h52 = self.addHost('h52')\n\t\t\n\t\t# Add switches\n\t\ts0 = self.s0 = self.addSwitch('s0')\n\t\ts1 = self.s1 = self.addSwitch('s1')\n\t\ts4 = self.s4 = self.addSwitch('s4')\n\t\ts5 = self.s5 = self.addSwitch('s5')\n\t\t\n\t\t# Link hosts with switches\n\t\tself.addLink(h1, s1)\n\t\tself.addLink(h2, s1)\n\t\tself.addLink(h3, s1)\n\t\tself.addLink(h4, s4)\n\t\tself.addLink(h51, s5)\n\t\tself.addLink(h52, s5)\n\t\t\n\t\t# Link switches with switches\n\t\tself.addLink(s0, s1)\n\t\tself.addLink(s0, s4)\n\t\tself.addLink(s5, s4)\n\t\ntopos = {\n\t'demo': lambda: DemoTopology()\n}\t",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from builtins import object
import hashlib
from xml.sax.saxutils import escape
from struct import unpack, pack
import textwrap
import json
from .anconf import warning, error, CONF, enable_colors, remove_colors, save_colors, color_range
def disable_print_colors():
colors = save_colors()
remove_colors()
return colors
def enable_print_colors(colors):
enable_colors(colors)
# Handle exit message
def Exit(msg):
warning("Error : " + msg)
raise ("oops")
def Warning(msg):
warning(msg)
def _PrintBanner():
print_fct = CONF["PRINT_FCT"]
print_fct("*" * 75 + "\n")
def _PrintSubBanner(title=None):
print_fct = CONF["PRINT_FCT"]
if title == None:
print_fct("#" * 20 + "\n")
else:
print_fct("#" * 10 + " " + title + "\n")
def _PrintNote(note, tab=0):
print_fct = CONF["PRINT_FCT"]
note_color = CONF["COLORS"]["NOTE"]
normal_color = CONF["COLORS"]["NORMAL"]
print_fct("\t" * tab + "%s# %s%s" % (note_color, note, normal_color) + "\n")
# Print arg into a correct format
def _Print(name, arg):
buff = name + " "
if type(arg).__name__ == 'int':
buff += "0x%x" % arg
elif type(arg).__name__ == 'long':
buff += "0x%x" % arg
elif type(arg).__name__ == 'str':
buff += "%s" % arg
elif isinstance(arg, SV):
buff += "0x%x" % arg.get_value()
elif isinstance(arg, SVs):
buff += arg.get_value().__str__()
print(buff)
def PrettyShowEx(exceptions):
if len(exceptions) > 0:
CONF["PRINT_FCT"]("Exceptions:\n")
for i in exceptions:
CONF["PRINT_FCT"]("\t%s%s%s\n" %
(CONF["COLORS"]["EXCEPTION"], i.show_buff(),
CONF["COLORS"]["NORMAL"]))
def _PrintXRef(tag, items):
print_fct = CONF["PRINT_FCT"]
for i in items:
print_fct("%s: %s %s %s %s\n" %
(tag, i[0].get_class_name(), i[0].get_name(),
i[0].get_descriptor(), ' '.join("%x" % j.get_idx()
for j in i[1])))
def _PrintDRef(tag, items):
print_fct = CONF["PRINT_FCT"]
for i in items:
print_fct("%s: %s %s %s %s\n" %
(tag, i[0].get_class_name(), i[0].get_name(),
i[0].get_descriptor(), ' '.join("%x" % j for j in i[1])))
def _PrintDefault(msg):
print_fct = CONF["PRINT_FCT"]
print_fct(msg)
def PrettyShow(m_a, basic_blocks, notes={}):
idx = 0
nb = 0
offset_color = CONF["COLORS"]["OFFSET"]
offset_addr_color = CONF["COLORS"]["OFFSET_ADDR"]
instruction_name_color = CONF["COLORS"]["INSTRUCTION_NAME"]
branch_false_color = CONF["COLORS"]["BRANCH_FALSE"]
branch_true_color = CONF["COLORS"]["BRANCH_TRUE"]
branch_color = CONF["COLORS"]["BRANCH"]
exception_color = CONF["COLORS"]["EXCEPTION"]
bb_color = CONF["COLORS"]["BB"]
normal_color = CONF["COLORS"]["NORMAL"]
print_fct = CONF["PRINT_FCT"]
colors = CONF["COLORS"]["OUTPUT"]
for i in basic_blocks:
print_fct("%s%s%s : \n" % (bb_color, i.get_name(), normal_color))
instructions = i.get_instructions()
for ins in instructions:
if nb in notes:
for note in notes[nb]:
_PrintNote(note, 1)
print_fct("\t%s%-3d%s(%s%08x%s) " %
(offset_color, nb, normal_color, offset_addr_color, idx,
normal_color))
print_fct("%s%-20s%s" %
(instruction_name_color, ins.get_name(), normal_color))
operands = ins.get_operands()
print_fct(
"%s" %
", ".join(m_a.get_vm().colorize_operands(operands, colors)))
op_value = ins.get_op_value()
if ins == instructions[-1] and i.childs:
print_fct(" ")
# packed/sparse-switch
if (op_value == 0x2b or op_value == 0x2c) and len(i.childs) > 1:
values = i.get_special_ins(idx).get_values()
print_fct("%s[ D:%s%s " %
(branch_false_color, i.childs[0][2].get_name(),
branch_color))
print_fct(' '.join("%d:%s" % (
values[j], i.childs[j + 1][2].get_name()) for j in
range(0, len(i.childs) - 1)) + " ]%s" %
normal_color)
else:
if len(i.childs) == 2:
print_fct("%s[ %s%s " % (branch_false_color,
i.childs[0][2].get_name(),
branch_true_color))
print_fct(' '.join("%s" % c[2].get_name(
) for c in i.childs[1:]) + " ]%s" % normal_color)
else:
print_fct("%s[ " % branch_color + ' '.join(
"%s" % c[2].get_name() for c in i.childs) + " ]%s" %
normal_color)
idx += ins.get_length()
nb += 1
print_fct("\n")
if i.get_exception_analysis():
print_fct("\t%s%s%s\n" %
(exception_color, i.exception_analysis.show_buff(),
normal_color))
print_fct("\n")
class TmpBlock(object):
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
def method2json(mx, directed_graph=False):
if directed_graph:
return method2json_direct(mx)
return method2json_undirect(mx)
def method2json_undirect(mx):
d = {}
reports = []
d["reports"] = reports
for DVMBasicMethodBlock in mx.basic_blocks.gets():
cblock = {}
cblock["BasicBlockId"] = DVMBasicMethodBlock.get_name()
cblock["registers"] = mx.get_method().get_code().get_registers_size()
cblock["instructions"] = []
ins_idx = DVMBasicMethodBlock.start
for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(
):
c_ins = {}
c_ins["idx"] = ins_idx
c_ins["name"] = DVMBasicMethodBlockInstruction.get_name()
c_ins["operands"] = DVMBasicMethodBlockInstruction.get_operands(
ins_idx)
cblock["instructions"].append(c_ins)
ins_idx += DVMBasicMethodBlockInstruction.get_length()
cblock["Edge"] = []
for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:
cblock["Edge"].append(DVMBasicMethodBlockChild[-1].get_name())
reports.append(cblock)
return json.dumps(d)
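# Shape sketch of the JSON produced above (illustrative; the field values are hypothetical):
#   {"reports": [
#       {"BasicBlockId": "<basic block name>",
#        "registers": <registers_size of the method>,
#        "instructions": [{"idx": 0, "name": "<mnemonic>", "operands": [...]}, ...],
#        "Edge": ["<child basic block name>", ...]},
#       ...]}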
def method2json_direct(mx):
d = {}
reports = []
d["reports"] = reports
hooks = {}
l = []
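    # A basic block that branches back to itself would create a self-loop in the exported
    # graph. To keep the output readable, a synthetic "<name>-pre" block is generated for
    # such blocks below, and the parents are re-wired (via the `hooks` map) to point at the
    # pre-block, which in turn has a single edge to the original block.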
for DVMBasicMethodBlock in mx.basic_blocks.gets():
for index, DVMBasicMethodBlockChild in enumerate(
DVMBasicMethodBlock.childs):
if DVMBasicMethodBlock.get_name(
) == DVMBasicMethodBlockChild[-1].get_name():
preblock = TmpBlock(DVMBasicMethodBlock.get_name() + "-pre")
cnblock = {}
cnblock["BasicBlockId"] = DVMBasicMethodBlock.get_name(
) + "-pre"
cnblock["start"] = DVMBasicMethodBlock.start
cnblock["notes"] = []
cnblock["Edge"] = [DVMBasicMethodBlock.get_name()]
cnblock["registers"] = 0
cnblock["instructions"] = []
cnblock["info_bb"] = 0
l.append(cnblock)
for parent in DVMBasicMethodBlock.fathers:
hooks[parent[-1].get_name()] = []
hooks[parent[-1].get_name()].append(preblock)
for idx, child in enumerate(parent[-1].childs):
if child[-1].get_name() == DVMBasicMethodBlock.get_name(
):
hooks[parent[-1].get_name()].append(child[-1])
for DVMBasicMethodBlock in mx.basic_blocks.gets():
cblock = {}
cblock["BasicBlockId"] = DVMBasicMethodBlock.get_name()
cblock["start"] = DVMBasicMethodBlock.start
cblock["notes"] = DVMBasicMethodBlock.get_notes()
cblock["registers"] = mx.get_method().get_code().get_registers_size()
cblock["instructions"] = []
ins_idx = DVMBasicMethodBlock.start
last_instru = None
for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(
):
c_ins = {}
c_ins["idx"] = ins_idx
c_ins["name"] = DVMBasicMethodBlockInstruction.get_name()
c_ins["operands"] = DVMBasicMethodBlockInstruction.get_operands(
ins_idx)
c_ins["formatted_operands"
] = DVMBasicMethodBlockInstruction.get_formatted_operands()
cblock["instructions"].append(c_ins)
if (DVMBasicMethodBlockInstruction.get_op_value() == 0x2b or
DVMBasicMethodBlockInstruction.get_op_value() == 0x2c):
values = DVMBasicMethodBlock.get_special_ins(ins_idx)
cblock["info_next"] = values.get_values()
ins_idx += DVMBasicMethodBlockInstruction.get_length()
last_instru = DVMBasicMethodBlockInstruction
cblock["info_bb"] = 0
if DVMBasicMethodBlock.childs:
if len(DVMBasicMethodBlock.childs) > 1:
cblock["info_bb"] = 1
if (last_instru.get_op_value() == 0x2b or
last_instru.get_op_value() == 0x2c):
cblock["info_bb"] = 2
cblock["Edge"] = []
for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:
ok = False
if DVMBasicMethodBlock.get_name() in hooks:
if DVMBasicMethodBlockChild[-1] in hooks[
DVMBasicMethodBlock.get_name()
]:
ok = True
cblock["Edge"].append(hooks[DVMBasicMethodBlock.get_name(
)][0].get_name())
if not ok:
cblock["Edge"].append(DVMBasicMethodBlockChild[-1].get_name())
exception_analysis = DVMBasicMethodBlock.get_exception_analysis()
if exception_analysis:
cblock["Exceptions"] = exception_analysis.get()
reports.append(cblock)
reports.extend(l)
return json.dumps(d)
class SV(object):
def __init__(self, size, buff):
self.__size = size
self.__value = unpack(self.__size, buff)[0]
def _get(self):
return pack(self.__size, self.__value)
def __str__(self):
return "0x%x" % self.__value
def __int__(self):
return self.__value
def get_value_buff(self):
return self._get()
def get_value(self):
return self.__value
def set_value(self, attr):
self.__value = attr
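# Illustrative usage of SV (hypothetical values): it wraps one value unpacked from a raw
# buffer and lets it be read, edited and re-packed.
#   >>> from struct import pack
#   >>> v = SV("<L", pack("<L", 0x10))
#   >>> v.get_value()
#   16
#   >>> v.set_value(0x20)
#   >>> v.get_value_buff() == pack("<L", 0x20)
#   True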
class SVs(object):
def __init__(self, size, ntuple, buff):
self.__size = size
self.__value = ntuple._make(unpack(self.__size, buff))
def _get(self):
l = []
for i in self.__value._fields:
l.append(getattr(self.__value, i))
return pack(self.__size, *l)
def _export(self):
return [x for x in self.__value._fields]
def get_value_buff(self):
return self._get()
def get_value(self):
return self.__value
def set_value(self, attr):
self.__value = self.__value._replace(**attr)
def __str__(self):
return self.__value.__str__()
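# Illustrative usage of SVs (hypothetical values): it unpacks a buffer into a namedtuple,
# whose fields can be replaced individually before re-packing.
#   >>> from collections import namedtuple
#   >>> from struct import pack
#   >>> Header = namedtuple("Header", "magic version")
#   >>> h = SVs("<2L", Header, pack("<2L", 0xCAFE, 1))
#   >>> h.get_value().version
#   1
#   >>> h.set_value({"version": 2})
#   >>> h.get_value_buff() == pack("<2L", 0xCAFE, 2)
#   True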
def object_to_bytes(obj):
"""
    Convert an object to a bytearray, or fall back to the object's get_raw()
    method if no directly convertible type was found.
"""
if isinstance(obj, str):
return bytearray(obj, "UTF-8")
elif isinstance(obj, bool):
return bytearray()
elif isinstance(obj, int):
return pack("<L", obj)
    elif obj is None:
return bytearray()
elif isinstance(obj, bytearray):
return obj
else:
#print type(obj), obj
return obj.get_raw()
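# Illustrative behaviour (hypothetical values):
#   >>> object_to_bytes("abc")
#   bytearray(b'abc')
#   >>> object_to_bytes(True)          # booleans are dropped
#   bytearray(b'')
#   >>> object_to_bytes(1) == pack("<L", 1)
#   True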
class MethodBC(object):
def show(self, value):
getattr(self, "show_" + value)()
class BuffHandle(object):
def __init__(self, buff):
self.__buff = bytearray(buff)
self.__idx = 0
def size(self):
return len(self.__buff)
def set_idx(self, idx):
self.__idx = idx
def get_idx(self):
return self.__idx
def readNullString(self, size):
data = self.read(size)
return data
def read_b(self, size):
return self.__buff[self.__idx:self.__idx + size]
def read_at(self, offset, size):
return self.__buff[offset:offset + size]
def read(self, size):
if isinstance(size, SV):
            size = size.get_value()
buff = self.__buff[self.__idx:self.__idx + size]
self.__idx += size
return buff
def end(self):
return self.__idx == len(self.__buff)
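# Illustrative usage of BuffHandle (hypothetical values): sequential read() calls advance
# the internal index, while read_at() is position-independent.
#   >>> bh = BuffHandle(b"\x01\x02\x03\x04")
#   >>> bytes(bh.read(2))
#   b'\x01\x02'
#   >>> bh.get_idx()
#   2
#   >>> bh.end()
#   False
#   >>> bytes(bh.read_at(0, 4))
#   b'\x01\x02\x03\x04'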
class Buff(object):
def __init__(self, offset, buff):
self.offset = offset
self.buff = buff
self.size = len(buff)
class _Bytecode(object):
def __init__(self, buff):
self.__buff = bytearray(buff)
self.__idx = 0
def read(self, size):
if isinstance(size, SV):
            size = size.get_value()
buff = self.__buff[self.__idx:self.__idx + size]
self.__idx += size
return buff
def readat(self, off):
if isinstance(off, SV):
            off = off.get_value()
return self.__buff[off:]
def read_b(self, size):
return self.__buff[self.__idx:self.__idx + size]
def set_idx(self, idx):
self.__idx = idx
def get_idx(self):
return self.__idx
def add_idx(self, idx):
self.__idx += idx
def register(self, type_register, fct):
self.__registers[type_register].append(fct)
def get_buff(self):
return self.__buff
def length_buff(self):
return len(self.__buff)
def set_buff(self, buff):
self.__buff = buff
def save(self, filename):
buff = self._save()
with open(filename, "wb") as fd:
fd.write(buff)
def FormatClassToJava(input):
"""
    Transform a typical XML-format class name into Java format
:param input: the input class name
:rtype: string
"""
return "L" + input.replace(".", "/") + ";"
def FormatClassToPython(input):
i = input[:-1]
i = i.replace("/", "_")
i = i.replace("$", "_")
return i
def FormatNameToPython(input):
i = input.replace("<", "")
i = i.replace(">", "")
i = i.replace("$", "_")
return i
def FormatDescriptorToPython(input):
i = input.replace("/", "_")
i = i.replace(";", "")
i = i.replace("[", "")
i = i.replace("(", "")
i = i.replace(")", "")
i = i.replace(" ", "")
i = i.replace("$", "")
return i
class Node(object):
def __init__(self, n, s):
self.id = n
self.title = s
self.children = []
|
normal
|
{
"blob_id": "2e6f04c3ff3e47a2c3e9f6a7d93e7ce2955a2756",
"index": 8354,
"step-1": "<mask token>\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n <mask token>\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n <mask token>\n\n\n<mask token>\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, 'show_' + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, 'wb') as fd:\n fd.write(buff)\n\n\n<mask token>\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n self.title = s\n self.children = []\n",
"step-2": "<mask token>\n\n\ndef disable_print_colors():\n colors = save_colors()\n remove_colors()\n return colors\n\n\n<mask token>\n\n\ndef Warning(msg):\n warning(msg)\n\n\ndef _PrintBanner():\n print_fct = CONF['PRINT_FCT']\n print_fct('*' * 75 + '\\n')\n\n\n<mask token>\n\n\ndef _PrintNote(note, tab=0):\n print_fct = CONF['PRINT_FCT']\n note_color = CONF['COLORS']['NOTE']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct('\\t' * tab + '%s# %s%s' % (note_color, note, normal_color) + '\\n'\n )\n\n\n<mask token>\n\n\ndef _PrintXRef(tag, items):\n print_fct = CONF['PRINT_FCT']\n for i in items:\n print_fct('%s: %s %s %s %s\\n' % (tag, i[0].get_class_name(), i[0].\n get_name(), i[0].get_descriptor(), ' '.join('%x' % j.get_idx() for\n j in i[1])))\n\n\n<mask token>\n\n\ndef _PrintDefault(msg):\n print_fct = CONF['PRINT_FCT']\n print_fct(msg)\n\n\ndef PrettyShow(m_a, basic_blocks, notes={}):\n idx = 0\n nb = 0\n offset_color = CONF['COLORS']['OFFSET']\n offset_addr_color = CONF['COLORS']['OFFSET_ADDR']\n instruction_name_color = CONF['COLORS']['INSTRUCTION_NAME']\n branch_false_color = CONF['COLORS']['BRANCH_FALSE']\n branch_true_color = CONF['COLORS']['BRANCH_TRUE']\n branch_color = CONF['COLORS']['BRANCH']\n exception_color = CONF['COLORS']['EXCEPTION']\n bb_color = CONF['COLORS']['BB']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct = CONF['PRINT_FCT']\n colors = CONF['COLORS']['OUTPUT']\n for i in basic_blocks:\n print_fct('%s%s%s : \\n' % (bb_color, i.get_name(), normal_color))\n instructions = i.get_instructions()\n for ins in instructions:\n if nb in notes:\n for note in notes[nb]:\n _PrintNote(note, 1)\n print_fct('\\t%s%-3d%s(%s%08x%s) ' % (offset_color, nb,\n normal_color, offset_addr_color, idx, normal_color))\n print_fct('%s%-20s%s' % (instruction_name_color, ins.get_name(),\n normal_color))\n operands = ins.get_operands()\n print_fct('%s' % ', '.join(m_a.get_vm().colorize_operands(\n operands, colors)))\n op_value = ins.get_op_value()\n if ins == instructions[-1] and i.childs:\n print_fct(' ')\n if (op_value == 43 or op_value == 44) and len(i.childs) > 1:\n values = i.get_special_ins(idx).get_values()\n print_fct('%s[ D:%s%s ' % (branch_false_color, i.childs\n [0][2].get_name(), branch_color))\n print_fct(' '.join('%d:%s' % (values[j], i.childs[j + 1\n ][2].get_name()) for j in range(0, len(i.childs) - \n 1)) + ' ]%s' % normal_color)\n elif len(i.childs) == 2:\n print_fct('%s[ %s%s ' % (branch_false_color, i.childs[0\n ][2].get_name(), branch_true_color))\n print_fct(' '.join('%s' % c[2].get_name() for c in i.\n childs[1:]) + ' ]%s' % normal_color)\n else:\n print_fct('%s[ ' % branch_color + ' '.join('%s' % c[2].\n get_name() for c in i.childs) + ' ]%s' % normal_color)\n idx += ins.get_length()\n nb += 1\n print_fct('\\n')\n if i.get_exception_analysis():\n print_fct('\\t%s%s%s\\n' % (exception_color, i.exception_analysis\n .show_buff(), normal_color))\n print_fct('\\n')\n\n\nclass TmpBlock(object):\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\ndef method2json(mx, directed_graph=False):\n if directed_graph:\n return method2json_direct(mx)\n return method2json_undirect(mx)\n\n\ndef method2json_undirect(mx):\n d = {}\n reports = []\n d['reports'] = reports\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n 
for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n cblock['instructions'].append(c_ins)\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n reports.append(cblock)\n return json.dumps(d)\n\n\ndef method2json_direct(mx):\n d = {}\n reports = []\n d['reports'] = reports\n hooks = {}\n l = []\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n for index, DVMBasicMethodBlockChild in enumerate(DVMBasicMethodBlock\n .childs):\n if DVMBasicMethodBlock.get_name() == DVMBasicMethodBlockChild[-1\n ].get_name():\n preblock = TmpBlock(DVMBasicMethodBlock.get_name() + '-pre')\n cnblock = {}\n cnblock['BasicBlockId'] = DVMBasicMethodBlock.get_name(\n ) + '-pre'\n cnblock['start'] = DVMBasicMethodBlock.start\n cnblock['notes'] = []\n cnblock['Edge'] = [DVMBasicMethodBlock.get_name()]\n cnblock['registers'] = 0\n cnblock['instructions'] = []\n cnblock['info_bb'] = 0\n l.append(cnblock)\n for parent in DVMBasicMethodBlock.fathers:\n hooks[parent[-1].get_name()] = []\n hooks[parent[-1].get_name()].append(preblock)\n for idx, child in enumerate(parent[-1].childs):\n if child[-1].get_name(\n ) == DVMBasicMethodBlock.get_name():\n hooks[parent[-1].get_name()].append(child[-1])\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['start'] = DVMBasicMethodBlock.start\n cblock['notes'] = DVMBasicMethodBlock.get_notes()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n last_instru = None\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n c_ins['formatted_operands'\n ] = DVMBasicMethodBlockInstruction.get_formatted_operands()\n cblock['instructions'].append(c_ins)\n if DVMBasicMethodBlockInstruction.get_op_value(\n ) == 43 or DVMBasicMethodBlockInstruction.get_op_value() == 44:\n values = DVMBasicMethodBlock.get_special_ins(ins_idx)\n cblock['info_next'] = values.get_values()\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n last_instru = DVMBasicMethodBlockInstruction\n cblock['info_bb'] = 0\n if DVMBasicMethodBlock.childs:\n if len(DVMBasicMethodBlock.childs) > 1:\n cblock['info_bb'] = 1\n if last_instru.get_op_value() == 43 or last_instru.get_op_value(\n ) == 44:\n cblock['info_bb'] = 2\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n ok = False\n if DVMBasicMethodBlock.get_name() in hooks:\n if DVMBasicMethodBlockChild[-1] in hooks[DVMBasicMethodBlock\n .get_name()]:\n ok = True\n cblock['Edge'].append(hooks[DVMBasicMethodBlock.\n get_name()][0].get_name())\n if not ok:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n exception_analysis = DVMBasicMethodBlock.get_exception_analysis()\n if exception_analysis:\n cblock['Exceptions'] = exception_analysis.get()\n reports.append(cblock)\n reports.extend(l)\n return json.dumps(d)\n\n\nclass SV(object):\n\n def __init__(self, size, buff):\n self.__size = size\n 
self.__value = unpack(self.__size, buff)[0]\n\n def _get(self):\n return pack(self.__size, self.__value)\n\n def __str__(self):\n return '0x%x' % self.__value\n\n def __int__(self):\n return self.__value\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = attr\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n\n def _export(self):\n return [x for x in self.__value._fields]\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n\n def __str__(self):\n return self.__value.__str__()\n\n\n<mask token>\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, 'show_' + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, 'wb') as fd:\n fd.write(buff)\n\n\ndef FormatClassToJava(input):\n \"\"\"\n Transoform a typical xml format class into java format\n\n :param input: the input class name\n :rtype: string\n \"\"\"\n return 'L' + input.replace('.', '/') + ';'\n\n\ndef FormatClassToPython(input):\n i = input[:-1]\n i = i.replace('/', '_')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatNameToPython(input):\n i = input.replace('<', '')\n i = i.replace('>', '')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatDescriptorToPython(input):\n i = input.replace('/', '_')\n i = i.replace(';', '')\n i = i.replace('[', '')\n i = i.replace('(', '')\n i = i.replace(')', '')\n i = i.replace(' ', '')\n i = i.replace('$', '')\n return i\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n 
self.title = s\n self.children = []\n",
"step-3": "<mask token>\n\n\ndef disable_print_colors():\n colors = save_colors()\n remove_colors()\n return colors\n\n\ndef enable_print_colors(colors):\n enable_colors(colors)\n\n\n<mask token>\n\n\ndef Warning(msg):\n warning(msg)\n\n\ndef _PrintBanner():\n print_fct = CONF['PRINT_FCT']\n print_fct('*' * 75 + '\\n')\n\n\n<mask token>\n\n\ndef _PrintNote(note, tab=0):\n print_fct = CONF['PRINT_FCT']\n note_color = CONF['COLORS']['NOTE']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct('\\t' * tab + '%s# %s%s' % (note_color, note, normal_color) + '\\n'\n )\n\n\n<mask token>\n\n\ndef _PrintXRef(tag, items):\n print_fct = CONF['PRINT_FCT']\n for i in items:\n print_fct('%s: %s %s %s %s\\n' % (tag, i[0].get_class_name(), i[0].\n get_name(), i[0].get_descriptor(), ' '.join('%x' % j.get_idx() for\n j in i[1])))\n\n\n<mask token>\n\n\ndef _PrintDefault(msg):\n print_fct = CONF['PRINT_FCT']\n print_fct(msg)\n\n\ndef PrettyShow(m_a, basic_blocks, notes={}):\n idx = 0\n nb = 0\n offset_color = CONF['COLORS']['OFFSET']\n offset_addr_color = CONF['COLORS']['OFFSET_ADDR']\n instruction_name_color = CONF['COLORS']['INSTRUCTION_NAME']\n branch_false_color = CONF['COLORS']['BRANCH_FALSE']\n branch_true_color = CONF['COLORS']['BRANCH_TRUE']\n branch_color = CONF['COLORS']['BRANCH']\n exception_color = CONF['COLORS']['EXCEPTION']\n bb_color = CONF['COLORS']['BB']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct = CONF['PRINT_FCT']\n colors = CONF['COLORS']['OUTPUT']\n for i in basic_blocks:\n print_fct('%s%s%s : \\n' % (bb_color, i.get_name(), normal_color))\n instructions = i.get_instructions()\n for ins in instructions:\n if nb in notes:\n for note in notes[nb]:\n _PrintNote(note, 1)\n print_fct('\\t%s%-3d%s(%s%08x%s) ' % (offset_color, nb,\n normal_color, offset_addr_color, idx, normal_color))\n print_fct('%s%-20s%s' % (instruction_name_color, ins.get_name(),\n normal_color))\n operands = ins.get_operands()\n print_fct('%s' % ', '.join(m_a.get_vm().colorize_operands(\n operands, colors)))\n op_value = ins.get_op_value()\n if ins == instructions[-1] and i.childs:\n print_fct(' ')\n if (op_value == 43 or op_value == 44) and len(i.childs) > 1:\n values = i.get_special_ins(idx).get_values()\n print_fct('%s[ D:%s%s ' % (branch_false_color, i.childs\n [0][2].get_name(), branch_color))\n print_fct(' '.join('%d:%s' % (values[j], i.childs[j + 1\n ][2].get_name()) for j in range(0, len(i.childs) - \n 1)) + ' ]%s' % normal_color)\n elif len(i.childs) == 2:\n print_fct('%s[ %s%s ' % (branch_false_color, i.childs[0\n ][2].get_name(), branch_true_color))\n print_fct(' '.join('%s' % c[2].get_name() for c in i.\n childs[1:]) + ' ]%s' % normal_color)\n else:\n print_fct('%s[ ' % branch_color + ' '.join('%s' % c[2].\n get_name() for c in i.childs) + ' ]%s' % normal_color)\n idx += ins.get_length()\n nb += 1\n print_fct('\\n')\n if i.get_exception_analysis():\n print_fct('\\t%s%s%s\\n' % (exception_color, i.exception_analysis\n .show_buff(), normal_color))\n print_fct('\\n')\n\n\nclass TmpBlock(object):\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\ndef method2json(mx, directed_graph=False):\n if directed_graph:\n return method2json_direct(mx)\n return method2json_undirect(mx)\n\n\ndef method2json_undirect(mx):\n d = {}\n reports = []\n d['reports'] = reports\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n 
cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n cblock['instructions'].append(c_ins)\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n reports.append(cblock)\n return json.dumps(d)\n\n\ndef method2json_direct(mx):\n d = {}\n reports = []\n d['reports'] = reports\n hooks = {}\n l = []\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n for index, DVMBasicMethodBlockChild in enumerate(DVMBasicMethodBlock\n .childs):\n if DVMBasicMethodBlock.get_name() == DVMBasicMethodBlockChild[-1\n ].get_name():\n preblock = TmpBlock(DVMBasicMethodBlock.get_name() + '-pre')\n cnblock = {}\n cnblock['BasicBlockId'] = DVMBasicMethodBlock.get_name(\n ) + '-pre'\n cnblock['start'] = DVMBasicMethodBlock.start\n cnblock['notes'] = []\n cnblock['Edge'] = [DVMBasicMethodBlock.get_name()]\n cnblock['registers'] = 0\n cnblock['instructions'] = []\n cnblock['info_bb'] = 0\n l.append(cnblock)\n for parent in DVMBasicMethodBlock.fathers:\n hooks[parent[-1].get_name()] = []\n hooks[parent[-1].get_name()].append(preblock)\n for idx, child in enumerate(parent[-1].childs):\n if child[-1].get_name(\n ) == DVMBasicMethodBlock.get_name():\n hooks[parent[-1].get_name()].append(child[-1])\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['start'] = DVMBasicMethodBlock.start\n cblock['notes'] = DVMBasicMethodBlock.get_notes()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n last_instru = None\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n c_ins['formatted_operands'\n ] = DVMBasicMethodBlockInstruction.get_formatted_operands()\n cblock['instructions'].append(c_ins)\n if DVMBasicMethodBlockInstruction.get_op_value(\n ) == 43 or DVMBasicMethodBlockInstruction.get_op_value() == 44:\n values = DVMBasicMethodBlock.get_special_ins(ins_idx)\n cblock['info_next'] = values.get_values()\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n last_instru = DVMBasicMethodBlockInstruction\n cblock['info_bb'] = 0\n if DVMBasicMethodBlock.childs:\n if len(DVMBasicMethodBlock.childs) > 1:\n cblock['info_bb'] = 1\n if last_instru.get_op_value() == 43 or last_instru.get_op_value(\n ) == 44:\n cblock['info_bb'] = 2\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n ok = False\n if DVMBasicMethodBlock.get_name() in hooks:\n if DVMBasicMethodBlockChild[-1] in hooks[DVMBasicMethodBlock\n .get_name()]:\n ok = True\n cblock['Edge'].append(hooks[DVMBasicMethodBlock.\n get_name()][0].get_name())\n if not ok:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n exception_analysis = DVMBasicMethodBlock.get_exception_analysis()\n if exception_analysis:\n cblock['Exceptions'] = exception_analysis.get()\n reports.append(cblock)\n reports.extend(l)\n return json.dumps(d)\n\n\nclass 
SV(object):\n\n def __init__(self, size, buff):\n self.__size = size\n self.__value = unpack(self.__size, buff)[0]\n\n def _get(self):\n return pack(self.__size, self.__value)\n\n def __str__(self):\n return '0x%x' % self.__value\n\n def __int__(self):\n return self.__value\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = attr\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n\n def _export(self):\n return [x for x in self.__value._fields]\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n\n def __str__(self):\n return self.__value.__str__()\n\n\n<mask token>\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, 'show_' + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, 'wb') as fd:\n fd.write(buff)\n\n\ndef FormatClassToJava(input):\n \"\"\"\n Transoform a typical xml format class into java format\n\n :param input: the input class name\n :rtype: string\n \"\"\"\n return 'L' + input.replace('.', '/') + ';'\n\n\ndef FormatClassToPython(input):\n i = input[:-1]\n i = i.replace('/', '_')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatNameToPython(input):\n i = input.replace('<', '')\n i = i.replace('>', '')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatDescriptorToPython(input):\n i = input.replace('/', '_')\n i = i.replace(';', '')\n i = i.replace('[', '')\n i = i.replace('(', '')\n i = i.replace(')', '')\n i = i.replace(' ', '')\n i = i.replace('$', '')\n return 
i\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n self.title = s\n self.children = []\n",
"step-4": "<mask token>\n\n\ndef disable_print_colors():\n colors = save_colors()\n remove_colors()\n return colors\n\n\ndef enable_print_colors(colors):\n enable_colors(colors)\n\n\n<mask token>\n\n\ndef Warning(msg):\n warning(msg)\n\n\ndef _PrintBanner():\n print_fct = CONF['PRINT_FCT']\n print_fct('*' * 75 + '\\n')\n\n\ndef _PrintSubBanner(title=None):\n print_fct = CONF['PRINT_FCT']\n if title == None:\n print_fct('#' * 20 + '\\n')\n else:\n print_fct('#' * 10 + ' ' + title + '\\n')\n\n\ndef _PrintNote(note, tab=0):\n print_fct = CONF['PRINT_FCT']\n note_color = CONF['COLORS']['NOTE']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct('\\t' * tab + '%s# %s%s' % (note_color, note, normal_color) + '\\n'\n )\n\n\n<mask token>\n\n\ndef _PrintXRef(tag, items):\n print_fct = CONF['PRINT_FCT']\n for i in items:\n print_fct('%s: %s %s %s %s\\n' % (tag, i[0].get_class_name(), i[0].\n get_name(), i[0].get_descriptor(), ' '.join('%x' % j.get_idx() for\n j in i[1])))\n\n\n<mask token>\n\n\ndef _PrintDefault(msg):\n print_fct = CONF['PRINT_FCT']\n print_fct(msg)\n\n\ndef PrettyShow(m_a, basic_blocks, notes={}):\n idx = 0\n nb = 0\n offset_color = CONF['COLORS']['OFFSET']\n offset_addr_color = CONF['COLORS']['OFFSET_ADDR']\n instruction_name_color = CONF['COLORS']['INSTRUCTION_NAME']\n branch_false_color = CONF['COLORS']['BRANCH_FALSE']\n branch_true_color = CONF['COLORS']['BRANCH_TRUE']\n branch_color = CONF['COLORS']['BRANCH']\n exception_color = CONF['COLORS']['EXCEPTION']\n bb_color = CONF['COLORS']['BB']\n normal_color = CONF['COLORS']['NORMAL']\n print_fct = CONF['PRINT_FCT']\n colors = CONF['COLORS']['OUTPUT']\n for i in basic_blocks:\n print_fct('%s%s%s : \\n' % (bb_color, i.get_name(), normal_color))\n instructions = i.get_instructions()\n for ins in instructions:\n if nb in notes:\n for note in notes[nb]:\n _PrintNote(note, 1)\n print_fct('\\t%s%-3d%s(%s%08x%s) ' % (offset_color, nb,\n normal_color, offset_addr_color, idx, normal_color))\n print_fct('%s%-20s%s' % (instruction_name_color, ins.get_name(),\n normal_color))\n operands = ins.get_operands()\n print_fct('%s' % ', '.join(m_a.get_vm().colorize_operands(\n operands, colors)))\n op_value = ins.get_op_value()\n if ins == instructions[-1] and i.childs:\n print_fct(' ')\n if (op_value == 43 or op_value == 44) and len(i.childs) > 1:\n values = i.get_special_ins(idx).get_values()\n print_fct('%s[ D:%s%s ' % (branch_false_color, i.childs\n [0][2].get_name(), branch_color))\n print_fct(' '.join('%d:%s' % (values[j], i.childs[j + 1\n ][2].get_name()) for j in range(0, len(i.childs) - \n 1)) + ' ]%s' % normal_color)\n elif len(i.childs) == 2:\n print_fct('%s[ %s%s ' % (branch_false_color, i.childs[0\n ][2].get_name(), branch_true_color))\n print_fct(' '.join('%s' % c[2].get_name() for c in i.\n childs[1:]) + ' ]%s' % normal_color)\n else:\n print_fct('%s[ ' % branch_color + ' '.join('%s' % c[2].\n get_name() for c in i.childs) + ' ]%s' % normal_color)\n idx += ins.get_length()\n nb += 1\n print_fct('\\n')\n if i.get_exception_analysis():\n print_fct('\\t%s%s%s\\n' % (exception_color, i.exception_analysis\n .show_buff(), normal_color))\n print_fct('\\n')\n\n\nclass TmpBlock(object):\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\ndef method2json(mx, directed_graph=False):\n if directed_graph:\n return method2json_direct(mx)\n return method2json_undirect(mx)\n\n\ndef method2json_undirect(mx):\n d = {}\n reports = []\n d['reports'] = reports\n for DVMBasicMethodBlock in 
mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n cblock['instructions'].append(c_ins)\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n reports.append(cblock)\n return json.dumps(d)\n\n\ndef method2json_direct(mx):\n d = {}\n reports = []\n d['reports'] = reports\n hooks = {}\n l = []\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n for index, DVMBasicMethodBlockChild in enumerate(DVMBasicMethodBlock\n .childs):\n if DVMBasicMethodBlock.get_name() == DVMBasicMethodBlockChild[-1\n ].get_name():\n preblock = TmpBlock(DVMBasicMethodBlock.get_name() + '-pre')\n cnblock = {}\n cnblock['BasicBlockId'] = DVMBasicMethodBlock.get_name(\n ) + '-pre'\n cnblock['start'] = DVMBasicMethodBlock.start\n cnblock['notes'] = []\n cnblock['Edge'] = [DVMBasicMethodBlock.get_name()]\n cnblock['registers'] = 0\n cnblock['instructions'] = []\n cnblock['info_bb'] = 0\n l.append(cnblock)\n for parent in DVMBasicMethodBlock.fathers:\n hooks[parent[-1].get_name()] = []\n hooks[parent[-1].get_name()].append(preblock)\n for idx, child in enumerate(parent[-1].childs):\n if child[-1].get_name(\n ) == DVMBasicMethodBlock.get_name():\n hooks[parent[-1].get_name()].append(child[-1])\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n cblock['BasicBlockId'] = DVMBasicMethodBlock.get_name()\n cblock['start'] = DVMBasicMethodBlock.start\n cblock['notes'] = DVMBasicMethodBlock.get_notes()\n cblock['registers'] = mx.get_method().get_code().get_registers_size()\n cblock['instructions'] = []\n ins_idx = DVMBasicMethodBlock.start\n last_instru = None\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins['idx'] = ins_idx\n c_ins['name'] = DVMBasicMethodBlockInstruction.get_name()\n c_ins['operands'] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n c_ins['formatted_operands'\n ] = DVMBasicMethodBlockInstruction.get_formatted_operands()\n cblock['instructions'].append(c_ins)\n if DVMBasicMethodBlockInstruction.get_op_value(\n ) == 43 or DVMBasicMethodBlockInstruction.get_op_value() == 44:\n values = DVMBasicMethodBlock.get_special_ins(ins_idx)\n cblock['info_next'] = values.get_values()\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n last_instru = DVMBasicMethodBlockInstruction\n cblock['info_bb'] = 0\n if DVMBasicMethodBlock.childs:\n if len(DVMBasicMethodBlock.childs) > 1:\n cblock['info_bb'] = 1\n if last_instru.get_op_value() == 43 or last_instru.get_op_value(\n ) == 44:\n cblock['info_bb'] = 2\n cblock['Edge'] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n ok = False\n if DVMBasicMethodBlock.get_name() in hooks:\n if DVMBasicMethodBlockChild[-1] in hooks[DVMBasicMethodBlock\n .get_name()]:\n ok = True\n cblock['Edge'].append(hooks[DVMBasicMethodBlock.\n get_name()][0].get_name())\n if not ok:\n cblock['Edge'].append(DVMBasicMethodBlockChild[-1].get_name())\n exception_analysis = 
DVMBasicMethodBlock.get_exception_analysis()\n if exception_analysis:\n cblock['Exceptions'] = exception_analysis.get()\n reports.append(cblock)\n reports.extend(l)\n return json.dumps(d)\n\n\nclass SV(object):\n\n def __init__(self, size, buff):\n self.__size = size\n self.__value = unpack(self.__size, buff)[0]\n\n def _get(self):\n return pack(self.__size, self.__value)\n\n def __str__(self):\n return '0x%x' % self.__value\n\n def __int__(self):\n return self.__value\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = attr\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n\n def _export(self):\n return [x for x in self.__value._fields]\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n\n def __str__(self):\n return self.__value.__str__()\n\n\ndef object_to_bytes(obj):\n \"\"\"\n Convert a object to a bytearray or call get_raw() of the object\n if no useful type was found.\n \"\"\"\n if isinstance(obj, str):\n return bytearray(obj, 'UTF-8')\n elif isinstance(obj, bool):\n return bytearray()\n elif isinstance(obj, int):\n return pack('<L', obj)\n elif obj == None:\n return bytearray()\n elif isinstance(obj, bytearray):\n return obj\n else:\n return obj.get_raw()\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, 'show_' + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, 'wb') as fd:\n fd.write(buff)\n\n\ndef FormatClassToJava(input):\n \"\"\"\n Transoform a typical xml format 
class into java format\n\n :param input: the input class name\n :rtype: string\n \"\"\"\n return 'L' + input.replace('.', '/') + ';'\n\n\ndef FormatClassToPython(input):\n i = input[:-1]\n i = i.replace('/', '_')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatNameToPython(input):\n i = input.replace('<', '')\n i = i.replace('>', '')\n i = i.replace('$', '_')\n return i\n\n\ndef FormatDescriptorToPython(input):\n i = input.replace('/', '_')\n i = i.replace(';', '')\n i = i.replace('[', '')\n i = i.replace('(', '')\n i = i.replace(')', '')\n i = i.replace(' ', '')\n i = i.replace('$', '')\n return i\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n self.title = s\n self.children = []\n",
"step-5": "from __future__ import print_function\nfrom __future__ import absolute_import\n\nfrom builtins import str\nfrom builtins import range\nfrom builtins import object\nimport hashlib\nfrom xml.sax.saxutils import escape\nfrom struct import unpack, pack\nimport textwrap\n\nimport json\nfrom .anconf import warning, error, CONF, enable_colors, remove_colors, save_colors, color_range\n\n\ndef disable_print_colors():\n colors = save_colors()\n remove_colors()\n return colors\n\n\ndef enable_print_colors(colors):\n enable_colors(colors)\n\n\n# Handle exit message\ndef Exit(msg):\n warning(\"Error : \" + msg)\n raise (\"oops\")\n\n\ndef Warning(msg):\n warning(msg)\n\n\ndef _PrintBanner():\n print_fct = CONF[\"PRINT_FCT\"]\n print_fct(\"*\" * 75 + \"\\n\")\n\n\ndef _PrintSubBanner(title=None):\n print_fct = CONF[\"PRINT_FCT\"]\n if title == None:\n print_fct(\"#\" * 20 + \"\\n\")\n else:\n print_fct(\"#\" * 10 + \" \" + title + \"\\n\")\n\n\ndef _PrintNote(note, tab=0):\n print_fct = CONF[\"PRINT_FCT\"]\n note_color = CONF[\"COLORS\"][\"NOTE\"]\n normal_color = CONF[\"COLORS\"][\"NORMAL\"]\n print_fct(\"\\t\" * tab + \"%s# %s%s\" % (note_color, note, normal_color) + \"\\n\")\n\n\n# Print arg into a correct format\ndef _Print(name, arg):\n buff = name + \" \"\n\n if type(arg).__name__ == 'int':\n buff += \"0x%x\" % arg\n elif type(arg).__name__ == 'long':\n buff += \"0x%x\" % arg\n elif type(arg).__name__ == 'str':\n buff += \"%s\" % arg\n elif isinstance(arg, SV):\n buff += \"0x%x\" % arg.get_value()\n elif isinstance(arg, SVs):\n buff += arg.get_value().__str__()\n\n print(buff)\n\n\ndef PrettyShowEx(exceptions):\n if len(exceptions) > 0:\n CONF[\"PRINT_FCT\"](\"Exceptions:\\n\")\n for i in exceptions:\n CONF[\"PRINT_FCT\"](\"\\t%s%s%s\\n\" %\n (CONF[\"COLORS\"][\"EXCEPTION\"], i.show_buff(),\n CONF[\"COLORS\"][\"NORMAL\"]))\n\n\ndef _PrintXRef(tag, items):\n print_fct = CONF[\"PRINT_FCT\"]\n for i in items:\n print_fct(\"%s: %s %s %s %s\\n\" %\n (tag, i[0].get_class_name(), i[0].get_name(),\n i[0].get_descriptor(), ' '.join(\"%x\" % j.get_idx()\n for j in i[1])))\n\n\ndef _PrintDRef(tag, items):\n print_fct = CONF[\"PRINT_FCT\"]\n for i in items:\n print_fct(\"%s: %s %s %s %s\\n\" %\n (tag, i[0].get_class_name(), i[0].get_name(),\n i[0].get_descriptor(), ' '.join(\"%x\" % j for j in i[1])))\n\n\ndef _PrintDefault(msg):\n print_fct = CONF[\"PRINT_FCT\"]\n print_fct(msg)\n\n\ndef PrettyShow(m_a, basic_blocks, notes={}):\n idx = 0\n nb = 0\n\n offset_color = CONF[\"COLORS\"][\"OFFSET\"]\n offset_addr_color = CONF[\"COLORS\"][\"OFFSET_ADDR\"]\n instruction_name_color = CONF[\"COLORS\"][\"INSTRUCTION_NAME\"]\n branch_false_color = CONF[\"COLORS\"][\"BRANCH_FALSE\"]\n branch_true_color = CONF[\"COLORS\"][\"BRANCH_TRUE\"]\n branch_color = CONF[\"COLORS\"][\"BRANCH\"]\n exception_color = CONF[\"COLORS\"][\"EXCEPTION\"]\n bb_color = CONF[\"COLORS\"][\"BB\"]\n normal_color = CONF[\"COLORS\"][\"NORMAL\"]\n print_fct = CONF[\"PRINT_FCT\"]\n\n colors = CONF[\"COLORS\"][\"OUTPUT\"]\n\n for i in basic_blocks:\n print_fct(\"%s%s%s : \\n\" % (bb_color, i.get_name(), normal_color))\n instructions = i.get_instructions()\n for ins in instructions:\n if nb in notes:\n for note in notes[nb]:\n _PrintNote(note, 1)\n\n print_fct(\"\\t%s%-3d%s(%s%08x%s) \" %\n (offset_color, nb, normal_color, offset_addr_color, idx,\n normal_color))\n print_fct(\"%s%-20s%s\" %\n (instruction_name_color, ins.get_name(), normal_color))\n\n operands = ins.get_operands()\n print_fct(\n \"%s\" %\n \", 
\".join(m_a.get_vm().colorize_operands(operands, colors)))\n\n op_value = ins.get_op_value()\n if ins == instructions[-1] and i.childs:\n print_fct(\" \")\n\n # packed/sparse-switch\n if (op_value == 0x2b or op_value == 0x2c) and len(i.childs) > 1:\n values = i.get_special_ins(idx).get_values()\n print_fct(\"%s[ D:%s%s \" %\n (branch_false_color, i.childs[0][2].get_name(),\n branch_color))\n print_fct(' '.join(\"%d:%s\" % (\n values[j], i.childs[j + 1][2].get_name()) for j in\n range(0, len(i.childs) - 1)) + \" ]%s\" %\n normal_color)\n else:\n if len(i.childs) == 2:\n print_fct(\"%s[ %s%s \" % (branch_false_color,\n i.childs[0][2].get_name(),\n branch_true_color))\n print_fct(' '.join(\"%s\" % c[2].get_name(\n ) for c in i.childs[1:]) + \" ]%s\" % normal_color)\n else:\n print_fct(\"%s[ \" % branch_color + ' '.join(\n \"%s\" % c[2].get_name() for c in i.childs) + \" ]%s\" %\n normal_color)\n\n idx += ins.get_length()\n nb += 1\n\n print_fct(\"\\n\")\n\n if i.get_exception_analysis():\n print_fct(\"\\t%s%s%s\\n\" %\n (exception_color, i.exception_analysis.show_buff(),\n normal_color))\n\n print_fct(\"\\n\")\n\n\nclass TmpBlock(object):\n\n def __init__(self, name):\n self.name = name\n\n def get_name(self):\n return self.name\n\n\ndef method2json(mx, directed_graph=False):\n if directed_graph:\n return method2json_direct(mx)\n return method2json_undirect(mx)\n\n\ndef method2json_undirect(mx):\n d = {}\n reports = []\n d[\"reports\"] = reports\n\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n\n cblock[\"BasicBlockId\"] = DVMBasicMethodBlock.get_name()\n cblock[\"registers\"] = mx.get_method().get_code().get_registers_size()\n cblock[\"instructions\"] = []\n\n ins_idx = DVMBasicMethodBlock.start\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins[\"idx\"] = ins_idx\n c_ins[\"name\"] = DVMBasicMethodBlockInstruction.get_name()\n c_ins[\"operands\"] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n\n cblock[\"instructions\"].append(c_ins)\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n\n cblock[\"Edge\"] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n cblock[\"Edge\"].append(DVMBasicMethodBlockChild[-1].get_name())\n\n reports.append(cblock)\n\n return json.dumps(d)\n\n\ndef method2json_direct(mx):\n d = {}\n reports = []\n d[\"reports\"] = reports\n\n hooks = {}\n\n l = []\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n for index, DVMBasicMethodBlockChild in enumerate(\n DVMBasicMethodBlock.childs):\n if DVMBasicMethodBlock.get_name(\n ) == DVMBasicMethodBlockChild[-1].get_name():\n\n preblock = TmpBlock(DVMBasicMethodBlock.get_name() + \"-pre\")\n\n cnblock = {}\n cnblock[\"BasicBlockId\"] = DVMBasicMethodBlock.get_name(\n ) + \"-pre\"\n cnblock[\"start\"] = DVMBasicMethodBlock.start\n cnblock[\"notes\"] = []\n\n cnblock[\"Edge\"] = [DVMBasicMethodBlock.get_name()]\n cnblock[\"registers\"] = 0\n cnblock[\"instructions\"] = []\n cnblock[\"info_bb\"] = 0\n\n l.append(cnblock)\n\n for parent in DVMBasicMethodBlock.fathers:\n hooks[parent[-1].get_name()] = []\n hooks[parent[-1].get_name()].append(preblock)\n\n for idx, child in enumerate(parent[-1].childs):\n if child[-1].get_name() == DVMBasicMethodBlock.get_name(\n ):\n hooks[parent[-1].get_name()].append(child[-1])\n\n for DVMBasicMethodBlock in mx.basic_blocks.gets():\n cblock = {}\n\n cblock[\"BasicBlockId\"] = DVMBasicMethodBlock.get_name()\n cblock[\"start\"] = DVMBasicMethodBlock.start\n cblock[\"notes\"] = 
DVMBasicMethodBlock.get_notes()\n\n cblock[\"registers\"] = mx.get_method().get_code().get_registers_size()\n cblock[\"instructions\"] = []\n\n ins_idx = DVMBasicMethodBlock.start\n last_instru = None\n for DVMBasicMethodBlockInstruction in DVMBasicMethodBlock.get_instructions(\n ):\n c_ins = {}\n c_ins[\"idx\"] = ins_idx\n c_ins[\"name\"] = DVMBasicMethodBlockInstruction.get_name()\n c_ins[\"operands\"] = DVMBasicMethodBlockInstruction.get_operands(\n ins_idx)\n\n c_ins[\"formatted_operands\"\n ] = DVMBasicMethodBlockInstruction.get_formatted_operands()\n\n cblock[\"instructions\"].append(c_ins)\n\n if (DVMBasicMethodBlockInstruction.get_op_value() == 0x2b or\n DVMBasicMethodBlockInstruction.get_op_value() == 0x2c):\n values = DVMBasicMethodBlock.get_special_ins(ins_idx)\n cblock[\"info_next\"] = values.get_values()\n\n ins_idx += DVMBasicMethodBlockInstruction.get_length()\n last_instru = DVMBasicMethodBlockInstruction\n\n cblock[\"info_bb\"] = 0\n if DVMBasicMethodBlock.childs:\n if len(DVMBasicMethodBlock.childs) > 1:\n cblock[\"info_bb\"] = 1\n\n if (last_instru.get_op_value() == 0x2b or\n last_instru.get_op_value() == 0x2c):\n cblock[\"info_bb\"] = 2\n\n cblock[\"Edge\"] = []\n for DVMBasicMethodBlockChild in DVMBasicMethodBlock.childs:\n ok = False\n if DVMBasicMethodBlock.get_name() in hooks:\n if DVMBasicMethodBlockChild[-1] in hooks[\n DVMBasicMethodBlock.get_name()\n ]:\n ok = True\n cblock[\"Edge\"].append(hooks[DVMBasicMethodBlock.get_name(\n )][0].get_name())\n\n if not ok:\n cblock[\"Edge\"].append(DVMBasicMethodBlockChild[-1].get_name())\n\n exception_analysis = DVMBasicMethodBlock.get_exception_analysis()\n if exception_analysis:\n cblock[\"Exceptions\"] = exception_analysis.get()\n\n reports.append(cblock)\n\n reports.extend(l)\n\n return json.dumps(d)\n\n\nclass SV(object):\n\n def __init__(self, size, buff):\n self.__size = size\n self.__value = unpack(self.__size, buff)[0]\n\n def _get(self):\n return pack(self.__size, self.__value)\n\n def __str__(self):\n return \"0x%x\" % self.__value\n\n def __int__(self):\n return self.__value\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = attr\n\n\nclass SVs(object):\n\n def __init__(self, size, ntuple, buff):\n self.__size = size\n\n self.__value = ntuple._make(unpack(self.__size, buff))\n\n def _get(self):\n l = []\n for i in self.__value._fields:\n l.append(getattr(self.__value, i))\n return pack(self.__size, *l)\n\n def _export(self):\n return [x for x in self.__value._fields]\n\n def get_value_buff(self):\n return self._get()\n\n def get_value(self):\n return self.__value\n\n def set_value(self, attr):\n self.__value = self.__value._replace(**attr)\n\n def __str__(self):\n return self.__value.__str__()\n\n\ndef object_to_bytes(obj):\n \"\"\"\n Convert a object to a bytearray or call get_raw() of the object\n if no useful type was found.\n \"\"\"\n if isinstance(obj, str):\n return bytearray(obj, \"UTF-8\")\n elif isinstance(obj, bool):\n return bytearray()\n elif isinstance(obj, int):\n return pack(\"<L\", obj)\n elif obj == None:\n return bytearray()\n elif isinstance(obj, bytearray):\n return obj\n else:\n #print type(obj), obj\n return obj.get_raw()\n\n\nclass MethodBC(object):\n\n def show(self, value):\n getattr(self, \"show_\" + value)()\n\n\nclass BuffHandle(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def size(self):\n return len(self.__buff)\n\n def set_idx(self, idx):\n 
self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def readNullString(self, size):\n data = self.read(size)\n return data\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def read_at(self, offset, size):\n return self.__buff[offset:offset + size]\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n\n return buff\n\n def end(self):\n return self.__idx == len(self.__buff)\n\n\nclass Buff(object):\n\n def __init__(self, offset, buff):\n self.offset = offset\n self.buff = buff\n\n self.size = len(buff)\n\n\nclass _Bytecode(object):\n\n def __init__(self, buff):\n self.__buff = bytearray(buff)\n self.__idx = 0\n\n def read(self, size):\n if isinstance(size, SV):\n size = size.value\n\n buff = self.__buff[self.__idx:self.__idx + size]\n self.__idx += size\n\n return buff\n\n def readat(self, off):\n if isinstance(off, SV):\n off = off.value\n\n return self.__buff[off:]\n\n def read_b(self, size):\n return self.__buff[self.__idx:self.__idx + size]\n\n def set_idx(self, idx):\n self.__idx = idx\n\n def get_idx(self):\n return self.__idx\n\n def add_idx(self, idx):\n self.__idx += idx\n\n def register(self, type_register, fct):\n self.__registers[type_register].append(fct)\n\n def get_buff(self):\n return self.__buff\n\n def length_buff(self):\n return len(self.__buff)\n\n def set_buff(self, buff):\n self.__buff = buff\n\n def save(self, filename):\n buff = self._save()\n with open(filename, \"wb\") as fd:\n fd.write(buff)\n\n\ndef FormatClassToJava(input):\n \"\"\"\n Transoform a typical xml format class into java format\n\n :param input: the input class name\n :rtype: string\n \"\"\"\n return \"L\" + input.replace(\".\", \"/\") + \";\"\n\n\ndef FormatClassToPython(input):\n i = input[:-1]\n i = i.replace(\"/\", \"_\")\n i = i.replace(\"$\", \"_\")\n\n return i\n\n\ndef FormatNameToPython(input):\n i = input.replace(\"<\", \"\")\n i = i.replace(\">\", \"\")\n i = i.replace(\"$\", \"_\")\n\n return i\n\n\ndef FormatDescriptorToPython(input):\n i = input.replace(\"/\", \"_\")\n i = i.replace(\";\", \"\")\n i = i.replace(\"[\", \"\")\n i = i.replace(\"(\", \"\")\n i = i.replace(\")\", \"\")\n i = i.replace(\" \", \"\")\n i = i.replace(\"$\", \"\")\n\n return i\n\n\nclass Node(object):\n\n def __init__(self, n, s):\n self.id = n\n self.title = s\n self.children = []\n",
"step-ids": [
35,
62,
63,
65,
71
]
}
|
[
35,
62,
63,
65,
71
] |
# Named Entity Recognition on Medical Data (BIO Tagging)
# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec
import os
import re
import torch
import pickle
from torch import nn
from torch import optim
import torch.nn.functional as F
import numpy as np
import random
from DNC.dnc import DNC_Module # Importing DNC Implementation
class task_NER():
def __init__(self):
self.name = "NER_task_bio"
# Controller Params
self.controller_size = 128
self.controller_layers = 1
# Head Params
self.num_read_heads = 1
self.num_write_heads = 1
# Processor Params
self.num_inputs = 200 # Length of Embeddings
self.num_outputs = 7 # Class size
# Memory Params
self.memory_N = 128
self.memory_M = 128
# Training Params
self.num_batches = -1
self.save_batch = 5 # Saving model after every save_batch number of batches
self.batch_size = 10
self.num_epoch = 4
# Optimizer Params
self.adam_lr = 1e-4
self.adam_betas = (0.9, 0.999)
self.adam_eps = 1e-8
# Handles
self.machine = None
self.loss = None
self.optimizer = None
# Class Dictionaries
self.labelDict = None # Label Dictionary - Labels to Index
self.reverseDict = None # Inverse Label Dictionary - Index to Labels
# File Paths
self.concept_path_train = "../medical_data/train_data/concept" # Path to train concept files
self.text_path_train = "../medical_data/train_data/txt" # Path to train text summaries
self.concept_path_test = "../medical_data/test_data/concept" # Path to test concept files
self.text_path_test = "../medical_data/test_data/txt" # Path to test text summaries
self.save_path = "../medical_data/cleaned_files" # Save path
self.embed_dic_path = "../medical_data/embeddings/bio_embedding_dictionary.dat" # Word2Vec embeddings Dictionary path
self.random_vec = "../medical_data/embeddings/random_vec.dat" # Path to random embedding (Used to create new vectors)
self.model_path = "../saved_models/" # Stores Trained Models
# Miscellaneous
self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding
def get_task_name(self):
return self.name
def init_dnc(self):
self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)
def init_loss(self):
self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss
def init_optimizer(self):
self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)
def calc_loss(self, Y_pred, Y):
# Y: dim -> (sequence_len x batch_size)
# Y_pred: dim -> (sequence_len x batch_size x num_outputs)
loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)
for i in range(Y_pred.shape[0]):
loss_vec[i] = self.loss(Y_pred[i], Y[i])
return torch.mean(loss_vec)
def calc_cost(self, Y_pred, Y): # Calculates % Cost
# Y: dim -> (sequence_len x batch_size)
# Y_pred: dim -> (sequence_len x batch_size x sequence_width)
'''
Note:
1). For a prediction to be counted as a True Positive, it must match the labelled entity completely (not partially); otherwise it is counted as a False Negative.
2). For a prediction to be counted as a False Positive, it must be a full predicted entity (a 'B' tag followed by its 'I' tags) that does not completely match any labelled entity.
'''
# Stores correct class labels for each entity type
class_bag = {}
class_bag['problem'] = 0 # Total labels
class_bag['test'] = 0 # Total labels
class_bag['treatment'] = 0 # Total labels
class_bag['problem_cor'] = 0 # Correctly classified labels
class_bag['test_cor'] = 0 # Correctly classified labels
class_bag['treatment_cor'] = 0 # Correctly classified labels
class_bag['problem_fp'] = 0 # False positive classified labels
class_bag['test_fp'] = 0 # False positive classified labels
class_bag['treatment_fp'] = 0 # False positive classified labels
pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. dim -> (sequence_len*batch_size)
Y = np.transpose(Y.numpy()).reshape(-1) # Converting to NumPy Array and linearizing
cor_pred = (Y == pred_class).astype(int) # Comparing predictions and labels to find correctly predicted tokens
class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0 # % Accuracy of Correctly Predicted Words (Not Entities)
# Getting the beginning index of all the entities
beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])
# Getting the end index of all the entities (indices immediately before a 'Begin'/'Other' token, excluding indices that are themselves labelled 'Other')
target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1
if target[0] == -1:
target = target[1:]
end_idx = list(target[np.where(Y[target] != 6)[0]])
if Y[-1] != 6:
end_idx.append(Y.size-1)
assert len(beg_idx) == len(end_idx) # Sanity Check
class_bag['total'] = len(beg_idx) # Total number of Entities
# Counting Entities
sum_vec = np.cumsum(cor_pred) # Calculates cumulative summation of predicted vector
for b, e in zip(beg_idx, end_idx):
idx_range = e-b+1 # Entity span
sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly
lab = self.reverseDict[Y[b]][2:] # Extracting entity type (Problem, Test or Treatment)
class_bag[lab] = class_bag[lab]+1 # Getting count of each entities
if sum_range == idx_range: # +1 if entity is classified correctly
class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1
# Detecting False Positives
# Getting the beginning index of all the entities in Predicted Results
beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])
for b in beg_idx_p:
if cor_pred[b] == 0:
lab = self.reverseDict[pred_class[b]][2:]
class_bag[lab+'_fp'] = class_bag[lab+'_fp']+1
return class_bag
def print_word(self, token_class): # Prints the Class name from Class number
word = self.reverseDict[token_class]
print(word + "\n")
def clip_grads(self): # Clipping gradients for stability
"""Gradient clipping to the range [10, 10]."""
parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))
for p in parameters:
p.grad.data.clamp_(-10, 10)
def initialize_labels(self): # Initializing label dictionaries for Labels->IDX and IDX->Labels
self.labelDict = {} # Label Dictionary - Labels to Index
self.reverseDict = {} # Inverse Label Dictionary - Index to Labels
# Using BIO labelling scheme
self.labelDict['b-problem'] = 0 # Problem - Beginning
self.labelDict['i-problem'] = 1 # Problem - Inside
self.labelDict['b-test'] = 2 # Test - Beginning
self.labelDict['i-test'] = 3 # Test - Inside
self.labelDict['b-treatment'] = 4 # Treatment - Beginning
self.labelDict['i-treatment'] = 5 # Treatment - Inside
self.labelDict['o'] = 6 # Outside Token
# Making Inverse Label Dictionary
for k in self.labelDict.keys():
self.reverseDict[self.labelDict[k]] = k
# Saving the dictionaries into a file
self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, "label_dicts_bio.dat"))
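# Illustrative example (added note, not part of the original pipeline): with the dictionaries above,
# a phrase such as "his chest pain was relieved by nitroglycerin" would be tagged as
#   his -> 'o' (6), chest -> 'b-problem' (0), pain -> 'i-problem' (1), was -> 'o' (6),
#   relieved -> 'o' (6), by -> 'o' (6), nitroglycerin -> 'b-treatment' (4)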
def parse_concepts(self, file_path): # Parses the concept file to extract concepts and labels
conceptList = [] # Stores all the Concept in the File
f = open(file_path) # Opening and reading a concept file
content = f.readlines() # Reading all the lines in the concept file
f.close() # Closing the concept file
for x in content: # Reading each line in the concept file
dic = {}
# Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries
x = re.sub('\n', ' ', x)
x = re.sub(r'\ +', ' ', x)
x = x.strip().split('||')
temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]
temp1[0] = temp1[0][3:]
temp1[-3] = temp1[-3][0:-1]
entity = temp1[0:-2]
if len(entity) >= 1:
lab = ['i']*len(entity)
lab[0] = 'b'
lab = [l+"-"+label for l in lab]
else:
print("Data in File: " + file_path + ", not in expected format..")
exit()
noLab = [self.labelDict[l] for l in lab]
sLine, sCol = int(temp1[-2].split(":")[0]), int(temp1[-2].split(":")[1])
eLine, eCol = int(temp1[-1].split(":")[0]), int(temp1[-1].split(":")[1])
'''
# Printing the information
print("------------------------------------------------------------")
print("Entity: " + str(entity))
print("Entity Label: " + label)
print("Labels - BIO form: " + str(lab))
print("Labels Index: " + str(noLab))
print("Start Line: " + str(sLine) + ", Start Column: " + str(sCol))
print("End Line: " + str(eLine) + ", End Column: " + str(eCol))
print("------------------------------------------------------------")
'''
# Storing the information as a dictionary
dic['entity'] = entity # Entity Name (In the form of list of words)
dic['label'] = label # Common Label
dic['BIO_labels'] = lab # List of BIO labels for each word
dic['label_index'] = noLab # Labels in the index form
dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries
dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line
dic['end_line'] = eLine # End line of the concept in the corresponding text summaries
dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line
# Appending the concept dictionary to the list
conceptList.append(dic)
return conceptList # Returning the all the concepts in the current file in the form of dictionary list
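# Illustrative note (an assumption about the input data, not stated in this file): the parser above
# expects i2b2-style concept lines of the form
#   c="chest pain" 12:3 12:4||t="problem"
# i.e. the quoted entity text, start line:word and end line:word offsets, and the label after '||'.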
def parse_summary(self, file_path): # Parses the Text summaries
file_lines = [] # Stores the lins of files in the list form
tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])
default_label = len(self.labelDict)-1 # default_label is 6 (corresponding to the 'o' / 'Outside' tag)
# counter = 1 # Temporary variable used during print
f = open(file_path) # Opening and reading a concept file
content = f.readlines() # Reading all the lines in the concept file
f.close()
for x in content:
x = re.sub('\n', ' ', x)
x = re.sub(r'\ +', ' ', x)
file_lines.append(x.strip().split(" ")) # Splitting the line into a word list and appending it to the file list
tags.append([default_label]*len(file_lines[-1])) # Assigining the default_label to all the words in a line
'''
# Printing the information
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(file_lines[-1])
print("\nCorresponding labels:")
print(tags[-1])
print("------------------------------------------------------------")
counter += 1
'''
assert len(tags[-1]) == len(file_lines[-1]), "Line length is not matching labels length..." # Sanity Check
return file_lines, tags
def modify_labels(self, conceptList, tags): # Modifies the default labels of each word in text files with the true labels from the concept files
for e in conceptList: # Iterating over all the dictionary elements in the Concept List
if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single line or multiple line in the summary
tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]
else:
start = e['start_line']
end = e['end_line']
beg = 0
for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries
if i == start:
tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]
beg = len(tags[i-1])-e['start_word_no']
elif i == end:
tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]
else:
tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]
beg = beg+len(tags[i-1])
return tags
def print_data(self, file, file_lines, tags): # Prints the given data
counter = 1
print("\n************ Printing details of the file: " + file + " ************\n")
for x in file_lines:
print("------------------------------------------------------------")
print("File Lines No: " + str(counter))
print(x)
print("\nCorresponding labels:")
print([self.reverseDict[i] for i in tags[counter-1]])
print("\nCorresponding Label Indices:")
print(tags[counter-1])
print("------------------------------------------------------------")
counter += 1
def save_data(self, obj_list, s_path): # Saves the file into the binary file using Pickle
# Note: The 'obj_list' must be a list and none other than that
pickle.dump(tuple(obj_list), open(s_path,'wb'))
def acquire_data(self, task): # Read all the concept files to get concepts and labels, process them and save them
data = {} # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name
if task == 'train': # Determining the task type to assign the data path accordingly
t_path = self.text_path_train
c_path = self.concept_path_train
else:
t_path = self.text_path_test
c_path = self.concept_path_test
for f in os.listdir(t_path):
f1 = f.split('.')[0] + ".con"
if os.path.isfile(os.path.join(c_path, f1)):
conceptList = self.parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file
file_lines, tags = self.parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes
tags = self.modify_labels(conceptList, tags) # Modifies the default labels of each word with the true labels from the concept files
data[f1] = [conceptList, file_lines, tags] # Storing each object in dictionary
# self.print_data(f, file_lines, tags) # Printing the details
return data
def structure_data(self, data_dict): # Structures the data in proper trainable form
final_line_list = [] # Stores words of all the files in separate sub-lists
final_tag_list = [] # Stores tags of all the files in separate sub-lists
for k in data_dict.keys(): # Extracting data from each pre-processed file in dictionary
file_lines = data_dict[k][1] # Extracting story
tags = data_dict[k][2] # Extracting corresponding labels
# Creating empty lists
temp1 = []
temp2 = []
# Merging all the lines in file into a single list. Same for corresponding labels
for i in range(len(file_lines)):
temp1.extend(file_lines[i])
temp2.extend(tags[i])
assert len(temp1) == len(temp2), "Word length not matching Label length for story in " + str(k) # Sanity Check
final_line_list.append(temp1)
final_tag_list.append(temp2)
assert len(final_line_list) == len(final_tag_list), "Number of stories not matching number of labels list" # Sanity Check
return final_line_list, final_tag_list
def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length
diff = 0
max_len = 0
outside_class = len(self.labelDict)-1 # Classifying padding symbol as "outside" term
# Calculating Max Summary Length
for i in range(len(line_list)):
if len(line_list[i])>max_len:
max_len = len(line_list[i])
for i in range(len(line_list)):
diff = max_len - len(line_list[i])
line_list[i].extend([self.padding_symbol]*diff)
tag_list[i].extend([outside_class]*diff)
assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), "Padding unsuccessful" # Sanity check
return np.asarray(line_list), np.asarray(tag_list) # Making NumPy arrays of shape (batch_size x story_length x word_size) and (batch_size x story_length) respectively
def embed_input(self, line_list): # Converts words to vector embeddings
final_list = [] # Stores embedded words
summary = None # Temp variable
word = None # Temp variable
temp = None # Temp variable
embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle
r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding
for i in range(len(line_list)): # Iterating over all the summaries
summary = line_list[i]
final_list.append([]) # Reserving space for curent summary
for j in range(len(summary)):
word = summary[j].lower()
if word in embed_dic: # Checking for existence of word in dictionary
final_list[-1].append(embed_dic[word])
else:
temp = r_embed[:] # Copying the values of the list
random.shuffle(temp) # Randomly shuffling the word embedding to make it unique
temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array
final_list[-1].append(temp)
return final_list
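# Note (an assumption about the saved embedding files, inferred from their use above): the pickled
# embed_dic is expected to map lowercase tokens to 200-dimensional vectors matching self.num_inputs,
# and random_vec to hold a single 200-dimensional list that is shuffled to build vectors for unknown words.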
def prepare_data(self, task='train'): # Preparing all the data necessary
line_list, tag_list = None, None
'''
line_list is the list of rows, where each row is a list of all the words in a medical summary
Similar is the case for tag_list, except, it stores labels for each words
'''
if not os.path.exists(self.save_path):
os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data
if not os.path.exists(os.path.join(self.save_path, "label_dicts_bio.dat")):
self.initialize_labels() # Initialize label to index dictionaries
else:
self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, "label_dicts_bio.dat"), 'rb')) # Loading Label dictionaries
if not os.path.exists(os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat")):
data_dict = self.acquire_data(task) # Read data from file
line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form
line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings
self.save_data([line_list, tag_list], os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat"))
else:
line_list, tag_list = pickle.load(open(os.path.join(self.save_path, "object_dict_bio_"+str(task)+".dat"), 'rb')) # Loading Data dictionary
return line_list, tag_list
def get_data(self, task='train'):
line_list, tag_list = self.prepare_data(task)
# Shuffling stories
story_idx = list(range(0, len(line_list)))
random.shuffle(story_idx)
num_batch = int(len(story_idx)/self.batch_size)
self.num_batches = num_batch
# Out Data
x_out = []
y_out = []
counter = 1
for i in story_idx:
if num_batch<=0:
break
x_out.append(line_list[i])
y_out.append(tag_list[i])
if counter % self.batch_size == 0:
counter = 0
# Padding and converting labels to one hot vectors
x_out_pad, y_out_pad = self.padding(x_out, y_out)
x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)
y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length) to (story_length x batch_size)
x_out = []
y_out = []
num_batch -= 1
yield (self.num_batches - num_batch), x_out_array, y_out_array
counter += 1
def train_model(self):
# Here, the model is optimized using Cross Entropy Loss.
loss_list = []
seq_length = []
last_batch = 0
# self.load_model(1, 99, 13) # Loading Pre-Trained model to train further
for j in range(self.num_epoch):
for batch_num, X, Y in self.get_data(task='train'):
self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones
self.machine.initialization(self.batch_size) # Initializing states
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)
# The DNC is fed the input tokens one step at a time, each paired with an embedding
# obtained from a backward pass over the same sequence (in mirrored order), and its
# outputs are trained to match the corresponding labels
embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing Embeddings from backwards
loss = self.calc_loss(Y_out, Y)
loss.backward()
self.clip_grads()
self.optimizer.step()
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']
tot = class_bag['total']
loss_list += [loss.item()]
seq_length += [Y.shape[0]]
if (batch_num % self.save_batch) == 0:
self.save_model(j, batch_num)
last_batch = batch_num
print("Epoch: " + str(j) + "/" + str(self.num_epoch) + ", Batch: " + str(batch_num) + "/" + str(self.num_batches) + ", Loss: {0:.2f}, ".format(loss.item()) + \
"Batch Accuracy (Entity Prediction): {0:.2f} %, ".format((float(corr)/float(tot))*100.0) + "Batch Accuracy (Word Prediction): {0:.2f} %".format(class_bag['word_pred_acc']))
self.save_model(j, last_batch)
def test_model(self): # Testing the model
correct = 0
total = 0
result_dict = {}
result_dict['total_problem'] = 0 # Total labels in data
result_dict['total_test'] = 0 # Total labels in data
result_dict['total_treatment'] = 0 # Total labels in data
result_dict['correct_problem'] = 0 # Correctly classified labels
result_dict['correct_test'] = 0 # Correctly classified labels
result_dict['correct_treatment'] = 0 # Correctly classified labels
result_dict['false_positive_problem'] = 0 # False Positive labels
result_dict['false_positive_test'] = 0 # False Positive labels
result_dict['false_positive_treatment'] = 0 # False Positive labels
print("\n")
for batch_num, X, Y in self.get_data(task='test'):
self.machine.initialization(self.batch_size) # Initializing states
Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)
# The DNC is fed the input tokens one step at a time, each paired with an embedding
# obtained from a backward pass over the same sequence (in mirrored order), and its
# outputs are compared against the corresponding labels
embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation
temp_size = X.shape[0]
for i in range(temp_size):
Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1])
class_bag = self.calc_cost(Y_out, Y)
corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']
tot = class_bag['total']
result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']
result_dict['total_test'] = result_dict['total_test'] + class_bag['test']
result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']
result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']
result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']
result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']
result_dict['false_positive_problem'] = result_dict['false_positive_problem'] + class_bag['problem_fp']
result_dict['false_positive_test'] = result_dict['false_positive_test'] + class_bag['test_fp']
result_dict['false_positive_treatment'] = result_dict['false_positive_treatment'] + class_bag['treatment_fp']
correct += corr
total += tot
print("Test Example " + str(batch_num) + "/" + str(self.num_batches) + " processed, Batch Accuracy: {0:.2f} %, ".format((float(corr)/float(tot))*100.0) + "Batch Accuracy (Word Prediction): {0:.2f} %".format(class_bag['word_pred_acc']))
result_dict['accuracy'] = (float(correct)/float(total))*100.0
result_dict = self.calc_metrics(result_dict)
print("\nOverall Entity Prediction Accuracy: {0:.2f} %".format(result_dict['accuracy']))
return result_dict
def calc_metrics(self, result_dict): # Calculates Certain Metrices
precision_p = float(result_dict['correct_problem'])/float(result_dict['correct_problem'] + result_dict['false_positive_problem']) # Problem Precision
recall_p = float(result_dict['correct_problem'])/float(result_dict['total_problem']) # Problem Recall
precision_ts = float(result_dict['correct_test'])/float(result_dict['correct_test'] + result_dict['false_positive_test']) # Test Precision
recall_ts = float(result_dict['correct_test'])/float(result_dict['total_test']) # Test Recall
precision_tr = float(result_dict['correct_treatment'])/float(result_dict['correct_treatment'] + result_dict['false_positive_treatment']) # Treatment Precision
recall_tr = float(result_dict['correct_treatment'])/float(result_dict['total_treatment']) # Treatment Recall
f_score_p = 2*precision_p*recall_p/(precision_p+recall_p) # Problem F1 Score
f_score_ts = 2*precision_ts*recall_ts/(precision_ts+recall_ts) # Test F1 Score
f_score_tr = 2*precision_tr*recall_tr/(precision_tr+recall_tr) # Treatment F1 Score
result_dict['problem_precision'] = precision_p
result_dict['problem_recall'] = recall_p
result_dict['problem_f1'] = f_score_p
result_dict['test_precision'] = precision_ts
result_dict['test_recall'] = recall_ts
result_dict['test_f1'] = f_score_ts
result_dict['treatment_precision'] = precision_tr
result_dict['treatment_recall'] = recall_tr
result_dict['treatment_f1'] = f_score_tr
result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr)/3.0 # Macro Average F1 Score
# Micro Average F1 Score
correct_sum = result_dict['correct_problem'] + result_dict['correct_test'] + result_dict['correct_treatment']
fp_sum = result_dict['false_positive_problem'] + result_dict['false_positive_test'] + result_dict['false_positive_treatment']
total_sum = result_dict['total_problem'] + result_dict['total_test'] + result_dict['total_treatment']
precision_avg = float(correct_sum)/float(correct_sum + fp_sum)
recall_avg = float(correct_sum)/float(total_sum)
result_dict['micro_average_f1'] = 2*precision_avg*recall_avg/(precision_avg+recall_avg)
return result_dict
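# For reference (consistent with the computation above): precision = TP / (TP + FP) and
# recall = TP / (total gold entities) per entity type, with F1 = 2*P*R / (P + R).
# The macro average is the unweighted mean of the three per-type F1 scores, while the
# micro average pools correct / false-positive / total counts across types before computing F1.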
def save_model(self, curr_epoch, curr_batch):
# The 'start_epoch' and 'start_batch' entries below record the epoch and batch number from which to resume training after the model is next loaded
# Note: it is recommended to resume from 'start_epoch' alone rather than from 'start_epoch' + 'start_batch', because batches are formed randomly
if not os.path.exists(os.path.join(self.model_path, self.name)):
os.mkdir(os.path.join(self.model_path, self.name))
state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}
filename = self.model_path + self.name + "/" + self.name + "_" + str(curr_epoch) + "_" + str(curr_batch) + "_saved_model.pth.tar"
torch.save(state_dic, filename)
def load_model(self, option, epoch, batch):
path = self.model_path + self.name + "/" + self.name + "_" + str(epoch) + "_" + str(batch) + "_saved_model.pth.tar"
if option == 1: # Loading for training
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_dic'])
else: # Loading for testing
checkpoint = torch.load(path)
self.machine.load_state_dict(checkpoint['state_dict'])
self.machine.eval()
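# Minimal usage sketch (an addition for illustration, not part of the original script): assuming the
# data, embedding and model directories referenced in __init__ exist, the class above could be
# driven end to end as follows.
if __name__ == "__main__":
    task = task_NER()
    task.init_dnc()               # build the DNC model
    task.init_loss()              # cross-entropy loss
    task.init_optimizer()         # Adam optimizer
    task.train_model()            # train and checkpoint periodically
    results = task.test_model()   # evaluate on the test split
    print("Micro-averaged F1: {0:.4f}".format(results['micro_average_f1']))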
|
normal
|
{
"blob_id": "eb99def75404bc3b674bcb633714009149f2d50d",
"index": 5097,
"step-1": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n <mask token>\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n <mask token>\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n <mask token>\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n <mask token>\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n <mask token>\n <mask token>\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n <mask token>\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n 
torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy 
(Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n <mask token>\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n <mask token>\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + 
str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, file_path):\n file_lines = []\n tags = []\n default_label = len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 1\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n <mask token>\n <mask token>\n <mask token>\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 
'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 
1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n <mask token>\n <mask token>\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-3": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.\n controller_size, self.controller_layers, self.num_read_heads,\n self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y):\n \"\"\"\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.\n 2). 
For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n \"\"\"\n class_bag = {}\n class_bag['problem'] = 0\n class_bag['test'] = 0\n class_bag['treatment'] = 0\n class_bag['problem_cor'] = 0\n class_bag['test_cor'] = 0\n class_bag['treatment_cor'] = 0\n class_bag['problem_fp'] = 0\n class_bag['test_fp'] = 0\n class_bag['treatment_fp'] = 0\n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()\n ).reshape(-1)\n Y = np.transpose(Y.numpy()).reshape(-1)\n cor_pred = (Y == pred_class).astype(np.int)\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size\n ) * 100.0\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size - 1)\n assert len(beg_idx) == len(end_idx)\n class_bag['total'] = len(beg_idx)\n sum_vec = np.cumsum(cor_pred)\n for b, e in zip(beg_idx, end_idx):\n idx_range = e - b + 1\n sum_range = sum_vec[e] - sum_vec[b] + 1\n lab = self.reverseDict[Y[b]][2:]\n class_bag[lab] = class_bag[lab] + 1\n if sum_range == idx_range:\n class_bag[lab + '_cor'] = class_bag[lab + '_cor'] + 1\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab + '_fp'] = class_bag[lab + '_fp'] + 1\n return class_bag\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n 
print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, file_path):\n file_lines = []\n tags = []\n default_label = len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 1\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n\n def structure_data(self, data_dict):\n final_line_list = []\n final_tag_list = []\n for k in data_dict.keys():\n file_lines = data_dict[k][1]\n tags = data_dict[k][2]\n temp1 = []\n temp2 = []\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n assert len(temp1) == len(temp2\n ), 'Word length not matching Label length for story in ' + str(\n k)\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n assert len(final_line_list) == len(final_tag_list\n ), 'Number of stories not 
matching number of labels list'\n return final_line_list, final_tag_list\n\n def padding(self, line_list, tag_list):\n diff = 0\n max_len = 0\n outside_class = len(self.labelDict) - 1\n for i in range(len(line_list)):\n if len(line_list[i]) > max_len:\n max_len = len(line_list[i])\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol] * diff)\n tag_list[i].extend([outside_class] * diff)\n assert len(line_list[i]) == max_len and len(line_list[i]) == len(\n tag_list[i]), 'Padding unsuccessful'\n return np.asarray(line_list), np.asarray(tag_list)\n <mask token>\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 
'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict):\n precision_p = float(result_dict['correct_problem']) / float(\n result_dict['correct_problem'] + result_dict[\n 'false_positive_problem'])\n recall_p = float(result_dict['correct_problem']) / float(result_dict\n ['total_problem'])\n precision_ts = float(result_dict['correct_test']) / float(\n result_dict['correct_test'] + result_dict['false_positive_test'])\n recall_ts = float(result_dict['correct_test']) / float(result_dict[\n 'total_test'])\n precision_tr = float(result_dict['correct_treatment']) / float(\n result_dict['correct_treatment'] + result_dict[\n 'false_positive_treatment'])\n recall_tr = float(result_dict['correct_treatment']) / float(result_dict\n ['total_treatment'])\n f_score_p = 2 * precision_p * recall_p / (precision_p + recall_p)\n f_score_ts = 2 * precision_ts * recall_ts / (precision_ts + recall_ts)\n f_score_tr = 2 * precision_tr * recall_tr / (precision_tr + recall_tr)\n result_dict['problem_precision'] = precision_p\n 
result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr\n ) / 3.0\n correct_sum = result_dict['correct_problem'] + result_dict[\n 'correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict[\n 'false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'\n ] + result_dict['total_treatment']\n precision_avg = float(correct_sum) / float(correct_sum + fp_sum)\n recall_avg = float(correct_sum) / float(total_sum)\n result_dict['micro_average_f1'] = 2 * precision_avg * recall_avg / (\n precision_avg + recall_avg)\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1,\n 'start_batch': curr_batch + 1, 'state_dict': self.machine.\n state_dict(), 'optimizer_dic': self.optimizer.state_dict()}\n filename = self.model_path + self.name + '/' + self.name + '_' + str(\n curr_epoch) + '_' + str(curr_batch) + '_saved_model.pth.tar'\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-4": "<mask token>\n\n\nclass task_NER:\n\n def __init__(self):\n self.name = 'NER_task_bio'\n self.controller_size = 128\n self.controller_layers = 1\n self.num_read_heads = 1\n self.num_write_heads = 1\n self.num_inputs = 200\n self.num_outputs = 7\n self.memory_N = 128\n self.memory_M = 128\n self.num_batches = -1\n self.save_batch = 5\n self.batch_size = 10\n self.num_epoch = 4\n self.adam_lr = 0.0001\n self.adam_betas = 0.9, 0.999\n self.adam_eps = 1e-08\n self.machine = None\n self.loss = None\n self.optimizer = None\n self.labelDict = None\n self.reverseDict = None\n self.concept_path_train = '../medical_data/train_data/concept'\n self.text_path_train = '../medical_data/train_data/txt'\n self.concept_path_test = '../medical_data/test_data/concept'\n self.text_path_test = '../medical_data/test_data/txt'\n self.save_path = '../medical_data/cleaned_files'\n self.embed_dic_path = (\n '../medical_data/embeddings/bio_embedding_dictionary.dat')\n self.random_vec = '../medical_data/embeddings/random_vec.dat'\n self.model_path = '../saved_models/'\n self.padding_symbol = np.full(self.num_inputs, 0.01)\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.\n controller_size, self.controller_layers, self.num_read_heads,\n self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction='mean')\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr=self.\n adam_lr, betas=self.adam_betas, eps=self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y):\n \"\"\"\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.\n 2). 
For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n \"\"\"\n class_bag = {}\n class_bag['problem'] = 0\n class_bag['test'] = 0\n class_bag['treatment'] = 0\n class_bag['problem_cor'] = 0\n class_bag['test_cor'] = 0\n class_bag['treatment_cor'] = 0\n class_bag['problem_fp'] = 0\n class_bag['test_fp'] = 0\n class_bag['treatment_fp'] = 0\n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()\n ).reshape(-1)\n Y = np.transpose(Y.numpy()).reshape(-1)\n cor_pred = (Y == pred_class).astype(np.int)\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size\n ) * 100.0\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size - 1)\n assert len(beg_idx) == len(end_idx)\n class_bag['total'] = len(beg_idx)\n sum_vec = np.cumsum(cor_pred)\n for b, e in zip(beg_idx, end_idx):\n idx_range = e - b + 1\n sum_range = sum_vec[e] - sum_vec[b] + 1\n lab = self.reverseDict[Y[b]][2:]\n class_bag[lab] = class_bag[lab] + 1\n if sum_range == idx_range:\n class_bag[lab + '_cor'] = class_bag[lab + '_cor'] + 1\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab + '_fp'] = class_bag[lab + '_fp'] + 1\n return class_bag\n\n def print_word(self, token_class):\n word = self.reverseDict[token_class]\n print(word + '\\n')\n\n def clip_grads(self):\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine\n .parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self):\n self.labelDict = {}\n self.reverseDict = {}\n self.labelDict['b-problem'] = 0\n self.labelDict['i-problem'] = 1\n self.labelDict['b-test'] = 2\n self.labelDict['i-test'] = 3\n self.labelDict['b-treatment'] = 4\n self.labelDict['i-treatment'] = 5\n self.labelDict['o'] = 6\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n self.save_data([self.labelDict, self.reverseDict], os.path.join(\n self.save_path, 'label_dicts_bio.dat'))\n\n def parse_concepts(self, file_path):\n conceptList = []\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n dic = {}\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n x = x.strip().split('||')\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n if len(entity) >= 1:\n lab = ['i'] * len(entity)\n lab[0] = 'b'\n lab = [(l + '-' + label) for l in lab]\n else:\n print('Data in File: ' + file_path +\n ', not in expected format..')\n exit()\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(':')[0]), int(temp1[-2].split\n (':')[1])\n eLine, eCol = int(temp1[-1].split(':')[0]), int(temp1[-1].split\n (':')[1])\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n 
print(\"------------------------------------------------------------\")\n \"\"\"\n dic['entity'] = entity\n dic['label'] = label\n dic['BIO_labels'] = lab\n dic['label_index'] = noLab\n dic['start_line'] = sLine\n dic['start_word_no'] = sCol\n dic['end_line'] = eLine\n dic['end_word_no'] = eCol\n conceptList.append(dic)\n return conceptList\n\n def parse_summary(self, file_path):\n file_lines = []\n tags = []\n default_label = len(self.labelDict) - 1\n f = open(file_path)\n content = f.readlines()\n f.close()\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub('\\\\ +', ' ', x)\n file_lines.append(x.strip().split(' '))\n tags.append([default_label] * len(file_lines[-1]))\n \"\"\"\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n \"\"\"\n assert len(tags[-1]) == len(file_lines[-1]\n ), 'Line length is not matching labels length...'\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags):\n for e in conceptList:\n if e['start_line'] == e['end_line']:\n tags[e['start_line'] - 1][e['start_word_no']:e[\n 'end_word_no'] + 1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end + 1):\n if i == start:\n tags[i - 1][e['start_word_no']:] = e['label_index'][\n 0:len(tags[i - 1]) - e['start_word_no']]\n beg = len(tags[i - 1]) - e['start_word_no']\n elif i == end:\n tags[i - 1][0:e['end_word_no'] + 1] = e['label_index'][\n beg:]\n else:\n tags[i - 1][:] = e['label_index'][beg:beg + len(\n tags[i - 1])]\n beg = beg + len(tags[i - 1])\n return tags\n\n def print_data(self, file, file_lines, tags):\n counter = 1\n print('\\n************ Printing details of the file: ' + file +\n ' ************\\n')\n for x in file_lines:\n print(\n '------------------------------------------------------------')\n print('File Lines No: ' + str(counter))\n print(x)\n print('\\nCorresponding labels:')\n print([self.reverseDict[i] for i in tags[counter - 1]])\n print('\\nCorresponding Label Indices:')\n print(tags[counter - 1])\n print(\n '------------------------------------------------------------')\n counter += 1\n\n def save_data(self, obj_list, s_path):\n pickle.dump(tuple(obj_list), open(s_path, 'wb'))\n\n def acquire_data(self, task):\n data = {}\n if task == 'train':\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + '.con'\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1))\n file_lines, tags = self.parse_summary(os.path.join(t_path, f))\n tags = self.modify_labels(conceptList, tags)\n data[f1] = [conceptList, file_lines, tags]\n return data\n\n def structure_data(self, data_dict):\n final_line_list = []\n final_tag_list = []\n for k in data_dict.keys():\n file_lines = data_dict[k][1]\n tags = data_dict[k][2]\n temp1 = []\n temp2 = []\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n assert len(temp1) == len(temp2\n ), 'Word length not matching Label length for story in ' + str(\n k)\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n assert len(final_line_list) == len(final_tag_list\n ), 'Number of stories not 
matching number of labels list'\n return final_line_list, final_tag_list\n\n def padding(self, line_list, tag_list):\n diff = 0\n max_len = 0\n outside_class = len(self.labelDict) - 1\n for i in range(len(line_list)):\n if len(line_list[i]) > max_len:\n max_len = len(line_list[i])\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol] * diff)\n tag_list[i].extend([outside_class] * diff)\n assert len(line_list[i]) == max_len and len(line_list[i]) == len(\n tag_list[i]), 'Padding unsuccessful'\n return np.asarray(line_list), np.asarray(tag_list)\n\n def embed_input(self, line_list):\n final_list = []\n summary = None\n word = None\n temp = None\n embed_dic = pickle.load(open(self.embed_dic_path, 'rb'))\n r_embed = pickle.load(open(self.random_vec, 'rb'))\n for i in range(len(line_list)):\n summary = line_list[i]\n final_list.append([])\n for j in range(len(summary)):\n word = summary[j].lower()\n if word in embed_dic:\n final_list[-1].append(embed_dic[word])\n else:\n temp = r_embed[:]\n random.shuffle(temp)\n temp = np.asarray(temp, dtype=np.float32)\n final_list[-1].append(temp)\n return final_list\n\n def prepare_data(self, task='train'):\n line_list, tag_list = None, None\n \"\"\"\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n \"\"\"\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path)\n if not os.path.exists(os.path.join(self.save_path,\n 'label_dicts_bio.dat')):\n self.initialize_labels()\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.\n join(self.save_path, 'label_dicts_bio.dat'), 'rb'))\n if not os.path.exists(os.path.join(self.save_path, \n 'object_dict_bio_' + str(task) + '.dat')):\n data_dict = self.acquire_data(task)\n line_list, tag_list = self.structure_data(data_dict)\n line_list = self.embed_input(line_list)\n self.save_data([line_list, tag_list], os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.\n save_path, 'object_dict_bio_' + str(task) + '.dat'), 'rb'))\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n num_batch = int(len(story_idx) / self.batch_size)\n self.num_batches = num_batch\n x_out = []\n y_out = []\n counter = 1\n for i in story_idx:\n if num_batch <= 0:\n break\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n if counter % self.batch_size == 0:\n counter = 0\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=\n torch.float32)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=\n torch.long)\n x_out = []\n y_out = []\n num_batch -= 1\n yield self.num_batches - num_batch, x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n loss_list = []\n seq_length = []\n last_batch = 0\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad()\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.\n num_outputs), dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[\n temp_size - i - 1])\n loss = 
self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n if batch_num % self.save_batch == 0:\n self.save_model(j, batch_num)\n last_batch = batch_num\n print('Epoch: ' + str(j) + '/' + str(self.num_epoch) +\n ', Batch: ' + str(batch_num) + '/' + str(self.\n num_batches) + ', Loss: {0:.2f}, '.format(loss.item()) +\n 'Batch Accuracy (Entity Prediction): {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self):\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0\n result_dict['total_test'] = 0\n result_dict['total_treatment'] = 0\n result_dict['correct_problem'] = 0\n result_dict['correct_test'] = 0\n result_dict['correct_treatment'] = 0\n result_dict['false_positive_problem'] = 0\n result_dict['false_positive_test'] = 0\n result_dict['false_positive_treatment'] = 0\n print('\\n')\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size)\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs),\n dtype=torch.float32)\n embeddings = self.machine.backward_prediction(X)\n temp_size = X.shape[0]\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size -\n i - 1])\n class_bag = self.calc_cost(Y_out, Y)\n corr = class_bag['problem_cor'] + class_bag['test_cor'\n ] + class_bag['treatment_cor']\n tot = class_bag['total']\n result_dict['total_problem'] = result_dict['total_problem'\n ] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag[\n 'test']\n result_dict['total_treatment'] = result_dict['total_treatment'\n ] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'\n ] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'\n ] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'\n ] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict[\n 'false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict[\n 'false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict[\n 'false_positive_treatment'] + class_bag['treatment_fp']\n correct += corr\n total += tot\n print('Test Example ' + str(batch_num) + '/' + str(self.\n num_batches) + ' processed, Batch Accuracy: {0:.2f} %, '.\n format(float(corr) / float(tot) * 100.0) +\n 'Batch Accuracy (Word Prediction): {0:.2f} %'.format(\n class_bag['word_pred_acc']))\n result_dict['accuracy'] = float(correct) / float(total) * 100.0\n result_dict = self.calc_metrics(result_dict)\n print('\\nOverall Entity Prediction Accuracy: {0:.2f} %'.format(\n result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict):\n precision_p = float(result_dict['correct_problem']) / float(\n result_dict['correct_problem'] + result_dict[\n 'false_positive_problem'])\n recall_p = float(result_dict['correct_problem']) / float(result_dict\n ['total_problem'])\n precision_ts = float(result_dict['correct_test']) / float(\n result_dict['correct_test'] + result_dict['false_positive_test'])\n recall_ts = 
float(result_dict['correct_test']) / float(result_dict[\n 'total_test'])\n precision_tr = float(result_dict['correct_treatment']) / float(\n result_dict['correct_treatment'] + result_dict[\n 'false_positive_treatment'])\n recall_tr = float(result_dict['correct_treatment']) / float(result_dict\n ['total_treatment'])\n f_score_p = 2 * precision_p * recall_p / (precision_p + recall_p)\n f_score_ts = 2 * precision_ts * recall_ts / (precision_ts + recall_ts)\n f_score_tr = 2 * precision_tr * recall_tr / (precision_tr + recall_tr)\n result_dict['problem_precision'] = precision_p\n result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr\n ) / 3.0\n correct_sum = result_dict['correct_problem'] + result_dict[\n 'correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict[\n 'false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'\n ] + result_dict['total_treatment']\n precision_avg = float(correct_sum) / float(correct_sum + fp_sum)\n recall_avg = float(correct_sum) / float(total_sum)\n result_dict['micro_average_f1'] = 2 * precision_avg * recall_avg / (\n precision_avg + recall_avg)\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1,\n 'start_batch': curr_batch + 1, 'state_dict': self.machine.\n state_dict(), 'optimizer_dic': self.optimizer.state_dict()}\n filename = self.model_path + self.name + '/' + self.name + '_' + str(\n curr_epoch) + '_' + str(curr_batch) + '_saved_model.pth.tar'\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + '/' + self.name + '_' + str(epoch\n ) + '_' + str(batch) + '_saved_model.pth.tar'\n if option == 1:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else:\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()\n",
"step-5": "# Named Entity Recognition on Medical Data (BIO Tagging)\n# Bio-Word2Vec Embeddings Source and Reference: https://github.com/ncbi-nlp/BioWordVec\n\nimport os\nimport re\nimport torch\nimport pickle\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport numpy as np\nimport random\n\nfrom DNC.dnc import DNC_Module # Importing DNC Implementation\n\nclass task_NER():\n\n def __init__(self):\n self.name = \"NER_task_bio\"\n\n # Controller Params\n self.controller_size = 128\n self.controller_layers = 1\n\n # Head Params\n self.num_read_heads = 1\n self.num_write_heads = 1\n\n # Processor Params\n self.num_inputs = 200 # Length of Embeddings\n self.num_outputs = 7 # Class size\n\n # Memory Params\n self.memory_N = 128\n self.memory_M = 128\n\n # Training Params\n self.num_batches = -1\n self.save_batch = 5 # Saving model after every save_batch number of batches\n self.batch_size = 10\n self.num_epoch = 4\n\n # Optimizer Params\n self.adam_lr = 1e-4\n self.adam_betas = (0.9, 0.999)\n self.adam_eps = 1e-8\n\n # Handles\n self.machine = None\n self.loss = None\n self.optimizer = None\n\n # Class Dictionaries\n self.labelDict = None # Label Dictionary - Labels to Index\n self.reverseDict = None # Inverse Label Dictionary - Index to Labels\n\n # File Paths\n self.concept_path_train = \"../medical_data/train_data/concept\" # Path to train concept files\n self.text_path_train = \"../medical_data/train_data/txt\" # Path to train text summaries\n self.concept_path_test = \"../medical_data/test_data/concept\" # Path to test concept files\n self.text_path_test = \"../medical_data/test_data/txt\" # Path to test text summaries\n self.save_path = \"../medical_data/cleaned_files\" # Save path\n self.embed_dic_path = \"../medical_data/embeddings/bio_embedding_dictionary.dat\" # Word2Vec embeddings Dictionary path\n self.random_vec = \"../medical_data/embeddings/random_vec.dat\" # Path to random embedding (Used to create new vectors)\n self.model_path = \"../saved_models/\" # Stores Trained Models\n\n # Miscellaneous\n self.padding_symbol = np.full((self.num_inputs), 0.01) # Padding symbol embedding\n\n def get_task_name(self):\n return self.name\n\n def init_dnc(self):\n self.machine = DNC_Module(self.num_inputs, self.num_outputs, self.controller_size, self.controller_layers, self.num_read_heads, self.num_write_heads, self.memory_N, self.memory_M)\n\n def init_loss(self):\n self.loss = nn.CrossEntropyLoss(reduction = 'mean') # Cross Entropy Loss -> Softmax Activation + Cross Entropy Loss\n\n def init_optimizer(self):\n self.optimizer = optim.Adam(self.machine.parameters(), lr = self.adam_lr, betas = self.adam_betas, eps = self.adam_eps)\n\n def calc_loss(self, Y_pred, Y):\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x num_outputs)\n loss_vec = torch.empty(Y.shape[0], dtype=torch.float32)\n for i in range(Y_pred.shape[0]):\n loss_vec[i] = self.loss(Y_pred[i], Y[i])\n return torch.mean(loss_vec)\n\n def calc_cost(self, Y_pred, Y): # Calculates % Cost\n # Y: dim -> (sequence_len x batch_size)\n # Y_pred: dim -> (sequence_len x batch_size x sequence_width)\n\n '''\n Note: \n 1). For considering an prediction to be True Positive, prediction must match completely with labels entity (not partially). Else it is False Negative.\n 2). 
For considering a prediction to be False Positive, it must be full entity (BIII) and not completely match the label entity.\n '''\n\n # Stores correct class labels for each entity type\n class_bag = {}\n class_bag['problem'] = 0 # Total labels\n class_bag['test'] = 0 # Total labels\n class_bag['treatment'] = 0 # Total labels\n class_bag['problem_cor'] = 0 # Correctly classified labels\n class_bag['test_cor'] = 0 # Correctly classified labels\n class_bag['treatment_cor'] = 0 # Correctly classified labels\n class_bag['problem_fp'] = 0 # False positive classified labels\n class_bag['test_fp'] = 0 # False positive classified labels\n class_bag['treatment_fp'] = 0 # False positive classified labels\n \n pred_class = np.transpose(F.softmax(Y_pred, dim=2).max(2)[1].numpy()).reshape(-1) # Predicted class. dim -> (sequence_len*batch_size)\n Y = np.transpose(Y.numpy()).reshape(-1) # Converting to NumPy Array and linearizing\n cor_pred = (Y == pred_class).astype(np.int) # Comparing Prediction and Labels to find correct predictions\n\n class_bag['word_pred_acc'] = np.divide(np.sum(cor_pred), cor_pred.size)*100.0 # % Accuracy of Correctly Predicted Words (Not Entities)\n\n # Getting the beginning index of all the entities\n beg_idx = list(np.where(np.in1d(Y, [0, 2, 4]))[0])\n\n # Getting the end index of all the entities (All the Index previous of 'Other'/'Begin' and not equal to 'Other')\n target = np.where(np.in1d(Y, [0, 2, 4, 6]))[0] - 1\n if target[0] == -1:\n target = target[1:]\n end_idx = list(target[np.where(Y[target] != 6)[0]])\n if Y[-1] != 6:\n end_idx.append(Y.size-1)\n\n assert len(beg_idx) == len(end_idx) # Sanity Check\n class_bag['total'] = len(beg_idx) # Total number of Entities\n\n # Counting Entities\n sum_vec = np.cumsum(cor_pred) # Calculates cumulative summation of predicted vector\n for b, e in zip(beg_idx, end_idx):\n idx_range = e-b+1 # Entity span\n sum_range = sum_vec[e]-sum_vec[b]+1 # Count of entity elements which are predicted correctly\n\n lab = self.reverseDict[Y[b]][2:] # Extracting entity type (Problem, Test or Treatment)\n class_bag[lab] = class_bag[lab]+1 # Getting count of each entities\n \n if sum_range == idx_range: # +1 if entity is classified correctly\n class_bag[lab+'_cor'] = class_bag[lab+'_cor']+1\n\n # Detecting False Positives\n # Getting the beginning index of all the entities in Predicted Results\n beg_idx_p = list(np.where(np.in1d(pred_class, [0, 2, 4]))[0])\n \n for b in beg_idx_p:\n if cor_pred[b] == 0:\n lab = self.reverseDict[pred_class[b]][2:]\n class_bag[lab+'_fp'] = class_bag[lab+'_fp']+1\n\n return class_bag\n \n def print_word(self, token_class): # Prints the Class name from Class number\n word = self.reverseDict[token_class]\n print(word + \"\\n\")\n\n def clip_grads(self): # Clipping gradients for stability\n \"\"\"Gradient clipping to the range [10, 10].\"\"\"\n parameters = list(filter(lambda p: p.grad is not None, self.machine.parameters()))\n for p in parameters:\n p.grad.data.clamp_(-10, 10)\n\n def initialize_labels(self): # Initializing label dictionaries for Labels->IDX and IDX->Labels\n self.labelDict = {} # Label Dictionary - Labels to Index\n self.reverseDict = {} # Inverse Label Dictionary - Index to Labels\n\n # Using BIEOS labelling scheme\n self.labelDict['b-problem'] = 0 # Problem - Beginning \n self.labelDict['i-problem'] = 1 # Problem - Inside\n self.labelDict['b-test'] = 2 # Test - Beginning\n self.labelDict['i-test'] = 3 # Test - Inside\n self.labelDict['b-treatment'] = 4 # Treatment - Beginning\n 
self.labelDict['i-treatment'] = 5 # Treatment - Inside\n self.labelDict['o'] = 6 # Outside Token\n\n # Making Inverse Label Dictionary\n for k in self.labelDict.keys():\n self.reverseDict[self.labelDict[k]] = k\n\n # Saving the diictionaries into a file\n self.save_data([self.labelDict, self.reverseDict], os.path.join(self.save_path, \"label_dicts_bio.dat\"))\n\n def parse_concepts(self, file_path): # Parses the concept file to extract concepts and labels\n conceptList = [] # Stores all the Concept in the File\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close() # Closing the concept file\n\n for x in content: # Reading each line in the concept file\n dic = {}\n\n # Cleaning and extracting the entities, labels and their positions in the corresponding medical summaries\n x = re.sub('\\n', ' ', x)\n x = re.sub(r'\\ +', ' ', x)\n x = x.strip().split('||')\n\n temp1, label = x[0].split(' '), x[1].split('=')[1][1:-1]\n\n temp1[0] = temp1[0][3:]\n temp1[-3] = temp1[-3][0:-1]\n entity = temp1[0:-2]\n\n if len(entity) >= 1:\n lab = ['i']*len(entity)\n lab[0] = 'b'\n lab = [l+\"-\"+label for l in lab]\n else:\n print(\"Data in File: \" + file_path + \", not in expected format..\")\n exit()\n\n noLab = [self.labelDict[l] for l in lab]\n sLine, sCol = int(temp1[-2].split(\":\")[0]), int(temp1[-2].split(\":\")[1])\n eLine, eCol = int(temp1[-1].split(\":\")[0]), int(temp1[-1].split(\":\")[1])\n \n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"Entity: \" + str(entity))\n print(\"Entity Label: \" + label)\n print(\"Labels - BIO form: \" + str(lab))\n print(\"Labels Index: \" + str(noLab))\n print(\"Start Line: \" + str(sLine) + \", Start Column: \" + str(sCol))\n print(\"End Line: \" + str(eLine) + \", End Column: \" + str(eCol))\n print(\"------------------------------------------------------------\")\n '''\n\n # Storing the information as a dictionary\n dic['entity'] = entity # Entity Name (In the form of list of words)\n dic['label'] = label # Common Label\n dic['BIO_labels'] = lab # List of BIO labels for each word\n dic['label_index'] = noLab # Labels in the index form\n dic['start_line'] = sLine # Start line of the concept in the corresponding text summaries\n dic['start_word_no'] = sCol # Starting word number of the concept in the corresponding start line\n dic['end_line'] = eLine # End line of the concept in the corresponding text summaries\n dic['end_word_no'] = eCol # Ending word number of the concept in the corresponding end line\n\n # Appending the concept dictionary to the list\n conceptList.append(dic)\n\n return conceptList # Returning the all the concepts in the current file in the form of dictionary list\n\n def parse_summary(self, file_path): # Parses the Text summaries\n file_lines = [] # Stores the lins of files in the list form\n tags = [] # Stores corresponding labels for each word in the file (Default label: 'o' [Outside])\n default_label = len(self.labelDict)-1 # default_label is \"7\" (Corresponding to 'Other' entity) \n # counter = 1 # Temporary variable used during print\n\n f = open(file_path) # Opening and reading a concept file\n content = f.readlines() # Reading all the lines in the concept file\n f.close()\n\n for x in content:\n x = re.sub('\\n', ' ', x)\n x = re.sub(r'\\ +', ' ', x)\n file_lines.append(x.strip().split(\" \")) # Spliting the lines into word list and Appending each of them in the file list\n 
tags.append([default_label]*len(file_lines[-1])) # Assigining the default_label to all the words in a line\n '''\n # Printing the information\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(file_lines[-1])\n print(\"\\nCorresponding labels:\")\n print(tags[-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n '''\n assert len(tags[-1]) == len(file_lines[-1]), \"Line length is not matching labels length...\" # Sanity Check\n return file_lines, tags\n\n def modify_labels(self, conceptList, tags): # Modifies the default labels of each word in text files with the true labels from the concept files\n for e in conceptList: # Iterating over all the dictionary elements in the Concept List\n if e['start_line'] == e['end_line']: # Checking whether concept is spanning over a single line or multiple line in the summary\n tags[e['start_line']-1][e['start_word_no']:e['end_word_no']+1] = e['label_index'][:]\n else:\n start = e['start_line']\n end = e['end_line']\n beg = 0\n for i in range(start, end+1): # Distributing labels over multiple lines in the text summaries\n if i == start:\n tags[i-1][e['start_word_no']:] = e['label_index'][0:len(tags[i-1])-e['start_word_no']]\n beg = len(tags[i-1])-e['start_word_no']\n elif i == end:\n tags[i-1][0:e['end_word_no']+1] = e['label_index'][beg:]\n else:\n tags[i-1][:] = e['label_index'][beg:beg+len(tags[i-1])]\n beg = beg+len(tags[i-1])\n return tags\n\n def print_data(self, file, file_lines, tags): # Prints the given data\n counter = 1\n\n print(\"\\n************ Printing details of the file: \" + file + \" ************\\n\")\n for x in file_lines:\n print(\"------------------------------------------------------------\")\n print(\"File Lines No: \" + str(counter))\n print(x)\n print(\"\\nCorresponding labels:\")\n print([self.reverseDict[i] for i in tags[counter-1]])\n print(\"\\nCorresponding Label Indices:\")\n print(tags[counter-1])\n print(\"------------------------------------------------------------\")\n counter += 1\n\n def save_data(self, obj_list, s_path): # Saves the file into the binary file using Pickle\n # Note: The 'obj_list' must be a list and none other than that\n pickle.dump(tuple(obj_list), open(s_path,'wb'))\n\n def acquire_data(self, task): # Read all the concept files to get concepts and labels, proces them and save them\n data = {} # Dictionary to store all the data objects (conceptList, file_lines, tags) each indexed by file name\n\n if task == 'train': # Determining the task type to assign the data path accordingly\n t_path = self.text_path_train\n c_path = self.concept_path_train\n else:\n t_path = self.text_path_test\n c_path = self.concept_path_test\n\n for f in os.listdir(t_path):\n f1 = f.split('.')[0] + \".con\"\n if os.path.isfile(os.path.join(c_path, f1)):\n conceptList = self.parse_concepts(os.path.join(c_path, f1)) # Parsing concepts and labels from the corresponding concept file\n file_lines, tags = self.parse_summary(os.path.join(t_path, f)) # Parses the document summaries to get the written notes\n tags = self.modify_labels(conceptList, tags) # Modifies he default labels to each word with the true labels from the concept files\n data[f1] = [conceptList, file_lines, tags] # Storing each object in dictionary\n # self.print_data(f, file_lines, tags) # Printing the details\n return data\n\n def structure_data(self, data_dict): # Structures the data in proper trainable form\n final_line_list = [] # Stores words 
of all the files in separate sub-lists\n final_tag_list = [] # Stores tags of all the files in separate sub-lists\n\n for k in data_dict.keys(): # Extracting data from each pre-processed file in dictionary\n file_lines = data_dict[k][1] # Extracting story\n tags = data_dict[k][2] # Extracting corresponding labels\n\n # Creating empty lists\n temp1 = []\n temp2 = []\n\n # Merging all the lines in file into a single list. Same for corresponding labels\n for i in range(len(file_lines)):\n temp1.extend(file_lines[i])\n temp2.extend(tags[i])\n \n assert len(temp1) == len(temp2), \"Word length not matching Label length for story in \" + str(k) # Sanity Check\n\n final_line_list.append(temp1)\n final_tag_list.append(temp2)\n \n assert len(final_line_list) == len(final_tag_list), \"Number of stories not matching number of labels list\" # Sanity Check\n return final_line_list, final_tag_list\n \n def padding(self, line_list, tag_list): # Pads stories with padding symbol to make them of same length \n diff = 0\n max_len = 0\n outside_class = len(self.labelDict)-1 # Classifying padding symbol as \"outside\" term\n\n # Calculating Max Summary Length\n for i in range(len(line_list)):\n if len(line_list[i])>max_len:\n max_len = len(line_list[i])\n\n for i in range(len(line_list)):\n diff = max_len - len(line_list[i])\n line_list[i].extend([self.padding_symbol]*diff)\n tag_list[i].extend([outside_class]*diff)\n assert (len(line_list[i]) == max_len) and (len(line_list[i]) == len(tag_list[i])), \"Padding unsuccessful\" # Sanity check\n return np.asarray(line_list), np.asarray(tag_list) # Making NumPy array of size (batch_size x story_length x word size) and (batch_size x story_length x 1) respectively\n\n def embed_input(self, line_list): # Converts words to vector embeddings\n final_list = [] # Stores embedded words\n summary = None # Temp variable\n word = None # Temp variable\n temp = None # Temp variable\n\n embed_dic = pickle.load(open(self.embed_dic_path, 'rb')) # Loading word2vec dictionary using Pickle\n r_embed = pickle.load(open(self.random_vec, 'rb')) # Loading Random embedding\n\n for i in range(len(line_list)): # Iterating over all the summaries\n summary = line_list[i]\n final_list.append([]) # Reserving space for curent summary\n\n for j in range(len(summary)):\n word = summary[j].lower()\n if word in embed_dic: # Checking for existence of word in dictionary\n final_list[-1].append(embed_dic[word])\n else:\n temp = r_embed[:] # Copying the values of the list\n random.shuffle(temp) # Randomly shuffling the word embedding to make it unique\n temp = np.asarray(temp, dtype=np.float32) # Converting to NumPy array\n final_list[-1].append(temp)\n return final_list\n\n def prepare_data(self, task='train'): # Preparing all the data necessary\n line_list, tag_list = None, None\n\n '''\n line_list is the list of rows, where each row is a list of all the words in a medical summary\n Similar is the case for tag_list, except, it stores labels for each words\n '''\n\n if not os.path.exists(self.save_path):\n os.mkdir(self.save_path) # Creating a new directory if it does not exist else reading previously saved data\n \n if not os.path.exists(os.path.join(self.save_path, \"label_dicts_bio.dat\")):\n self.initialize_labels() # Initialize label to index dictionaries\n else:\n self.labelDict, self.reverseDict = pickle.load(open(os.path.join(self.save_path, \"label_dicts_bio.dat\"), 'rb')) # Loading Label dictionaries\n \n if not os.path.exists(os.path.join(self.save_path, 
\"object_dict_bio_\"+str(task)+\".dat\")):\n data_dict = self.acquire_data(task) # Read data from file\n line_list, tag_list = self.structure_data(data_dict) # Structures the data into proper form\n line_list = self.embed_input(line_list) # Embeds input data (words) into embeddings\n self.save_data([line_list, tag_list], os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"))\n else:\n line_list, tag_list = pickle.load(open(os.path.join(self.save_path, \"object_dict_bio_\"+str(task)+\".dat\"), 'rb')) # Loading Data dictionary\n return line_list, tag_list\n\n def get_data(self, task='train'):\n line_list, tag_list = self.prepare_data(task)\n\n # Shuffling stories\n story_idx = list(range(0, len(line_list)))\n random.shuffle(story_idx)\n\n num_batch = int(len(story_idx)/self.batch_size)\n self.num_batches = num_batch\n\n # Out Data\n x_out = []\n y_out = []\n \n counter = 1\n\n for i in story_idx:\n if num_batch<=0:\n break\n\n x_out.append(line_list[i])\n y_out.append(tag_list[i])\n\n if counter % self.batch_size == 0:\n counter = 0\n \n # Padding and converting labels to one hot vectors\n x_out_pad, y_out_pad = self.padding(x_out, y_out)\n x_out_array = torch.tensor(x_out_pad.swapaxes(0, 1), dtype=torch.float32) # Converting from (batch_size x story_length x word size) to (story_length x batch_size x word size)\n y_out_array = torch.tensor(y_out_pad.swapaxes(0, 1), dtype=torch.long) # Converting from (batch_size x story_length x 1) to (story_length x batch_size x 1)\n\n x_out = []\n y_out = []\n num_batch -= 1\n\n yield (self.num_batches - num_batch), x_out_array, y_out_array\n counter += 1\n\n def train_model(self):\n # Here, the model is optimized using Cross Entropy Loss.\n loss_list = []\n seq_length = []\n last_batch = 0\n\n # self.load_model(1, 99, 13) # Loading Pre-Trained model to train further\n\n for j in range(self.num_epoch):\n for batch_num, X, Y in self.get_data(task='train'):\n self.optimizer.zero_grad() # Making old gradients zero before calculating the fresh ones\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1]) # Passing Embeddings from backwards\n\n loss = self.calc_loss(Y_out, Y)\n loss.backward()\n self.clip_grads()\n self.optimizer.step()\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n loss_list += [loss.item()]\n seq_length += [Y.shape[0]]\n\n if (batch_num % self.save_batch) == 0:\n self.save_model(j, batch_num)\n\n last_batch = batch_num\n print(\"Epoch: \" + str(j) + \"/\" + str(self.num_epoch) + \", Batch: \" + str(batch_num) + \"/\" + str(self.num_batches) + \", Loss: {0:.2f}, \".format(loss.item()) + \\\n \"Batch Accuracy (Entity Prediction): {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n self.save_model(j, last_batch)\n\n def test_model(self): # 
Testing the model\n correct = 0\n total = 0\n result_dict = {}\n result_dict['total_problem'] = 0 # Total labels in data\n result_dict['total_test'] = 0 # Total labels in data\n result_dict['total_treatment'] = 0 # Total labels in data\n result_dict['correct_problem'] = 0 # Correctly classified labels\n result_dict['correct_test'] = 0 # Correctly classified labels\n result_dict['correct_treatment'] = 0 # Correctly classified labels\n result_dict['false_positive_problem'] = 0 # False Positive labels\n result_dict['false_positive_test'] = 0 # False Positive labels\n result_dict['false_positive_treatment'] = 0 # False Positive labels\n print(\"\\n\")\n\n for batch_num, X, Y in self.get_data(task='test'):\n self.machine.initialization(self.batch_size) # Initializing states\n Y_out = torch.empty((X.shape[0], X.shape[1], self.num_outputs), dtype=torch.float32) # dim: (seq_len x batch_size x num_output)\n\n # Feeding the DNC network all the data first and then predicting output\n # by giving zero vector as input and previous read states and hidden vector\n # and thus training vector this way to give outputs matching the labels\n\n embeddings = self.machine.backward_prediction(X) # Creating embeddings from data for backward calculation\n temp_size = X.shape[0]\n\n for i in range(temp_size):\n Y_out[i, :, :], _ = self.machine(X[i], embeddings[temp_size-i-1])\n\n class_bag = self.calc_cost(Y_out, Y)\n\n corr = class_bag['problem_cor']+class_bag['test_cor']+class_bag['treatment_cor']\n tot = class_bag['total']\n\n result_dict['total_problem'] = result_dict['total_problem'] + class_bag['problem']\n result_dict['total_test'] = result_dict['total_test'] + class_bag['test']\n result_dict['total_treatment'] = result_dict['total_treatment'] + class_bag['treatment']\n result_dict['correct_problem'] = result_dict['correct_problem'] + class_bag['problem_cor']\n result_dict['correct_test'] = result_dict['correct_test'] + class_bag['test_cor']\n result_dict['correct_treatment'] = result_dict['correct_treatment'] + class_bag['treatment_cor']\n result_dict['false_positive_problem'] = result_dict['false_positive_problem'] + class_bag['problem_fp']\n result_dict['false_positive_test'] = result_dict['false_positive_test'] + class_bag['test_fp']\n result_dict['false_positive_treatment'] = result_dict['false_positive_treatment'] + class_bag['treatment_fp']\n\n correct += corr\n total += tot\n print(\"Test Example \" + str(batch_num) + \"/\" + str(self.num_batches) + \" processed, Batch Accuracy: {0:.2f} %, \".format((float(corr)/float(tot))*100.0) + \"Batch Accuracy (Word Prediction): {0:.2f} %\".format(class_bag['word_pred_acc']))\n \n result_dict['accuracy'] = (float(correct)/float(total))*100.0\n result_dict = self.calc_metrics(result_dict)\n print(\"\\nOverall Entity Prediction Accuracy: {0:.2f} %\".format(result_dict['accuracy']))\n return result_dict\n\n def calc_metrics(self, result_dict): # Calculates Certain Metrices\n precision_p = float(result_dict['correct_problem'])/float(result_dict['correct_problem'] + result_dict['false_positive_problem']) # Problem Precision\n recall_p = float(result_dict['correct_problem'])/float(result_dict['total_problem']) # Problem Recall\n\n precision_ts = float(result_dict['correct_test'])/float(result_dict['correct_test'] + result_dict['false_positive_test']) # Test Precision\n recall_ts = float(result_dict['correct_test'])/float(result_dict['total_test']) # Test Recall\n\n precision_tr = float(result_dict['correct_treatment'])/float(result_dict['correct_treatment'] + 
result_dict['false_positive_treatment']) # Treatment Precision\n recall_tr = float(result_dict['correct_treatment'])/float(result_dict['total_treatment']) # Treatment Recall\n\n f_score_p = 2*precision_p*recall_p/(precision_p+recall_p) # Problem F1 Score\n f_score_ts = 2*precision_ts*recall_ts/(precision_ts+recall_ts) # Test F1 Score\n f_score_tr = 2*precision_tr*recall_tr/(precision_tr+recall_tr) # Treatment F1 Score\n\n result_dict['problem_precision'] = precision_p\n result_dict['problem_recall'] = recall_p\n result_dict['problem_f1'] = f_score_p\n result_dict['test_precision'] = precision_ts\n result_dict['test_recall'] = recall_ts\n result_dict['test_f1'] = f_score_ts\n result_dict['treatment_precision'] = precision_tr\n result_dict['treatment_recall'] = recall_tr\n result_dict['treatment_f1'] = f_score_tr\n result_dict['macro_average_f1'] = (f_score_p + f_score_ts + f_score_tr)/3.0 # Macro Average F1 Score\n\n # Micro Average F1 Score\n correct_sum = result_dict['correct_problem'] + result_dict['correct_test'] + result_dict['correct_treatment']\n fp_sum = result_dict['false_positive_problem'] + result_dict['false_positive_test'] + result_dict['false_positive_treatment']\n total_sum = result_dict['total_problem'] + result_dict['total_test'] + result_dict['total_treatment']\n \n precision_avg = float(correct_sum)/float(correct_sum + fp_sum)\n recall_avg = float(correct_sum)/float(total_sum)\n result_dict['micro_average_f1'] = 2*precision_avg*recall_avg/(precision_avg+recall_avg)\n\n return result_dict\n\n def save_model(self, curr_epoch, curr_batch):\n # Here 'start_epoch' and 'start_batch' params below are the 'epoch' and 'batch' number from which to start training after next model loading\n # Note: It is recommended to start from the 'start_epoch' and not 'start_epoch' + 'start_batch', because batches are formed randomly\n if not os.path.exists(os.path.join(self.model_path, self.name)):\n os.mkdir(os.path.join(self.model_path, self.name))\n state_dic = {'task_name': self.name, 'start_epoch': curr_epoch + 1, 'start_batch': curr_batch + 1, 'state_dict': self.machine.state_dict(), 'optimizer_dic' : self.optimizer.state_dict()}\n filename = self.model_path + self.name + \"/\" + self.name + \"_\" + str(curr_epoch) + \"_\" + str(curr_batch) + \"_saved_model.pth.tar\"\n torch.save(state_dic, filename)\n\n def load_model(self, option, epoch, batch):\n path = self.model_path + self.name + \"/\" + self.name + \"_\" + str(epoch) + \"_\" + str(batch) + \"_saved_model.pth.tar\"\n if option == 1: # Loading for training\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer_dic'])\n else: # Loading for testing\n checkpoint = torch.load(path)\n self.machine.load_state_dict(checkpoint['state_dict'])\n self.machine.eval()",
"step-ids": [
12,
20,
26,
27,
29
]
}
|
[
12,
20,
26,
27,
29
] |
from PIL import Image
from src import urbandictionary_api
from src.card.cardDrawer import CardDrawer
from src.card.cardModel import CardModel
from src.repository import Repository
from src.urbandictionary_api import get_random_word
def save_card(word, image_path, filepath='data/cards/', filename=None):
    '''Generates a card image for the given word and saves it.
    Returns filepath + filename.

    Parameters:
    word - the word whose content will appear on the card
    image_path - path to the background image of the card
    filepath - directory where the image will be stored
    filename - name of the image file
    '''
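    # Fetch the word's definition data from the Urban Dictionary API and load the background image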
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
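    # Assemble the card model from the parsed content, the background image and the repository's fonts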
model = CardModel(
content=content,
image=image,
auth_font=fonts.aut_font,
cat_font=fonts.cat_font,
def_font=fonts.def_font,
ex_font=fonts.ex_font,
rect_font=fonts.rect_font,
word_font=fonts.word_font,
thumb_font=fonts.thumb_font
)
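    # Render the card and write it to disk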
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
if __name__ == '__main__':
from random import randint
save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
|
normal
|
{
"blob_id": "6bf1d410a33e3b2535e39e4f8c5c7f8278b3de67",
"index": 330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n save_card(get_random_word(),\n f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-4": "from PIL import Image\nfrom src import urbandictionary_api\nfrom src.card.cardDrawer import CardDrawer\nfrom src.card.cardModel import CardModel\nfrom src.repository import Repository\nfrom src.urbandictionary_api import get_random_word\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n save_card(get_random_word(),\n f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-5": "from PIL import Image\n\nfrom src import urbandictionary_api\nfrom src.card.cardDrawer import CardDrawer\nfrom src.card.cardModel import CardModel\nfrom src.repository import Repository\nfrom src.urbandictionary_api import get_random_word\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n '''Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n '''\n\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(\n content=content,\n image=image,\n auth_font=fonts.aut_font,\n cat_font=fonts.cat_font,\n def_font=fonts.def_font,\n ex_font=fonts.ex_font,\n rect_font=fonts.rect_font,\n word_font=fonts.word_font,\n thumb_font=fonts.thumb_font\n )\n\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n\n save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import simple_draw as sd
import random
# sd.resolution = (1400, 900)
# Prepare data for the sun function
def sun_prepare(xpoint, ypoint, radius, color, angle):
delta_list = []
radius_list = []
for delta in range(0, 360, angle):
delta_list.append(delta)
radius_list.append(random.randint(radius - 10, radius + 10))
return xpoint, ypoint, color, radius, delta_list, radius_list
# Drawing the sun
def sun(prepare_list):
xpoint = prepare_list[0]
ypoint = prepare_list[1]
color = prepare_list[2]
radius = prepare_list[3]
delta_list = prepare_list[4]
radius_list = prepare_list[5]
sd.start_drawing()
point = sd.get_point(xpoint, ypoint)
sd.circle(center_position=point, radius=radius * 3.9, color=sd.background_color, width=0)
sd.circle(center_position=point, radius=radius, color=color, width=0)
for j, (delta, radius) in enumerate(zip(delta_list, radius_list)):
v = sd.get_vector(start_point=point, angle=delta, width=6,
length=random.randint(radius * 2, radius * 3))
v.draw(color)
sd.finish_drawing()
# sd.pause()
|
normal
|
{
"blob_id": "46babde9c26a944c9d29121b6bbf89a32f242a81",
"index": 251,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sun_prepare(xpoint, ypoint, radius, color, angle):\n delta_list = []\n radius_list = []\n for delta in range(0, 360, angle):\n delta_list.append(delta)\n radius_list.append(random.randint(radius - 10, radius + 10))\n return xpoint, ypoint, color, radius, delta_list, radius_list\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sun_prepare(xpoint, ypoint, radius, color, angle):\n delta_list = []\n radius_list = []\n for delta in range(0, 360, angle):\n delta_list.append(delta)\n radius_list.append(random.randint(radius - 10, radius + 10))\n return xpoint, ypoint, color, radius, delta_list, radius_list\n\n\ndef sun(prepare_list):\n xpoint = prepare_list[0]\n ypoint = prepare_list[1]\n color = prepare_list[2]\n radius = prepare_list[3]\n delta_list = prepare_list[4]\n radius_list = prepare_list[5]\n sd.start_drawing()\n point = sd.get_point(xpoint, ypoint)\n sd.circle(center_position=point, radius=radius * 3.9, color=sd.\n background_color, width=0)\n sd.circle(center_position=point, radius=radius, color=color, width=0)\n for j, (delta, radius) in enumerate(zip(delta_list, radius_list)):\n v = sd.get_vector(start_point=point, angle=delta, width=6, length=\n random.randint(radius * 2, radius * 3))\n v.draw(color)\n sd.finish_drawing()\n",
"step-4": "import simple_draw as sd\nimport random\n\n\ndef sun_prepare(xpoint, ypoint, radius, color, angle):\n delta_list = []\n radius_list = []\n for delta in range(0, 360, angle):\n delta_list.append(delta)\n radius_list.append(random.randint(radius - 10, radius + 10))\n return xpoint, ypoint, color, radius, delta_list, radius_list\n\n\ndef sun(prepare_list):\n xpoint = prepare_list[0]\n ypoint = prepare_list[1]\n color = prepare_list[2]\n radius = prepare_list[3]\n delta_list = prepare_list[4]\n radius_list = prepare_list[5]\n sd.start_drawing()\n point = sd.get_point(xpoint, ypoint)\n sd.circle(center_position=point, radius=radius * 3.9, color=sd.\n background_color, width=0)\n sd.circle(center_position=point, radius=radius, color=color, width=0)\n for j, (delta, radius) in enumerate(zip(delta_list, radius_list)):\n v = sd.get_vector(start_point=point, angle=delta, width=6, length=\n random.randint(radius * 2, radius * 3))\n v.draw(color)\n sd.finish_drawing()\n",
"step-5": "import simple_draw as sd\nimport random\n\n\n# sd.resolution = (1400, 900)\n\n# Prepare data for the sun function\ndef sun_prepare(xpoint, ypoint, radius, color, angle):\n delta_list = []\n radius_list = []\n for delta in range(0, 360, angle):\n delta_list.append(delta)\n radius_list.append(random.randint(radius - 10, radius + 10))\n\n return xpoint, ypoint, color, radius, delta_list, radius_list\n\n\n# Drawing the sun\ndef sun(prepare_list):\n xpoint = prepare_list[0]\n ypoint = prepare_list[1]\n color = prepare_list[2]\n radius = prepare_list[3]\n delta_list = prepare_list[4]\n radius_list = prepare_list[5]\n sd.start_drawing()\n point = sd.get_point(xpoint, ypoint)\n sd.circle(center_position=point, radius=radius * 3.9, color=sd.background_color, width=0)\n sd.circle(center_position=point, radius=radius, color=color, width=0)\n for j, (delta, radius) in enumerate(zip(delta_list, radius_list)):\n v = sd.get_vector(start_point=point, angle=delta, width=6,\n length=random.randint(radius * 2, radius * 3))\n v.draw(color)\n sd.finish_drawing()\n\n# sd.pause()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pathlib import Path
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
SQLALCHEMY_DATABASE_URL = f"sqlite:///{Path(__name__).parent.absolute()}/sql_app.db"
engine = create_engine(
SQLALCHEMY_DATABASE_URL,
connect_args={"check_same_thread": False} # Needed only for SQLite
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# # FastAPI "Dependency" (used with Depends)
# def get_db():
# db = SessionLocal()
# try:
# yield db
# finally:
# db.close()
Base = declarative_base()
from flashcards_core.database.algorithms.model import Algorithm
from flashcards_core.database.algorithm_params.model import AlgorithmParam
from flashcards_core.database.cards.model import Card
from flashcards_core.database.decks.model import Deck
from flashcards_core.database.faces.model import Face
from flashcards_core.database.facts.model import Fact
from flashcards_core.database.reviews.model import Review
from flashcards_core.database.tags.model import Tag
from flashcards_core.database.many_to_many.model import FaceFact, DeckTag, CardTag, FaceTag, FactTag
# Create all the tables imported above
Base.metadata.create_all(bind=engine)
|
normal
|
{
"blob_id": "0656c3e1d8f84cfb33c4531e41efb4a349d08aac",
"index": 6747,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nBase.metadata.create_all(bind=engine)\n",
"step-3": "<mask token>\nSQLALCHEMY_DATABASE_URL = (\n f'sqlite:///{Path(__name__).parent.absolute()}/sql_app.db')\nengine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={\n 'check_same_thread': False})\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\nBase = declarative_base()\n<mask token>\nBase.metadata.create_all(bind=engine)\n",
"step-4": "from pathlib import Path\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, Session\nSQLALCHEMY_DATABASE_URL = (\n f'sqlite:///{Path(__name__).parent.absolute()}/sql_app.db')\nengine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={\n 'check_same_thread': False})\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\nBase = declarative_base()\nfrom flashcards_core.database.algorithms.model import Algorithm\nfrom flashcards_core.database.algorithm_params.model import AlgorithmParam\nfrom flashcards_core.database.cards.model import Card\nfrom flashcards_core.database.decks.model import Deck\nfrom flashcards_core.database.faces.model import Face\nfrom flashcards_core.database.facts.model import Fact\nfrom flashcards_core.database.reviews.model import Review\nfrom flashcards_core.database.tags.model import Tag\nfrom flashcards_core.database.many_to_many.model import FaceFact, DeckTag, CardTag, FaceTag, FactTag\nBase.metadata.create_all(bind=engine)\n",
"step-5": "from pathlib import Path\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker, Session\n\n# SQLALCHEMY_DATABASE_URL = \"postgresql://user:password@postgresserver/db\"\nSQLALCHEMY_DATABASE_URL = f\"sqlite:///{Path(__name__).parent.absolute()}/sql_app.db\"\n\nengine = create_engine(\n SQLALCHEMY_DATABASE_URL, \n connect_args={\"check_same_thread\": False} # Needed only for SQLite\n)\n\nSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n\n# # FastAPI \"Dependency\" (used with Depends)\n# def get_db():\n# db = SessionLocal()\n# try:\n# yield db\n# finally:\n# db.close()\n\nBase = declarative_base()\n\nfrom flashcards_core.database.algorithms.model import Algorithm\nfrom flashcards_core.database.algorithm_params.model import AlgorithmParam\nfrom flashcards_core.database.cards.model import Card\nfrom flashcards_core.database.decks.model import Deck\nfrom flashcards_core.database.faces.model import Face\nfrom flashcards_core.database.facts.model import Fact\nfrom flashcards_core.database.reviews.model import Review\nfrom flashcards_core.database.tags.model import Tag\nfrom flashcards_core.database.many_to_many.model import FaceFact, DeckTag, CardTag, FaceTag, FactTag\n\n# Create all the tables imported above\nBase.metadata.create_all(bind=engine)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
def __str__(self):
values = []
iter = self
while iter != None:
values.append(iter.value)
iter = iter.next
return ' -> '.join(values)
@staticmethod
def makelist(values):
node = None
for i in range(len(values) - 1, -1, -1):
node = Node(values[i], node)
return node
def reverse(node, s, f):
dummy = Node(0, node)
iter = node
start = dummy
end = node
rstart = node
rend = node
i = 1
if s == f:
return node
while i < s:
start = iter
if iter != None:
iter = iter.next
else:
return node
i += 1
rstart = iter
prev = iter
if iter == None:
return node
next = iter.next
while i < f:
curr = next
if next != None:
next = next.next
else:
return node
curr.next = prev
prev = curr
i += 1
rend = prev
end = next
start.next = rend
rstart.next = end
return dummy.next
values = input('Enter a list: ').split(',')
s, f = map(lambda x: int(x), input('Enter start and finish: ').split(','))
node = Node.makelist(values)
print(node)
print(reverse(node, s, f))
|
normal
|
{
"blob_id": "599310cfd05be28445535bc72251128ed72a9069",
"index": 4372,
"step-1": "class Node:\n\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n <mask token>\n\n @staticmethod\n def makelist(values):\n node = None\n for i in range(len(values) - 1, -1, -1):\n node = Node(values[i], node)\n return node\n\n\n<mask token>\n",
"step-2": "class Node:\n\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n def __str__(self):\n values = []\n iter = self\n while iter != None:\n values.append(iter.value)\n iter = iter.next\n return ' -> '.join(values)\n\n @staticmethod\n def makelist(values):\n node = None\n for i in range(len(values) - 1, -1, -1):\n node = Node(values[i], node)\n return node\n\n\n<mask token>\n",
"step-3": "class Node:\n\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n def __str__(self):\n values = []\n iter = self\n while iter != None:\n values.append(iter.value)\n iter = iter.next\n return ' -> '.join(values)\n\n @staticmethod\n def makelist(values):\n node = None\n for i in range(len(values) - 1, -1, -1):\n node = Node(values[i], node)\n return node\n\n\ndef reverse(node, s, f):\n dummy = Node(0, node)\n iter = node\n start = dummy\n end = node\n rstart = node\n rend = node\n i = 1\n if s == f:\n return node\n while i < s:\n start = iter\n if iter != None:\n iter = iter.next\n else:\n return node\n i += 1\n rstart = iter\n prev = iter\n if iter == None:\n return node\n next = iter.next\n while i < f:\n curr = next\n if next != None:\n next = next.next\n else:\n return node\n curr.next = prev\n prev = curr\n i += 1\n rend = prev\n end = next\n start.next = rend\n rstart.next = end\n return dummy.next\n\n\n<mask token>\nprint(node)\nprint(reverse(node, s, f))\n",
"step-4": "class Node:\n\n def __init__(self, value, next=None):\n self.value = value\n self.next = next\n\n def __str__(self):\n values = []\n iter = self\n while iter != None:\n values.append(iter.value)\n iter = iter.next\n return ' -> '.join(values)\n\n @staticmethod\n def makelist(values):\n node = None\n for i in range(len(values) - 1, -1, -1):\n node = Node(values[i], node)\n return node\n\n\ndef reverse(node, s, f):\n dummy = Node(0, node)\n iter = node\n start = dummy\n end = node\n rstart = node\n rend = node\n i = 1\n if s == f:\n return node\n while i < s:\n start = iter\n if iter != None:\n iter = iter.next\n else:\n return node\n i += 1\n rstart = iter\n prev = iter\n if iter == None:\n return node\n next = iter.next\n while i < f:\n curr = next\n if next != None:\n next = next.next\n else:\n return node\n curr.next = prev\n prev = curr\n i += 1\n rend = prev\n end = next\n start.next = rend\n rstart.next = end\n return dummy.next\n\n\nvalues = input('Enter a list: ').split(',')\ns, f = map(lambda x: int(x), input('Enter start and finish: ').split(','))\nnode = Node.makelist(values)\nprint(node)\nprint(reverse(node, s, f))\n",
"step-5": null,
"step-ids": [
3,
4,
6,
7
]
}
|
[
3,
4,
6,
7
] |
r, n = map(int, input().split())
if r == n:
print("too late")
else:
l = list(range(1, r+1))
for _ in range(n):
l.remove(int(input()))
print(l[0])
|
normal
|
{
"blob_id": "381d3f0890a2916d2e0a21a6a47a5f87afde622d",
"index": 9241,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif r == n:\n print('too late')\nelse:\n l = list(range(1, r + 1))\n for _ in range(n):\n l.remove(int(input()))\n print(l[0])\n",
"step-3": "r, n = map(int, input().split())\nif r == n:\n print('too late')\nelse:\n l = list(range(1, r + 1))\n for _ in range(n):\n l.remove(int(input()))\n print(l[0])\n",
"step-4": "r, n = map(int, input().split())\nif r == n:\n print(\"too late\")\nelse:\n l = list(range(1, r+1))\n for _ in range(n):\n l.remove(int(input()))\n print(l[0])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Given two binary strings, return their sum (also a binary string).
#
# For example,
# a = "11"
# b = "1"
# Return "100".
#
# Show Company Tags
# Show Tags
# Show Similar Problems
class Solution(object):
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
max_len = max(len(a), len(b))
a = a.zfill(max_len)
b = b.zfill(max_len)
carry = 0
res = ''
for i in range(max_len - 1, -1, -1):
sums = int(a[i]) + int(b[i]) + carry
if sums < 2:
res += str(sums)
carry = 0
elif sums == 2:
res += '0'
carry = 1
else:
res += '1'
carry = 1
if carry == 1:
res += '1'
return res[::-1]
|
normal
|
{
"blob_id": "9655cba5b459ae8b6812bcebc31cc46e19e52386",
"index": 2741,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n max_len = max(len(a), len(b))\n a = a.zfill(max_len)\n b = b.zfill(max_len)\n carry = 0\n res = ''\n for i in range(max_len - 1, -1, -1):\n sums = int(a[i]) + int(b[i]) + carry\n if sums < 2:\n res += str(sums)\n carry = 0\n elif sums == 2:\n res += '0'\n carry = 1\n else:\n res += '1'\n carry = 1\n if carry == 1:\n res += '1'\n return res[::-1]\n",
"step-4": "# Given two binary strings, return their sum (also a binary string).\n#\n# For example,\n# a = \"11\"\n# b = \"1\"\n# Return \"100\".\n#\n# Show Company Tags\n# Show Tags\n# Show Similar Problems\n\n\nclass Solution(object):\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n max_len = max(len(a), len(b))\n a = a.zfill(max_len)\n b = b.zfill(max_len)\n carry = 0\n res = ''\n for i in range(max_len - 1, -1, -1):\n sums = int(a[i]) + int(b[i]) + carry\n if sums < 2:\n res += str(sums)\n carry = 0\n elif sums == 2:\n res += '0'\n carry = 1\n else:\n res += '1'\n carry = 1\n if carry == 1:\n res += '1'\n return res[::-1]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
import operator as op
Symbol = str
Number = (int, float)
Atom = (Symbol, Number)
List = list
Exp = (Atom, List)
Env = dict
def standard_env() -> Env:
"An environment with some scheme standard procedures"
env = Env()
env.update(vars(math)) # sin, cos, sqrt, pi ...
env.update({
'+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv,
'>':op.gt, '>':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'abs':abs,
'append':op.add,
'apply':lambda proc, args: proc(*args),
'begin':lambda *x: x[-1],
'car':lambda x: x[0],
'cdr':lambda x: x[1:],
'cons':lambda x,y: [x] + y,
'eq?':op.is_,
'expt':pow,
'equal?':op.eq,
'length':len,
'list':lambda *x: List(x),
'list?':lambda x: isinstance(x, List),
'map':map,
'max':max,
'min':min,
'not':op.not_,
'null?':lambda x: x == [],
'number?':lambda x: isinstance(x, Number),
'print':print,
'procedure?':callable,
'round':round,
'symbol?':lambda x: isinstance(x, Symbol),
})
return env
global_env = standard_env()
def eval(x: Exp, env=global_env) -> Exp:
"Evaluate an expression in an environment."
if isinstance(x, Symbol): # variable reference
return env[x]
elif not isinstance(x, List): # constant number
return x
elif x[0] == 'if': # conditional
(_, test, conseq, alt) = x
exp = (conseq if eval(test, env) else alt)
return eval(exp, env)
elif x[0] == 'define': # definition
(_, symbol, exp) = x
env[symbol] = eval(exp, env)
else: # procedure call
proc = eval(x[0], env)
args = [eval(arg, env) for arg in x[1:]]
return proc(*args)
def tokenize(chars: str) -> list:
"convert a string of characters into a list of tokens"
return chars.replace('(', ' ( ').replace(')', ' ) ').split()
def parse(program: str) -> Exp:
"Read a scheme expression from a string"
return read_from_tokens(tokenize(program))
def read_from_tokens(tokens: list) -> Exp:
"Read an expression from a sequence of tokens"
if len(tokens) == 0:
raise SyntaxError('unexpected EOF')
token = tokens.pop(0)
if token == '(':
L = []
while tokens[0] != ')':
L.append(read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
elif token == ')':
raise SyntaxError('unexpected )')
else:
return atom(token)
def atom(token: str) -> Atom:
"Numbers become numbers; every other token is a symbol"
try: return int(token)
except ValueError:
try: return float(token)
except ValueError:
return Symbol(token)
program = "(begin (define r 10) (* pi (* r r)))"
print(eval(parse(program)))
|
normal
|
{
"blob_id": "88862d6bee5d83dd5f1c656a06a9dc46a5254b10",
"index": 3608,
"step-1": "<mask token>\n\n\ndef standard_env() ->Env:\n \"\"\"An environment with some scheme standard procedures\"\"\"\n env = Env()\n env.update(vars(math))\n env.update({'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>':\n op.gt, '>': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs,\n 'append': op.add, 'apply': lambda proc, args: proc(*args), 'begin':\n lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'expt': pow, 'equal?':\n op.eq, 'length': len, 'list': lambda *x: List(x), 'list?': lambda x:\n isinstance(x, List), 'map': map, 'max': max, 'min': min, 'not': op.\n not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x,\n Number), 'print': print, 'procedure?': callable, 'round': round,\n 'symbol?': lambda x: isinstance(x, Symbol)})\n return env\n\n\n<mask token>\n\n\ndef eval(x: Exp, env=global_env) ->Exp:\n \"\"\"Evaluate an expression in an environment.\"\"\"\n if isinstance(x, Symbol):\n return env[x]\n elif not isinstance(x, List):\n return x\n elif x[0] == 'if':\n _, test, conseq, alt = x\n exp = conseq if eval(test, env) else alt\n return eval(exp, env)\n elif x[0] == 'define':\n _, symbol, exp = x\n env[symbol] = eval(exp, env)\n else:\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\n\ndef tokenize(chars: str) ->list:\n \"\"\"convert a string of characters into a list of tokens\"\"\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\n\ndef parse(program: str) ->Exp:\n \"\"\"Read a scheme expression from a string\"\"\"\n return read_from_tokens(tokenize(program))\n\n\n<mask token>\n\n\ndef atom(token: str) ->Atom:\n \"\"\"Numbers become numbers; every other token is a symbol\"\"\"\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef standard_env() ->Env:\n \"\"\"An environment with some scheme standard procedures\"\"\"\n env = Env()\n env.update(vars(math))\n env.update({'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>':\n op.gt, '>': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs,\n 'append': op.add, 'apply': lambda proc, args: proc(*args), 'begin':\n lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'expt': pow, 'equal?':\n op.eq, 'length': len, 'list': lambda *x: List(x), 'list?': lambda x:\n isinstance(x, List), 'map': map, 'max': max, 'min': min, 'not': op.\n not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x,\n Number), 'print': print, 'procedure?': callable, 'round': round,\n 'symbol?': lambda x: isinstance(x, Symbol)})\n return env\n\n\n<mask token>\n\n\ndef eval(x: Exp, env=global_env) ->Exp:\n \"\"\"Evaluate an expression in an environment.\"\"\"\n if isinstance(x, Symbol):\n return env[x]\n elif not isinstance(x, List):\n return x\n elif x[0] == 'if':\n _, test, conseq, alt = x\n exp = conseq if eval(test, env) else alt\n return eval(exp, env)\n elif x[0] == 'define':\n _, symbol, exp = x\n env[symbol] = eval(exp, env)\n else:\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\n\ndef tokenize(chars: str) ->list:\n \"\"\"convert a string of characters into a list of tokens\"\"\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\n\ndef parse(program: str) ->Exp:\n \"\"\"Read a scheme expression from a string\"\"\"\n return read_from_tokens(tokenize(program))\n\n\ndef read_from_tokens(tokens: list) ->Exp:\n \"\"\"Read an expression from a sequence of tokens\"\"\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0)\n if token == '(':\n L = []\n while tokens[0] != ')':\n L.append(read_from_tokens(tokens))\n tokens.pop(0)\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)\n\n\ndef atom(token: str) ->Atom:\n \"\"\"Numbers become numbers; every other token is a symbol\"\"\"\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)\n\n\n<mask token>\nprint(eval(parse(program)))\n",
"step-3": "<mask token>\nSymbol = str\nNumber = int, float\nAtom = Symbol, Number\nList = list\nExp = Atom, List\nEnv = dict\n\n\ndef standard_env() ->Env:\n \"\"\"An environment with some scheme standard procedures\"\"\"\n env = Env()\n env.update(vars(math))\n env.update({'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>':\n op.gt, '>': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs,\n 'append': op.add, 'apply': lambda proc, args: proc(*args), 'begin':\n lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'expt': pow, 'equal?':\n op.eq, 'length': len, 'list': lambda *x: List(x), 'list?': lambda x:\n isinstance(x, List), 'map': map, 'max': max, 'min': min, 'not': op.\n not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x,\n Number), 'print': print, 'procedure?': callable, 'round': round,\n 'symbol?': lambda x: isinstance(x, Symbol)})\n return env\n\n\nglobal_env = standard_env()\n\n\ndef eval(x: Exp, env=global_env) ->Exp:\n \"\"\"Evaluate an expression in an environment.\"\"\"\n if isinstance(x, Symbol):\n return env[x]\n elif not isinstance(x, List):\n return x\n elif x[0] == 'if':\n _, test, conseq, alt = x\n exp = conseq if eval(test, env) else alt\n return eval(exp, env)\n elif x[0] == 'define':\n _, symbol, exp = x\n env[symbol] = eval(exp, env)\n else:\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\n\ndef tokenize(chars: str) ->list:\n \"\"\"convert a string of characters into a list of tokens\"\"\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\n\ndef parse(program: str) ->Exp:\n \"\"\"Read a scheme expression from a string\"\"\"\n return read_from_tokens(tokenize(program))\n\n\ndef read_from_tokens(tokens: list) ->Exp:\n \"\"\"Read an expression from a sequence of tokens\"\"\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0)\n if token == '(':\n L = []\n while tokens[0] != ')':\n L.append(read_from_tokens(tokens))\n tokens.pop(0)\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)\n\n\ndef atom(token: str) ->Atom:\n \"\"\"Numbers become numbers; every other token is a symbol\"\"\"\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)\n\n\nprogram = '(begin (define r 10) (* pi (* r r)))'\nprint(eval(parse(program)))\n",
"step-4": "import math\nimport operator as op\nSymbol = str\nNumber = int, float\nAtom = Symbol, Number\nList = list\nExp = Atom, List\nEnv = dict\n\n\ndef standard_env() ->Env:\n \"\"\"An environment with some scheme standard procedures\"\"\"\n env = Env()\n env.update(vars(math))\n env.update({'+': op.add, '-': op.sub, '*': op.mul, '/': op.truediv, '>':\n op.gt, '>': op.lt, '>=': op.ge, '<=': op.le, '=': op.eq, 'abs': abs,\n 'append': op.add, 'apply': lambda proc, args: proc(*args), 'begin':\n lambda *x: x[-1], 'car': lambda x: x[0], 'cdr': lambda x: x[1:],\n 'cons': lambda x, y: [x] + y, 'eq?': op.is_, 'expt': pow, 'equal?':\n op.eq, 'length': len, 'list': lambda *x: List(x), 'list?': lambda x:\n isinstance(x, List), 'map': map, 'max': max, 'min': min, 'not': op.\n not_, 'null?': lambda x: x == [], 'number?': lambda x: isinstance(x,\n Number), 'print': print, 'procedure?': callable, 'round': round,\n 'symbol?': lambda x: isinstance(x, Symbol)})\n return env\n\n\nglobal_env = standard_env()\n\n\ndef eval(x: Exp, env=global_env) ->Exp:\n \"\"\"Evaluate an expression in an environment.\"\"\"\n if isinstance(x, Symbol):\n return env[x]\n elif not isinstance(x, List):\n return x\n elif x[0] == 'if':\n _, test, conseq, alt = x\n exp = conseq if eval(test, env) else alt\n return eval(exp, env)\n elif x[0] == 'define':\n _, symbol, exp = x\n env[symbol] = eval(exp, env)\n else:\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\n\ndef tokenize(chars: str) ->list:\n \"\"\"convert a string of characters into a list of tokens\"\"\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\n\ndef parse(program: str) ->Exp:\n \"\"\"Read a scheme expression from a string\"\"\"\n return read_from_tokens(tokenize(program))\n\n\ndef read_from_tokens(tokens: list) ->Exp:\n \"\"\"Read an expression from a sequence of tokens\"\"\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0)\n if token == '(':\n L = []\n while tokens[0] != ')':\n L.append(read_from_tokens(tokens))\n tokens.pop(0)\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)\n\n\ndef atom(token: str) ->Atom:\n \"\"\"Numbers become numbers; every other token is a symbol\"\"\"\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)\n\n\nprogram = '(begin (define r 10) (* pi (* r r)))'\nprint(eval(parse(program)))\n",
"step-5": "import math\nimport operator as op\n\nSymbol = str\nNumber = (int, float)\nAtom = (Symbol, Number)\nList = list\nExp = (Atom, List)\nEnv = dict\n\ndef standard_env() -> Env:\n \"An environment with some scheme standard procedures\"\n env = Env()\n env.update(vars(math)) # sin, cos, sqrt, pi ...\n env.update({\n '+':op.add, '-':op.sub, '*':op.mul, '/':op.truediv,\n '>':op.gt, '>':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,\n 'abs':abs,\n 'append':op.add,\n 'apply':lambda proc, args: proc(*args),\n 'begin':lambda *x: x[-1],\n 'car':lambda x: x[0],\n 'cdr':lambda x: x[1:],\n 'cons':lambda x,y: [x] + y,\n 'eq?':op.is_,\n 'expt':pow,\n 'equal?':op.eq,\n 'length':len,\n 'list':lambda *x: List(x),\n 'list?':lambda x: isinstance(x, List),\n 'map':map,\n 'max':max,\n 'min':min,\n 'not':op.not_,\n 'null?':lambda x: x == [],\n 'number?':lambda x: isinstance(x, Number),\n 'print':print,\n 'procedure?':callable,\n 'round':round,\n 'symbol?':lambda x: isinstance(x, Symbol),\n })\n return env\n\nglobal_env = standard_env()\n\ndef eval(x: Exp, env=global_env) -> Exp:\n \"Evaluate an expression in an environment.\"\n if isinstance(x, Symbol): # variable reference\n return env[x]\n elif not isinstance(x, List): # constant number\n return x\n elif x[0] == 'if': # conditional\n (_, test, conseq, alt) = x\n exp = (conseq if eval(test, env) else alt)\n return eval(exp, env)\n elif x[0] == 'define': # definition\n (_, symbol, exp) = x\n env[symbol] = eval(exp, env)\n else: # procedure call\n proc = eval(x[0], env)\n args = [eval(arg, env) for arg in x[1:]]\n return proc(*args)\n\ndef tokenize(chars: str) -> list:\n \"convert a string of characters into a list of tokens\"\n return chars.replace('(', ' ( ').replace(')', ' ) ').split()\n\ndef parse(program: str) -> Exp:\n \"Read a scheme expression from a string\"\n return read_from_tokens(tokenize(program))\n\ndef read_from_tokens(tokens: list) -> Exp:\n \"Read an expression from a sequence of tokens\"\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0)\n if token == '(':\n L = []\n while tokens[0] != ')':\n L.append(read_from_tokens(tokens))\n tokens.pop(0) # pop off ')'\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)\n\ndef atom(token: str) -> Atom:\n \"Numbers become numbers; every other token is a symbol\"\n try: return int(token)\n except ValueError:\n try: return float(token)\n except ValueError:\n return Symbol(token)\n\nprogram = \"(begin (define r 10) (* pi (* r r)))\"\nprint(eval(parse(program)))\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
# coding: utf-8
"""
__author__: onur koc
"""
import numpy as np
import matplotlib.pyplot as plt
from mpldatacursor import datacursor
#optional to annotate any clicked point
# ------------
# Input values
# ------------
gamma = 23
# Specific weight of the rock mass [kN/m³]
H = 270
# Overburden [m]
nu = 0.3
# Poisson's ratio of the rock [-]
E = 300000
# Modulus of elasticity of the rock [kPa]
p_o = gamma * H
# In-situ stress [kPa]
D = 9
# Diameter of the tunnel [m]
c = 300
# Cohesion of the rock [kPa]
phi = 28
# Friction angle of the rock [deg]
Phi = np.deg2rad(phi)
# Convertion from degrees to radians [rad]
# --------------------------------
# Input values for support members
# --------------------------------
f_ck = 35
# Uniaxial compressive strength of the sprayed concrete [MPa]
E_c = 30000
# Young's modulus of the sprayed concrete [MPa]
nu_c = 0.2
# Poisson's ratio of the sprayed concrete [-]
t_c = 0.25
# Thickness of the sprayed concrete [m]
dis_sup = 0
# Distance of the support member to the face
# Other calculated values
p_i = np.arange(0, p_o, 100)
# Support pressure (an array from zero to insitu stress) [kPa]
sigma_cm = 2 * c * np.cos(Phi) / (1 - np.sin(Phi))
# Uniaxial strength of the rock mass [kPa]
k = (1 + np.sin(Phi)) / (1 - np.sin(Phi))
# Slope defined by the Mohr-Coulomb criterion [-]
# ----------------------------
# Analysis of tunnel behaviour
# ----------------------------
# Tunnel wall displacement
p_cr = (2 * p_o - sigma_cm) / (1 + k)
# Critical support pressure [kPa]
# Note: if the critical support pressure is smaller than the internal
# support pressure then failure does not occur
r_o = D / 2
# Radius of the tunnel [m]
u_ie = r_o * (1 + nu) / E * (p_o - p_i)
# Inward radial elastic displacement (Pi is a variable) [m]
r_p = r_o*(2*(p_o*(k-1)+sigma_cm)/(1+k)/((k-1)*p_i+sigma_cm))**(1/(k-1))
# Radius of the plastic zone [m]
u_ip = r_o*(1+nu)/E*(2*(1-nu)*(p_o-p_cr) * (r_p/r_o)**2-(1-2*nu)*(p_o-p_i))
# Inward radial plastic displacement (Pi is a variable) [m]
x = []
for i in range(len(p_i)):
if p_i[i] > p_cr:
x.append(u_ie[i])
else:
x.append(u_ip[i])
u_annot = r_o * (1+nu) / E * (p_o-p_cr)
# The abscissa of the ordinate: p_cr
# Logitudinal displacement profile
r_pm = r_o * ((2 * (p_o * (k-1) + sigma_cm)) / ((1+k) * sigma_cm))**(1/(k-1))
# Maximum plastic zone radius [m]
u_im = r_o * (1+nu)/E*(2*(1-nu)*(p_o-p_cr)*(r_pm/r_o)**2-(1-2*nu)*(p_o))
# Maximum displacement [m] - r_p = r_pm; p_i = 0
u_if = (u_im / 3) * np.exp(-0.15 * (r_pm / r_o))
# Displacement at the tunnel face (by Vlachopoulus and Diederichs) [m]
# Displacement ahead of the face
x_ = np.arange(-25, 40, 1)
# Distance from tunnel face (an array from -25m ahead and 40m behind the face)
# [m]
u_ix_a = (u_if) * np.exp(x_ / r_o)
# Tunnel wall displacement ahead of the face (x < 0) [m]
# Displacement behind the face
u_ix_b = u_im*(1-(1-u_if/u_im) * np.exp((-3*x_/r_o) / (2*r_pm/r_o)))
# Tunnel wall displacement behind the face (x > 0) [m]
x__ = []
for i in range(len(x_)):
if x_[i] < 0:
x__.append(u_ix_a[i])
else:
x__.append(u_ix_b[i])
lambda_face = u_if / u_im
# -----------------------
# Analysis of the support
# -----------------------
# Panet curve
#u_io = u_if + (u_im-u_if) * (1-(0.84*r_pm/(dis_sup + 0.84*r_pm))**2)
# Tunnel wall displacement at support installation [m]
# Vlachopoulus curve is as follows:
u_io = u_im*(1-(1-u_if/u_im) * np.exp((-3*dis_sup/r_o) / (2*r_pm/r_o)))
K_sc = E_c * (r_o**2 - (r_o-t_c)**2) / (2*(1-nu**2)*(r_o-t_c)*r_o**2)
# The stiffness of the sprayed concrete [MPa/m]
p_scmax = f_ck/2 * (1 - (r_o - t_c)**2 / r_o**2)
# The maximum sprayed concrete pressure [MPa]
u_iy = u_io + p_scmax / K_sc
# Yielding point of the sprayed concrete [m]
point_x = [u_io, u_iy, 1.3*u_iy]
point_y = [0, p_scmax, p_scmax]
# Points for the support yield line
if __name__ == "__main__":
fig, ax1 = plt.subplots(num=1, dpi=125, edgecolor='w')
ax1.plot(x, p_i/1000, lw=1.5, color='blue')
plt.title('Ground Reaction Curve')
ax1.set_ylabel('Support Pressure $P_i\,[MPa]$', fontsize=12)
ax1.set_xlabel('Tunnel Wall Displacement $u_i\,[m]$', fontsize=12)
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
ax2.plot(x__, x_, lw=1.5, color='red')
ax2.set_ylabel('Distance from tunnel face $x\,[m]$', fontsize=12)
# ax2.axhline(y=0, xmin=u_if, xmax=0.045, color='black')
for tl in ax2.get_yticklabels():
tl.set_color('r')
xposition = [u_if]
yposition = [0, 5]
for xc in xposition:
ax2.axvline(x=xc, color='k', linestyle='--', lw=1.0)
for yc in yposition:
ax2.axhline(y=yc, color='k', linestyle='--', lw=1.0)
datacursor(display='multiple', draggable=True)
plt.figure(num=2, dpi=125, edgecolor='b')
plt.plot(x, p_i/1000, 'b-', lw=1.5)
plt.plot(point_x, point_y, 'r-', lw=1.5)
plt.title('Ground Reaction Curve')
plt.ylabel('Support Pressure $P_i\,[MPa]$', fontsize=12)
plt.xlabel('Tunnel Wall Displacement $u_i\,[m]$', fontsize=12)
datacursor(display='multiple', draggable=True)
plt.show()
|
normal
|
{
"blob_id": "f9cc9348d36c131aa3d34e4f78f67b008a1b565a",
"index": 7121,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(p_i)):\n if p_i[i] > p_cr:\n x.append(u_ie[i])\n else:\n x.append(u_ip[i])\n<mask token>\nfor i in range(len(x_)):\n if x_[i] < 0:\n x__.append(u_ix_a[i])\n else:\n x__.append(u_ix_b[i])\n<mask token>\nif __name__ == '__main__':\n fig, ax1 = plt.subplots(num=1, dpi=125, edgecolor='w')\n ax1.plot(x, p_i / 1000, lw=1.5, color='blue')\n plt.title('Ground Reaction Curve')\n ax1.set_ylabel('Support Pressure $P_i\\\\,[MPa]$', fontsize=12)\n ax1.set_xlabel('Tunnel Wall Displacement $u_i\\\\,[m]$', fontsize=12)\n for tl in ax1.get_yticklabels():\n tl.set_color('b')\n ax2 = ax1.twinx()\n ax2.plot(x__, x_, lw=1.5, color='red')\n ax2.set_ylabel('Distance from tunnel face $x\\\\,[m]$', fontsize=12)\n for tl in ax2.get_yticklabels():\n tl.set_color('r')\n xposition = [u_if]\n yposition = [0, 5]\n for xc in xposition:\n ax2.axvline(x=xc, color='k', linestyle='--', lw=1.0)\n for yc in yposition:\n ax2.axhline(y=yc, color='k', linestyle='--', lw=1.0)\n datacursor(display='multiple', draggable=True)\n plt.figure(num=2, dpi=125, edgecolor='b')\n plt.plot(x, p_i / 1000, 'b-', lw=1.5)\n plt.plot(point_x, point_y, 'r-', lw=1.5)\n plt.title('Ground Reaction Curve')\n plt.ylabel('Support Pressure $P_i\\\\,[MPa]$', fontsize=12)\n plt.xlabel('Tunnel Wall Displacement $u_i\\\\,[m]$', fontsize=12)\n datacursor(display='multiple', draggable=True)\n plt.show()\n",
"step-3": "<mask token>\ngamma = 23\nH = 270\nnu = 0.3\nE = 300000\np_o = gamma * H\nD = 9\nc = 300\nphi = 28\nPhi = np.deg2rad(phi)\nf_ck = 35\nE_c = 30000\nnu_c = 0.2\nt_c = 0.25\ndis_sup = 0\np_i = np.arange(0, p_o, 100)\nsigma_cm = 2 * c * np.cos(Phi) / (1 - np.sin(Phi))\nk = (1 + np.sin(Phi)) / (1 - np.sin(Phi))\np_cr = (2 * p_o - sigma_cm) / (1 + k)\nr_o = D / 2\nu_ie = r_o * (1 + nu) / E * (p_o - p_i)\nr_p = r_o * (2 * (p_o * (k - 1) + sigma_cm) / (1 + k) / ((k - 1) * p_i +\n sigma_cm)) ** (1 / (k - 1))\nu_ip = r_o * (1 + nu) / E * (2 * (1 - nu) * (p_o - p_cr) * (r_p / r_o) ** 2 -\n (1 - 2 * nu) * (p_o - p_i))\nx = []\nfor i in range(len(p_i)):\n if p_i[i] > p_cr:\n x.append(u_ie[i])\n else:\n x.append(u_ip[i])\nu_annot = r_o * (1 + nu) / E * (p_o - p_cr)\nr_pm = r_o * (2 * (p_o * (k - 1) + sigma_cm) / ((1 + k) * sigma_cm)) ** (1 /\n (k - 1))\nu_im = r_o * (1 + nu) / E * (2 * (1 - nu) * (p_o - p_cr) * (r_pm / r_o) ** \n 2 - (1 - 2 * nu) * p_o)\nu_if = u_im / 3 * np.exp(-0.15 * (r_pm / r_o))\nx_ = np.arange(-25, 40, 1)\nu_ix_a = u_if * np.exp(x_ / r_o)\nu_ix_b = u_im * (1 - (1 - u_if / u_im) * np.exp(-3 * x_ / r_o / (2 * r_pm /\n r_o)))\nx__ = []\nfor i in range(len(x_)):\n if x_[i] < 0:\n x__.append(u_ix_a[i])\n else:\n x__.append(u_ix_b[i])\nlambda_face = u_if / u_im\nu_io = u_im * (1 - (1 - u_if / u_im) * np.exp(-3 * dis_sup / r_o / (2 *\n r_pm / r_o)))\nK_sc = E_c * (r_o ** 2 - (r_o - t_c) ** 2) / (2 * (1 - nu ** 2) * (r_o -\n t_c) * r_o ** 2)\np_scmax = f_ck / 2 * (1 - (r_o - t_c) ** 2 / r_o ** 2)\nu_iy = u_io + p_scmax / K_sc\npoint_x = [u_io, u_iy, 1.3 * u_iy]\npoint_y = [0, p_scmax, p_scmax]\nif __name__ == '__main__':\n fig, ax1 = plt.subplots(num=1, dpi=125, edgecolor='w')\n ax1.plot(x, p_i / 1000, lw=1.5, color='blue')\n plt.title('Ground Reaction Curve')\n ax1.set_ylabel('Support Pressure $P_i\\\\,[MPa]$', fontsize=12)\n ax1.set_xlabel('Tunnel Wall Displacement $u_i\\\\,[m]$', fontsize=12)\n for tl in ax1.get_yticklabels():\n tl.set_color('b')\n ax2 = ax1.twinx()\n ax2.plot(x__, x_, lw=1.5, color='red')\n ax2.set_ylabel('Distance from tunnel face $x\\\\,[m]$', fontsize=12)\n for tl in ax2.get_yticklabels():\n tl.set_color('r')\n xposition = [u_if]\n yposition = [0, 5]\n for xc in xposition:\n ax2.axvline(x=xc, color='k', linestyle='--', lw=1.0)\n for yc in yposition:\n ax2.axhline(y=yc, color='k', linestyle='--', lw=1.0)\n datacursor(display='multiple', draggable=True)\n plt.figure(num=2, dpi=125, edgecolor='b')\n plt.plot(x, p_i / 1000, 'b-', lw=1.5)\n plt.plot(point_x, point_y, 'r-', lw=1.5)\n plt.title('Ground Reaction Curve')\n plt.ylabel('Support Pressure $P_i\\\\,[MPa]$', fontsize=12)\n plt.xlabel('Tunnel Wall Displacement $u_i\\\\,[m]$', fontsize=12)\n datacursor(display='multiple', draggable=True)\n plt.show()\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpldatacursor import datacursor\ngamma = 23\nH = 270\nnu = 0.3\nE = 300000\np_o = gamma * H\nD = 9\nc = 300\nphi = 28\nPhi = np.deg2rad(phi)\nf_ck = 35\nE_c = 30000\nnu_c = 0.2\nt_c = 0.25\ndis_sup = 0\np_i = np.arange(0, p_o, 100)\nsigma_cm = 2 * c * np.cos(Phi) / (1 - np.sin(Phi))\nk = (1 + np.sin(Phi)) / (1 - np.sin(Phi))\np_cr = (2 * p_o - sigma_cm) / (1 + k)\nr_o = D / 2\nu_ie = r_o * (1 + nu) / E * (p_o - p_i)\nr_p = r_o * (2 * (p_o * (k - 1) + sigma_cm) / (1 + k) / ((k - 1) * p_i +\n sigma_cm)) ** (1 / (k - 1))\nu_ip = r_o * (1 + nu) / E * (2 * (1 - nu) * (p_o - p_cr) * (r_p / r_o) ** 2 -\n (1 - 2 * nu) * (p_o - p_i))\nx = []\nfor i in range(len(p_i)):\n if p_i[i] > p_cr:\n x.append(u_ie[i])\n else:\n x.append(u_ip[i])\nu_annot = r_o * (1 + nu) / E * (p_o - p_cr)\nr_pm = r_o * (2 * (p_o * (k - 1) + sigma_cm) / ((1 + k) * sigma_cm)) ** (1 /\n (k - 1))\nu_im = r_o * (1 + nu) / E * (2 * (1 - nu) * (p_o - p_cr) * (r_pm / r_o) ** \n 2 - (1 - 2 * nu) * p_o)\nu_if = u_im / 3 * np.exp(-0.15 * (r_pm / r_o))\nx_ = np.arange(-25, 40, 1)\nu_ix_a = u_if * np.exp(x_ / r_o)\nu_ix_b = u_im * (1 - (1 - u_if / u_im) * np.exp(-3 * x_ / r_o / (2 * r_pm /\n r_o)))\nx__ = []\nfor i in range(len(x_)):\n if x_[i] < 0:\n x__.append(u_ix_a[i])\n else:\n x__.append(u_ix_b[i])\nlambda_face = u_if / u_im\nu_io = u_im * (1 - (1 - u_if / u_im) * np.exp(-3 * dis_sup / r_o / (2 *\n r_pm / r_o)))\nK_sc = E_c * (r_o ** 2 - (r_o - t_c) ** 2) / (2 * (1 - nu ** 2) * (r_o -\n t_c) * r_o ** 2)\np_scmax = f_ck / 2 * (1 - (r_o - t_c) ** 2 / r_o ** 2)\nu_iy = u_io + p_scmax / K_sc\npoint_x = [u_io, u_iy, 1.3 * u_iy]\npoint_y = [0, p_scmax, p_scmax]\nif __name__ == '__main__':\n fig, ax1 = plt.subplots(num=1, dpi=125, edgecolor='w')\n ax1.plot(x, p_i / 1000, lw=1.5, color='blue')\n plt.title('Ground Reaction Curve')\n ax1.set_ylabel('Support Pressure $P_i\\\\,[MPa]$', fontsize=12)\n ax1.set_xlabel('Tunnel Wall Displacement $u_i\\\\,[m]$', fontsize=12)\n for tl in ax1.get_yticklabels():\n tl.set_color('b')\n ax2 = ax1.twinx()\n ax2.plot(x__, x_, lw=1.5, color='red')\n ax2.set_ylabel('Distance from tunnel face $x\\\\,[m]$', fontsize=12)\n for tl in ax2.get_yticklabels():\n tl.set_color('r')\n xposition = [u_if]\n yposition = [0, 5]\n for xc in xposition:\n ax2.axvline(x=xc, color='k', linestyle='--', lw=1.0)\n for yc in yposition:\n ax2.axhline(y=yc, color='k', linestyle='--', lw=1.0)\n datacursor(display='multiple', draggable=True)\n plt.figure(num=2, dpi=125, edgecolor='b')\n plt.plot(x, p_i / 1000, 'b-', lw=1.5)\n plt.plot(point_x, point_y, 'r-', lw=1.5)\n plt.title('Ground Reaction Curve')\n plt.ylabel('Support Pressure $P_i\\\\,[MPa]$', fontsize=12)\n plt.xlabel('Tunnel Wall Displacement $u_i\\\\,[m]$', fontsize=12)\n datacursor(display='multiple', draggable=True)\n plt.show()\n",
"step-5": "# coding: utf-8\r\n\"\"\"\r\n__author__: onur koc\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpldatacursor import datacursor \r\n#optional to annotate any clicked point\r\n\r\n# ------------\r\n# Input values\r\n# ------------\r\ngamma = 23\r\n# Specific weight of the rock mass [kN/m³]\r\nH = 270\r\n# Overburden [m]\r\nnu = 0.3\r\n# Poisson's ratio of the rock [-]\r\nE = 300000\r\n# Modulus of elasticity of the rock [kPa]\r\np_o = gamma * H\r\n# In-situ stress [kPa]\r\nD = 9\r\n# Diameter of the tunnel [m]\r\nc = 300\r\n# Cohesion of the rock [kPa]\r\nphi = 28\r\n# Friction angle of the rock [deg]\r\nPhi = np.deg2rad(phi)\r\n# Convertion from degrees to radians [rad]\r\n\r\n# --------------------------------\r\n# Input values for support members\r\n# --------------------------------\r\n\r\nf_ck = 35\r\n# Uniaxial compressive strength of the sprayed concrete [MPa]\r\nE_c = 30000\r\n# Young's modulus of the sprayed concrete [MPa]\r\nnu_c = 0.2\r\n# Poisson's ratio of the sprayed concrete [-]\r\nt_c = 0.25\r\n# Thickness of the sprayed concrete [m]\r\ndis_sup = 0\r\n# Distance of the support member to the face\r\n\r\n# Other calculated values\r\n\r\np_i = np.arange(0, p_o, 100)\r\n# Support pressure (an array from zero to insitu stress) [kPa]\r\n\r\nsigma_cm = 2 * c * np.cos(Phi) / (1 - np.sin(Phi))\r\n# Uniaxial strength of the rock mass [kPa]\r\nk = (1 + np.sin(Phi)) / (1 - np.sin(Phi))\r\n# Slope defined by the Mohr-Coulomb criterion [-]\r\n\r\n# ----------------------------\r\n# Analysis of tunnel behaviour\r\n# ----------------------------\r\n\r\n# Tunnel wall displacement\r\n\r\np_cr = (2 * p_o - sigma_cm) / (1 + k)\r\n# Critical support pressure [kPa]\r\n# Note: if the critical support pressure is smaller than the internal\r\n# support pressure then failure does not occur\r\n\r\nr_o = D / 2\r\n# Radius of the tunnel [m]\r\n\r\nu_ie = r_o * (1 + nu) / E * (p_o - p_i)\r\n# Inward radial elastic displacement (Pi is a variable) [m]\r\n\r\nr_p = r_o*(2*(p_o*(k-1)+sigma_cm)/(1+k)/((k-1)*p_i+sigma_cm))**(1/(k-1))\r\n# Radius of the plastic zone [m]\r\n\r\nu_ip = r_o*(1+nu)/E*(2*(1-nu)*(p_o-p_cr) * (r_p/r_o)**2-(1-2*nu)*(p_o-p_i))\r\n# Inward radial plastic displacement (Pi is a variable) [m]\r\n\r\nx = []\r\n\r\nfor i in range(len(p_i)):\r\n if p_i[i] > p_cr:\r\n x.append(u_ie[i])\r\n else:\r\n x.append(u_ip[i])\r\n\r\nu_annot = r_o * (1+nu) / E * (p_o-p_cr)\r\n# The abscissa of the ordinate: p_cr\r\n\r\n\r\n# Logitudinal displacement profile\r\n\r\nr_pm = r_o * ((2 * (p_o * (k-1) + sigma_cm)) / ((1+k) * sigma_cm))**(1/(k-1))\r\n# Maximum plastic zone radius [m]\r\n\r\nu_im = r_o * (1+nu)/E*(2*(1-nu)*(p_o-p_cr)*(r_pm/r_o)**2-(1-2*nu)*(p_o))\r\n# Maximum displacement [m] - r_p = r_pm; p_i = 0\r\n\r\nu_if = (u_im / 3) * np.exp(-0.15 * (r_pm / r_o))\r\n# Displacement at the tunnel face (by Vlachopoulus and Diederichs) [m]\r\n\r\n# Displacement ahead of the face\r\n\r\nx_ = np.arange(-25, 40, 1)\r\n# Distance from tunnel face (an array from -25m ahead and 40m behind the face)\r\n# [m]\r\n\r\nu_ix_a = (u_if) * np.exp(x_ / r_o)\r\n# Tunnel wall displacement ahead of the face (x < 0) [m]\r\n\r\n# Displacement behind the face\r\n\r\nu_ix_b = u_im*(1-(1-u_if/u_im) * np.exp((-3*x_/r_o) / (2*r_pm/r_o)))\r\n# Tunnel wall displacement behind the face (x > 0) [m]\r\n\r\nx__ = []\r\n\r\nfor i in range(len(x_)):\r\n if x_[i] < 0:\r\n x__.append(u_ix_a[i])\r\n else:\r\n x__.append(u_ix_b[i])\r\n\r\nlambda_face = u_if / u_im\r\n\r\n# -----------------------\r\n# 
Analysis of the support\r\n# -----------------------\r\n\r\n# Panet curve\r\n#u_io = u_if + (u_im-u_if) * (1-(0.84*r_pm/(dis_sup + 0.84*r_pm))**2)\r\n# Tunnel wall displacement at support installation [m]\r\n\r\n# Vlachopoulus curve is as follows:\r\nu_io = u_im*(1-(1-u_if/u_im) * np.exp((-3*dis_sup/r_o) / (2*r_pm/r_o)))\r\n\r\nK_sc = E_c * (r_o**2 - (r_o-t_c)**2) / (2*(1-nu**2)*(r_o-t_c)*r_o**2)\r\n# The stiffness of the sprayed concrete [MPa/m]\r\n\r\np_scmax = f_ck/2 * (1 - (r_o - t_c)**2 / r_o**2)\r\n# The maximum sprayed concrete pressure [MPa]\r\n\r\nu_iy = u_io + p_scmax / K_sc\r\n# Yielding point of the sprayed concrete [m]\r\n\r\npoint_x = [u_io, u_iy, 1.3*u_iy]\r\npoint_y = [0, p_scmax, p_scmax]\r\n# Points for the support yield line\r\n\r\nif __name__ == \"__main__\":\r\n \r\n fig, ax1 = plt.subplots(num=1, dpi=125, edgecolor='w')\r\n ax1.plot(x, p_i/1000, lw=1.5, color='blue')\r\n plt.title('Ground Reaction Curve')\r\n ax1.set_ylabel('Support Pressure $P_i\\,[MPa]$', fontsize=12)\r\n ax1.set_xlabel('Tunnel Wall Displacement $u_i\\,[m]$', fontsize=12)\r\n for tl in ax1.get_yticklabels():\r\n tl.set_color('b')\r\n \r\n ax2 = ax1.twinx()\r\n ax2.plot(x__, x_, lw=1.5, color='red')\r\n ax2.set_ylabel('Distance from tunnel face $x\\,[m]$', fontsize=12)\r\n # ax2.axhline(y=0, xmin=u_if, xmax=0.045, color='black')\r\n for tl in ax2.get_yticklabels():\r\n tl.set_color('r')\r\n xposition = [u_if]\r\n yposition = [0, 5]\r\n for xc in xposition:\r\n ax2.axvline(x=xc, color='k', linestyle='--', lw=1.0)\r\n for yc in yposition:\r\n ax2.axhline(y=yc, color='k', linestyle='--', lw=1.0)\r\n datacursor(display='multiple', draggable=True)\r\n \r\n plt.figure(num=2, dpi=125, edgecolor='b')\r\n plt.plot(x, p_i/1000, 'b-', lw=1.5)\r\n plt.plot(point_x, point_y, 'r-', lw=1.5)\r\n plt.title('Ground Reaction Curve')\r\n plt.ylabel('Support Pressure $P_i\\,[MPa]$', fontsize=12)\r\n plt.xlabel('Tunnel Wall Displacement $u_i\\,[m]$', fontsize=12)\r\n datacursor(display='multiple', draggable=True)\r\n plt.show()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2 as cv
import numpy as np
import pytesseract as tes
text = get_text_from_image("resizedReceipt.jpg")
print(text)
def get_text_from_image(imageName):
img = preprocess(imageName)
result = tes.image_to_string(img)
return result
def preprocess(image_name):
image = cv.imread(image_name)
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
receiptBox = find_receipt_box(gray)
M, w, h = perspective_transform(receiptBox)
receiptImg = apply_perspective_correction(gray, M, w, h)
receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)
return receiptImg
def find_receipt_box(image):
"""
Finds a contour around the receipt in the given image.
Returns the bounding box and the binary image
"""
# gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
gray = cv.medianBlur(image, 15, 0)
_, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)
k = np.ones((25, 25))
thresh = cv.erode(thresh, k, iterations=1)
thresh = cv.dilate(thresh, k, iterations=1)
contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
contours = sorted(contours[0], key=cv.contourArea, reverse=True)
contour = contours[0]
rect = cv.minAreaRect(contour)
box = cv.boxPoints(rect)
box = np.int0(box)
return box
def perspective_transform(contour):
"""Produces the transformation matrix and the new size for perspective correction"""
ord_rect = np.float32(order_rect(contour))
(tl, tr, br, bl) = ord_rect
dist_top = np.linalg.norm(tl - tr)
dist_btm = np.linalg.norm(bl - br)
width = max(dist_btm, dist_top)
dist_left = np.linalg.norm(tl - tr)
dist_right = np.linalg.norm(tr - br)
height = max(dist_left, dist_right)
dest_corners = np.array([
[0, 0],
[width - 1, 0],
[width - 1, height - 1],
[0, height - 1]
], dtype=ord_rect.dtype)
M = cv.getPerspectiveTransform(ord_rect, dest_corners)
return M, width, height
def order_rect(pts):
"""
orders a rectangle in the order top-left, top-right,
bottom-right, bottom-left
"""
new = np.zeros((4, 2), dtype="int64")
s = pts.sum(axis=1)
new[0] = pts[np.argmin(s)]
new[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
new[1] = pts[np.argmin(diff)]
new[3] = pts[np.argmax(diff)]
return new
def apply_perspective_correction(image, M, width, height):
"""Crops the contour and applies perspective correction"""
warped = cv.warpPerspective(image, M, (width, height))
return warped
|
normal
|
{
"blob_id": "e480136aca96e45cc8a7ca34c1a9d09b96a5a4da",
"index": 4152,
"step-1": "<mask token>\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\n<mask token>\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-2": "<mask token>\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-3": "<mask token>\ntext = get_text_from_image('resizedReceipt.jpg')\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-4": "import cv2 as cv\nimport numpy as np\nimport pytesseract as tes\ntext = get_text_from_image('resizedReceipt.jpg')\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-5": "import cv2 as cv\nimport numpy as np\nimport pytesseract as tes\n\n\ntext = get_text_from_image(\"resizedReceipt.jpg\")\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n # gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n (tl, tr, br, bl) = ord_rect\n\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n\n dest_corners = np.array([\n [0, 0],\n [width - 1, 0],\n [width - 1, height - 1],\n [0, height - 1]\n ], dtype=ord_rect.dtype)\n\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype=\"int64\")\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
from tkinter import *
import psycopg2
import sys
import pprint
import Base_de_datos
import MergeSort
class Cliente:
def __init__(self,id=None,nombre=None):
self.id=id
self.nombre=nombre
def ingresar(self):
self.ventanaIngresar= Toplevel()
self.ventanaIngresar.geometry("570x400")
self.ventanaIngresar.title("Cliente")
img = PhotoImage(file="C:/Users/checo/Desktop/41-INVERSION-MEDIOS-DIGITALES.png")
imagen= Label(self.ventanaIngresar, image=img)
imagen.pack()
Label(self.ventanaIngresar, text="Cliente",font=("Cambria",14)).place(x=5,y=0)
Label(self.ventanaIngresar, text="Id: ",font=("Cambria",11)).place(x=0,y=30)
Label(self.ventanaIngresar, text="Nombre: ",font=("Cambria",11)).place(x=0,y=60)
self.id=StringVar()
Entry(self.ventanaIngresar, textvariable=self.id).place(x=30,y=30)
self.nombre=StringVar()
Entry(self.ventanaIngresar, textvariable=self.nombre).place(x=65,y=60)
Button(self.ventanaIngresar,text="Guardar",font=("Cambria",11),
width=15,command=self.BD).place(x=420,y=5)
#Button(self.ventanaIngresar,text="Modificar",font=("Cambria",11),
# width=15).place(x=420,y=365)
Button(self.ventanaIngresar,text="Mostrar",font=("Cambria",11),
width=15,command=self.Mostrar).place(x=0,y=365)
Button(self.ventanaIngresar,text="Ordenar",font=("Cambria",11),
width=15, command=self.ordenamiento).place(x=220,y=365)
self.ventanaIngresar.mainloop()
def BD(self):
conectar=Base_de_datos.BaseDeDatos()
comando="INSERT INTO public.cliente(id, nombre) VALUES('"+self.id.get()+"','"+self.nombre.get()+"')"
print(comando)
conectar.cursor.execute(comando)
def Mostrar(self):
comando="SELECT * FROM cliente;"
conectar=Base_de_datos.BaseDeDatos()
conectar.cursor.execute(comando)
Scroll=Scrollbar(self.ventanaIngresar, orient=VERTICAL)
self.listbox=Listbox(self.ventanaIngresar, font=("Cambria",9), borderwidth=0, yscrollcommand=Scroll.set,height=15,relief="sunken",width=60)
self.listbox.place(x=5, y=90)
Scroll.config(command=self.listbox.yview)
Scroll.pack(side=RIGHT, fill=Y)
for dato1, dato2 in enumerate(conectar.cursor.fetchall()):
self.listbox.insert(0, "Id: {}".format(dato2[0]))
self.listbox.insert(1, "Nombre: {}".format(dato2[1]))
self.listbox.insert(2, " ")
def ordenamiento(self):
comando="SELECT id FROM cliente;"
conectar=Base_de_datos.BaseDeDatos()
conectar.cursor.execute(comando)
rows= conectar.cursor.fetchall()
ordenar=MergeSort.merge_sort(rows)
print(ordenar)
|
normal
|
{
"blob_id": "63d9aa55463123f32fd608ada83e555be4b5fe2c",
"index": 6946,
"step-1": "<mask token>\n\n\nclass Cliente:\n <mask token>\n <mask token>\n\n def BD(self):\n conectar = Base_de_datos.BaseDeDatos()\n comando = (\"INSERT INTO public.cliente(id, nombre) VALUES('\" + self\n .id.get() + \"','\" + self.nombre.get() + \"')\")\n print(comando)\n conectar.cursor.execute(comando)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Cliente:\n\n def __init__(self, id=None, nombre=None):\n self.id = id\n self.nombre = nombre\n\n def ingresar(self):\n self.ventanaIngresar = Toplevel()\n self.ventanaIngresar.geometry('570x400')\n self.ventanaIngresar.title('Cliente')\n img = PhotoImage(file=\n 'C:/Users/checo/Desktop/41-INVERSION-MEDIOS-DIGITALES.png')\n imagen = Label(self.ventanaIngresar, image=img)\n imagen.pack()\n Label(self.ventanaIngresar, text='Cliente', font=('Cambria', 14)\n ).place(x=5, y=0)\n Label(self.ventanaIngresar, text='Id: ', font=('Cambria', 11)).place(x\n =0, y=30)\n Label(self.ventanaIngresar, text='Nombre: ', font=('Cambria', 11)\n ).place(x=0, y=60)\n self.id = StringVar()\n Entry(self.ventanaIngresar, textvariable=self.id).place(x=30, y=30)\n self.nombre = StringVar()\n Entry(self.ventanaIngresar, textvariable=self.nombre).place(x=65, y=60)\n Button(self.ventanaIngresar, text='Guardar', font=('Cambria', 11),\n width=15, command=self.BD).place(x=420, y=5)\n Button(self.ventanaIngresar, text='Mostrar', font=('Cambria', 11),\n width=15, command=self.Mostrar).place(x=0, y=365)\n Button(self.ventanaIngresar, text='Ordenar', font=('Cambria', 11),\n width=15, command=self.ordenamiento).place(x=220, y=365)\n self.ventanaIngresar.mainloop()\n\n def BD(self):\n conectar = Base_de_datos.BaseDeDatos()\n comando = (\"INSERT INTO public.cliente(id, nombre) VALUES('\" + self\n .id.get() + \"','\" + self.nombre.get() + \"')\")\n print(comando)\n conectar.cursor.execute(comando)\n <mask token>\n\n def ordenamiento(self):\n comando = 'SELECT id FROM cliente;'\n conectar = Base_de_datos.BaseDeDatos()\n conectar.cursor.execute(comando)\n rows = conectar.cursor.fetchall()\n ordenar = MergeSort.merge_sort(rows)\n print(ordenar)\n",
"step-3": "<mask token>\n\n\nclass Cliente:\n\n def __init__(self, id=None, nombre=None):\n self.id = id\n self.nombre = nombre\n\n def ingresar(self):\n self.ventanaIngresar = Toplevel()\n self.ventanaIngresar.geometry('570x400')\n self.ventanaIngresar.title('Cliente')\n img = PhotoImage(file=\n 'C:/Users/checo/Desktop/41-INVERSION-MEDIOS-DIGITALES.png')\n imagen = Label(self.ventanaIngresar, image=img)\n imagen.pack()\n Label(self.ventanaIngresar, text='Cliente', font=('Cambria', 14)\n ).place(x=5, y=0)\n Label(self.ventanaIngresar, text='Id: ', font=('Cambria', 11)).place(x\n =0, y=30)\n Label(self.ventanaIngresar, text='Nombre: ', font=('Cambria', 11)\n ).place(x=0, y=60)\n self.id = StringVar()\n Entry(self.ventanaIngresar, textvariable=self.id).place(x=30, y=30)\n self.nombre = StringVar()\n Entry(self.ventanaIngresar, textvariable=self.nombre).place(x=65, y=60)\n Button(self.ventanaIngresar, text='Guardar', font=('Cambria', 11),\n width=15, command=self.BD).place(x=420, y=5)\n Button(self.ventanaIngresar, text='Mostrar', font=('Cambria', 11),\n width=15, command=self.Mostrar).place(x=0, y=365)\n Button(self.ventanaIngresar, text='Ordenar', font=('Cambria', 11),\n width=15, command=self.ordenamiento).place(x=220, y=365)\n self.ventanaIngresar.mainloop()\n\n def BD(self):\n conectar = Base_de_datos.BaseDeDatos()\n comando = (\"INSERT INTO public.cliente(id, nombre) VALUES('\" + self\n .id.get() + \"','\" + self.nombre.get() + \"')\")\n print(comando)\n conectar.cursor.execute(comando)\n\n def Mostrar(self):\n comando = 'SELECT * FROM cliente;'\n conectar = Base_de_datos.BaseDeDatos()\n conectar.cursor.execute(comando)\n Scroll = Scrollbar(self.ventanaIngresar, orient=VERTICAL)\n self.listbox = Listbox(self.ventanaIngresar, font=('Cambria', 9),\n borderwidth=0, yscrollcommand=Scroll.set, height=15, relief=\n 'sunken', width=60)\n self.listbox.place(x=5, y=90)\n Scroll.config(command=self.listbox.yview)\n Scroll.pack(side=RIGHT, fill=Y)\n for dato1, dato2 in enumerate(conectar.cursor.fetchall()):\n self.listbox.insert(0, 'Id: {}'.format(dato2[0]))\n self.listbox.insert(1, 'Nombre: {}'.format(dato2[1]))\n self.listbox.insert(2, ' ')\n\n def ordenamiento(self):\n comando = 'SELECT id FROM cliente;'\n conectar = Base_de_datos.BaseDeDatos()\n conectar.cursor.execute(comando)\n rows = conectar.cursor.fetchall()\n ordenar = MergeSort.merge_sort(rows)\n print(ordenar)\n",
"step-4": "from tkinter import *\nimport psycopg2\nimport sys\nimport pprint\nimport Base_de_datos\nimport MergeSort\n\n\nclass Cliente:\n\n def __init__(self, id=None, nombre=None):\n self.id = id\n self.nombre = nombre\n\n def ingresar(self):\n self.ventanaIngresar = Toplevel()\n self.ventanaIngresar.geometry('570x400')\n self.ventanaIngresar.title('Cliente')\n img = PhotoImage(file=\n 'C:/Users/checo/Desktop/41-INVERSION-MEDIOS-DIGITALES.png')\n imagen = Label(self.ventanaIngresar, image=img)\n imagen.pack()\n Label(self.ventanaIngresar, text='Cliente', font=('Cambria', 14)\n ).place(x=5, y=0)\n Label(self.ventanaIngresar, text='Id: ', font=('Cambria', 11)).place(x\n =0, y=30)\n Label(self.ventanaIngresar, text='Nombre: ', font=('Cambria', 11)\n ).place(x=0, y=60)\n self.id = StringVar()\n Entry(self.ventanaIngresar, textvariable=self.id).place(x=30, y=30)\n self.nombre = StringVar()\n Entry(self.ventanaIngresar, textvariable=self.nombre).place(x=65, y=60)\n Button(self.ventanaIngresar, text='Guardar', font=('Cambria', 11),\n width=15, command=self.BD).place(x=420, y=5)\n Button(self.ventanaIngresar, text='Mostrar', font=('Cambria', 11),\n width=15, command=self.Mostrar).place(x=0, y=365)\n Button(self.ventanaIngresar, text='Ordenar', font=('Cambria', 11),\n width=15, command=self.ordenamiento).place(x=220, y=365)\n self.ventanaIngresar.mainloop()\n\n def BD(self):\n conectar = Base_de_datos.BaseDeDatos()\n comando = (\"INSERT INTO public.cliente(id, nombre) VALUES('\" + self\n .id.get() + \"','\" + self.nombre.get() + \"')\")\n print(comando)\n conectar.cursor.execute(comando)\n\n def Mostrar(self):\n comando = 'SELECT * FROM cliente;'\n conectar = Base_de_datos.BaseDeDatos()\n conectar.cursor.execute(comando)\n Scroll = Scrollbar(self.ventanaIngresar, orient=VERTICAL)\n self.listbox = Listbox(self.ventanaIngresar, font=('Cambria', 9),\n borderwidth=0, yscrollcommand=Scroll.set, height=15, relief=\n 'sunken', width=60)\n self.listbox.place(x=5, y=90)\n Scroll.config(command=self.listbox.yview)\n Scroll.pack(side=RIGHT, fill=Y)\n for dato1, dato2 in enumerate(conectar.cursor.fetchall()):\n self.listbox.insert(0, 'Id: {}'.format(dato2[0]))\n self.listbox.insert(1, 'Nombre: {}'.format(dato2[1]))\n self.listbox.insert(2, ' ')\n\n def ordenamiento(self):\n comando = 'SELECT id FROM cliente;'\n conectar = Base_de_datos.BaseDeDatos()\n conectar.cursor.execute(comando)\n rows = conectar.cursor.fetchall()\n ordenar = MergeSort.merge_sort(rows)\n print(ordenar)\n",
"step-5": "from tkinter import *\r\nimport psycopg2\r\nimport sys\r\nimport pprint\r\nimport Base_de_datos\r\nimport MergeSort\r\n\r\nclass Cliente:\r\n def __init__(self,id=None,nombre=None):\r\n self.id=id\r\n self.nombre=nombre\r\n def ingresar(self):\r\n self.ventanaIngresar= Toplevel()\r\n self.ventanaIngresar.geometry(\"570x400\")\r\n self.ventanaIngresar.title(\"Cliente\")\r\n img = PhotoImage(file=\"C:/Users/checo/Desktop/41-INVERSION-MEDIOS-DIGITALES.png\")\r\n imagen= Label(self.ventanaIngresar, image=img)\r\n imagen.pack()\r\n Label(self.ventanaIngresar, text=\"Cliente\",font=(\"Cambria\",14)).place(x=5,y=0)\r\n Label(self.ventanaIngresar, text=\"Id: \",font=(\"Cambria\",11)).place(x=0,y=30)\r\n Label(self.ventanaIngresar, text=\"Nombre: \",font=(\"Cambria\",11)).place(x=0,y=60)\r\n\r\n self.id=StringVar()\r\n Entry(self.ventanaIngresar, textvariable=self.id).place(x=30,y=30)\r\n self.nombre=StringVar()\r\n Entry(self.ventanaIngresar, textvariable=self.nombre).place(x=65,y=60) \r\n \r\n Button(self.ventanaIngresar,text=\"Guardar\",font=(\"Cambria\",11),\r\n width=15,command=self.BD).place(x=420,y=5)\r\n \r\n #Button(self.ventanaIngresar,text=\"Modificar\",font=(\"Cambria\",11),\r\n # width=15).place(x=420,y=365)\r\n \r\n Button(self.ventanaIngresar,text=\"Mostrar\",font=(\"Cambria\",11),\r\n width=15,command=self.Mostrar).place(x=0,y=365)\r\n \r\n Button(self.ventanaIngresar,text=\"Ordenar\",font=(\"Cambria\",11),\r\n width=15, command=self.ordenamiento).place(x=220,y=365)\r\n \r\n self.ventanaIngresar.mainloop()\r\n \r\n def BD(self):\r\n conectar=Base_de_datos.BaseDeDatos()\r\n comando=\"INSERT INTO public.cliente(id, nombre) VALUES('\"+self.id.get()+\"','\"+self.nombre.get()+\"')\"\r\n print(comando)\r\n conectar.cursor.execute(comando)\r\n def Mostrar(self):\r\n comando=\"SELECT * FROM cliente;\"\r\n conectar=Base_de_datos.BaseDeDatos()\r\n conectar.cursor.execute(comando)\r\n Scroll=Scrollbar(self.ventanaIngresar, orient=VERTICAL)\r\n self.listbox=Listbox(self.ventanaIngresar, font=(\"Cambria\",9), borderwidth=0, yscrollcommand=Scroll.set,height=15,relief=\"sunken\",width=60)\r\n self.listbox.place(x=5, y=90)\r\n Scroll.config(command=self.listbox.yview)\r\n Scroll.pack(side=RIGHT, fill=Y)\r\n for dato1, dato2 in enumerate(conectar.cursor.fetchall()):\r\n self.listbox.insert(0, \"Id: {}\".format(dato2[0]))\r\n self.listbox.insert(1, \"Nombre: {}\".format(dato2[1]))\r\n self.listbox.insert(2, \" \")\r\n def ordenamiento(self):\r\n comando=\"SELECT id FROM cliente;\"\r\n conectar=Base_de_datos.BaseDeDatos()\r\n conectar.cursor.execute(comando)\r\n rows= conectar.cursor.fetchall()\r\n ordenar=MergeSort.merge_sort(rows)\r\n print(ordenar)\r\n\r\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
from ctypes import CDLL
svg2pdf = CDLL("./libsvg2pdf.so")
svg2pdf.svg2pdf("report.svg", "teste2.pdf")
svg2pdf.svg2pdf2("report.svg", "teste3.pdf")
|
normal
|
{
"blob_id": "9c85252b4048b5412978b3ac05cd6dde4479e3bf",
"index": 3333,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsvg2pdf.svg2pdf('report.svg', 'teste2.pdf')\nsvg2pdf.svg2pdf2('report.svg', 'teste3.pdf')\n",
"step-3": "<mask token>\nsvg2pdf = CDLL('./libsvg2pdf.so')\nsvg2pdf.svg2pdf('report.svg', 'teste2.pdf')\nsvg2pdf.svg2pdf2('report.svg', 'teste3.pdf')\n",
"step-4": "from ctypes import CDLL\nsvg2pdf = CDLL('./libsvg2pdf.so')\nsvg2pdf.svg2pdf('report.svg', 'teste2.pdf')\nsvg2pdf.svg2pdf2('report.svg', 'teste3.pdf')\n",
"step-5": "from ctypes import CDLL\nsvg2pdf = CDLL(\"./libsvg2pdf.so\")\nsvg2pdf.svg2pdf(\"report.svg\", \"teste2.pdf\")\nsvg2pdf.svg2pdf2(\"report.svg\", \"teste3.pdf\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from collections import defaultdict


def del_ops3(str1, str2):
# find all common letters in both strings
common1 = [x for x in str1 if x in str2]
common2 = [x for x in str2 if x in str1]
if len(common2) < len(common1):
common1, common2 = common2, common1
# find total of strings with 0, 1, or 2 characters, (2 chars - only if c1 != c2)
if len(common1) == 0 or len(common2) == 0:
total = len(str1) + len(str2)
elif (len(common1) == 1 or len(common2) == 1) or (len(common1) == 2 and len(common2) == 2 and common1 != common2):
total = (len(str1) - 1) + (len(str2) - 1)
# else, if 2 characters in c1, c2 and c1 != c2 or > 2 characters in c1, c2
else:
# create references to c2 indexes of each letter in c1
refs = defaultdict(list)
for i, letter in enumerate(common2):
refs[letter].append(i)
# find all letters that follow each other (same order) in both strings
substring = [] # substring == all common letters in same sequence in both strings
previous = min(refs[common1[0]])
for i, letter in enumerate(common1):
# if any c2 index of the current letter in c1 is > the c2 index of previous letter:
# the current letter follows the previous letter in both c1 and c2
if any([i > previous for i in refs[letter]]) and all([i != previous for i in refs[letter]]):
# if the same letter at the same index is not already in substring:
if all([hash(x) != hash(common2[previous]) for x in substring]):
substring.append(common2[previous])
substring.append(letter)
previous = min([x for x in refs[letter] if x >= previous])
# next iteration of previous is always == the smallest index
# of the current letter that is >= current iteration of previous
# (always > previous if not first iteration in c1)
# indexes are never repeated or skipped
# elif the letter does not follow the same letter in both strings:
# previous = smallest c2 index of letter that broke sequence/did not follow in both strings
            elif all(idx < previous for idx in refs[letter]):
previous = min([x for x in refs[letter]])
print(i, previous, letter, substring)
# total == total of all letters - (number of letters in substring * 2)
total = (len(str1) - len(substring)) + (len(str2) - len(substring))
return "".join(substring)
|
normal
|
{
"blob_id": "f9d1013fa278b9078e603b012abbdde0be2e0962",
"index": 7926,
"step-1": "<mask token>\n",
"step-2": "def del_ops3(str1, str2):\n common1 = [x for x in str1 if x in str2]\n common2 = [x for x in str2 if x in str1]\n if len(common2) < len(common1):\n common1, common2 = common2, common1\n if len(common1) == 0 or len(common2) == 0:\n total = len(str1) + len(str2)\n elif (len(common1) == 1 or len(common2) == 1) or len(common1) == 2 and len(\n common2) == 2 and common1 != common2:\n total = len(str1) - 1 + (len(str2) - 1)\n else:\n refs = defaultdict(list)\n for i, letter in enumerate(common2):\n refs[letter].append(i)\n substring = []\n previous = min(refs[common1[0]])\n for i, letter in enumerate(common1):\n if any([(i > previous) for i in refs[letter]]) and all([(i !=\n previous) for i in refs[letter]]):\n if all([(hash(x) != hash(common2[previous])) for x in\n substring]):\n substring.append(common2[previous])\n substring.append(letter)\n previous = min([x for x in refs[letter] if x >= previous])\n elif all(refs[letter]) < previous:\n previous = min([x for x in refs[letter]])\n print(i, previous, letter, substring)\n total = len(str1) - len(substring) + (len(str2) - len(substring))\n return ''.join(substring)\n",
"step-3": "def del_ops3(str1, str2):\n\n # find all common letters in both strings\n common1 = [x for x in str1 if x in str2]\n common2 = [x for x in str2 if x in str1]\n if len(common2) < len(common1):\n common1, common2 = common2, common1\n\n # find total of strings with 0, 1, or 2 characters, (2 chars - only if c1 != c2)\n if len(common1) == 0 or len(common2) == 0:\n total = len(str1) + len(str2)\n elif (len(common1) == 1 or len(common2) == 1) or (len(common1) == 2 and len(common2) == 2 and common1 != common2):\n total = (len(str1) - 1) + (len(str2) - 1)\n\n # else, if 2 characters in c1, c2 and c1 != c2 or > 2 characters in c1, c2\n else:\n\n # create references to c2 indexes of each letter in c1\n refs = defaultdict(list)\n for i, letter in enumerate(common2):\n refs[letter].append(i)\n\n # find all letters that follow each other (same order) in both strings\n substring = [] # substring == all common letters in same sequence in both strings\n previous = min(refs[common1[0]])\n for i, letter in enumerate(common1):\n\n # if any c2 index of the current letter in c1 is > the c2 index of previous letter:\n # the current letter follows the previous letter in both c1 and c2\n if any([i > previous for i in refs[letter]]) and all([i != previous for i in refs[letter]]):\n\n # if the same letter at the same index is not already in substring:\n if all([hash(x) != hash(common2[previous]) for x in substring]):\n substring.append(common2[previous])\n\n substring.append(letter)\n previous = min([x for x in refs[letter] if x >= previous])\n # next iteration of previous is always == the smallest index\n # of the current letter that is >= current iteration of previous\n # (always > previous if not first iteration in c1)\n # indexes are never repeated or skipped\n\n # elif the letter does not follow the same letter in both strings:\n # previous = smallest c2 index of letter that broke sequence/did not follow in both strings\n elif all(refs[letter]) < previous:\n previous = min([x for x in refs[letter]])\n print(i, previous, letter, substring)\n # total == total of all letters - (number of letters in substring * 2)\n total = (len(str1) - len(substring)) + (len(str2) - len(substring))\n\n return \"\".join(substring)\n \n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
marks = {
"S":"subject",
"O":"object",
"A":"attribute",
"C":"clause",
}
marks_reverse = {
"subject":"S",
"object":"O",
"attribute":"A",
"clause":"C",
}
|
normal
|
{
"blob_id": "c66b07c45f4a675a6c7fcec82048a3197910d0d8",
"index": 3435,
"step-1": "<mask token>\n",
"step-2": "marks = {'S': 'subject', 'O': 'object', 'A': 'attribute', 'C': 'clause'}\nmarks_reverse = {'subject': 'S', 'object': 'O', 'attribute': 'A', 'clause': 'C'\n }\n",
"step-3": "marks = {\n \"S\":\"subject\",\n \"O\":\"object\",\n \"A\":\"attribute\",\n \"C\":\"clause\",\n}\nmarks_reverse = {\n \"subject\":\"S\",\n \"object\":\"O\",\n \"attribute\":\"A\",\n \"clause\":\"C\",\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 24 18:50:16 2018
@author: User
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 19:05:42 2018
@author: User
"""
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import lxml
import html5lib
import csv
path = 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm'
path1='E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'
#extracting the summary compensation table from html file
dfhtml = pd.read_html(path,match="Bonus")
len(dfhtml)
dfhtml
type(dfhtml)
#Converting list to string and removing the NaN
htmltxt=str(dfhtml)
txtnew=htmltxt.replace("NaN","")
print(txtnew)
#writing the list to text file
f=open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt','w')
f.writelines(str(txtnew))
f.close()
#df1=dfhtml[0].replace(np.NaN,np.nan)
df2=dfhtml[0].dropna(axis=1, how='all')
df2=df2.dropna(thresh=1)
#df2.iloc[0:2,:] # Displaying the Rows with the Titles only.
|
normal
|
{
"blob_id": "c7768e44464703552f579a1ec68b58fd9746a381",
"index": 8743,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlen(dfhtml)\ndfhtml\ntype(dfhtml)\n<mask token>\nprint(txtnew)\n<mask token>\nf.writelines(str(txtnew))\nf.close()\n<mask token>\n",
"step-3": "<mask token>\npath = (\n 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm')\npath1 = 'E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'\ndfhtml = pd.read_html(path, match='Bonus')\nlen(dfhtml)\ndfhtml\ntype(dfhtml)\nhtmltxt = str(dfhtml)\ntxtnew = htmltxt.replace('NaN', '')\nprint(txtnew)\nf = open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt', 'w')\nf.writelines(str(txtnew))\nf.close()\ndf2 = dfhtml[0].dropna(axis=1, how='all')\ndf2 = df2.dropna(thresh=1)\n",
"step-4": "<mask token>\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nimport lxml\nimport html5lib\nimport csv\npath = (\n 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm')\npath1 = 'E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'\ndfhtml = pd.read_html(path, match='Bonus')\nlen(dfhtml)\ndfhtml\ntype(dfhtml)\nhtmltxt = str(dfhtml)\ntxtnew = htmltxt.replace('NaN', '')\nprint(txtnew)\nf = open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt', 'w')\nf.writelines(str(txtnew))\nf.close()\ndf2 = dfhtml[0].dropna(axis=1, how='all')\ndf2 = df2.dropna(thresh=1)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 24 18:50:16 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 19:05:42 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport numpy as np\r\nimport lxml\r\nimport html5lib\r\nimport csv\r\n\r\npath = 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm'\r\npath1='E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'\r\n\r\n#extracting the summary compensation table from html file\r\ndfhtml = pd.read_html(path,match=\"Bonus\")\r\nlen(dfhtml)\r\ndfhtml\r\ntype(dfhtml)\r\n\r\n#Converting list to string and removing the NaN\r\nhtmltxt=str(dfhtml)\r\ntxtnew=htmltxt.replace(\"NaN\",\"\")\r\nprint(txtnew)\r\n\r\n#writing the list to text file\r\nf=open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt','w')\r\nf.writelines(str(txtnew))\r\nf.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#df1=dfhtml[0].replace(np.NaN,np.nan)\r\ndf2=dfhtml[0].dropna(axis=1, how='all') \r\ndf2=df2.dropna(thresh=1)\r\n#df2.iloc[0:2,:] # Displaying the Rows with the Titles only.\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hilma import Mesh, loadPly, savePly
mesh = Mesh()
loadPly("head.ply", mesh)
verts = []
faces = []
edges = []
uvs = []
for v in mesh.getVertices():
verts.append( (v.x, v.y, v.z) )
for t in mesh.getTrianglesIndices():
faces.append( (t.x, t.y, t.z ) )
for e in mesh.getLinesIndices():
edges.append( (e.x, e.y) )
# print( verts )
# print( faces )
# print(edges)
savePly("out.ply", mesh, False)
|
normal
|
{
"blob_id": "c02af2ecd980da4ceff133c13072ad7c6b724041",
"index": 5329,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nloadPly('head.ply', mesh)\n<mask token>\nfor v in mesh.getVertices():\n verts.append((v.x, v.y, v.z))\nfor t in mesh.getTrianglesIndices():\n faces.append((t.x, t.y, t.z))\nfor e in mesh.getLinesIndices():\n edges.append((e.x, e.y))\nsavePly('out.ply', mesh, False)\n",
"step-3": "<mask token>\nmesh = Mesh()\nloadPly('head.ply', mesh)\nverts = []\nfaces = []\nedges = []\nuvs = []\nfor v in mesh.getVertices():\n verts.append((v.x, v.y, v.z))\nfor t in mesh.getTrianglesIndices():\n faces.append((t.x, t.y, t.z))\nfor e in mesh.getLinesIndices():\n edges.append((e.x, e.y))\nsavePly('out.ply', mesh, False)\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom hilma import Mesh, loadPly, savePly\nmesh = Mesh()\nloadPly('head.ply', mesh)\nverts = []\nfaces = []\nedges = []\nuvs = []\nfor v in mesh.getVertices():\n verts.append((v.x, v.y, v.z))\nfor t in mesh.getTrianglesIndices():\n faces.append((t.x, t.y, t.z))\nfor e in mesh.getLinesIndices():\n edges.append((e.x, e.y))\nsavePly('out.ply', mesh, False)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom hilma import Mesh, loadPly, savePly\n\nmesh = Mesh()\nloadPly(\"head.ply\", mesh)\n\nverts = []\nfaces = []\nedges = []\nuvs = []\n\n\nfor v in mesh.getVertices():\n verts.append( (v.x, v.y, v.z) )\n\nfor t in mesh.getTrianglesIndices():\n faces.append( (t.x, t.y, t.z ) )\n\nfor e in mesh.getLinesIndices():\n edges.append( (e.x, e.y) )\n\n# print( verts )\n# print( faces )\n# print(edges)\n\nsavePly(\"out.ply\", mesh, False)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
with expression [as var]:
    # ...BODY...
#object is the result of the expression and must have __enter__ and __exit__ methods
#result of the expression must be context manager - implements context management protocol
#https://www.python.org/dev/peps/pep-0343/
# This PEP adds a new statement "with" to the Python language to make
# it possible to factor out standard uses of try/finally statements.
# In this PEP, context managers provide __enter__() and __exit__()
# methods that are invoked on entry to and exit from the body of the
# with statement.
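
# A minimal sketch of the protocol described above (the class name and prints
# are illustrative, not from any library): __enter__ runs on entry and its
# return value is what `as var` binds to; __exit__ always runs on the way out,
# even if the body raises.
class ManagedResource:
    def __enter__(self):
        print("acquired")
        return self                      # bound to `var` below

    def __exit__(self, exc_type, exc_value, traceback):
        print("released")                # runs even on exceptions
        return False                     # False = do not suppress exceptions


with ManagedResource() as var:
    print("body runs here")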
|
normal
|
{
"blob_id": "e1787fd4be66d19ab83ece44eacfd96cb488b504",
"index": 722,
"step-1": "with expression [as var]\n\t#...BODY...\n\n#object is the result of the expression and must have __enter__ and __exit__ methods\n#result of the expression must be context manager - implements context management protocol\n\n#https://www.python.org/dev/peps/pep-0343/\n# This PEP adds a new statement \"with\" to the Python language to make\n# it possible to factor out standard uses of try/finally statements.\n\n# In this PEP, context managers provide __enter__() and __exit__()\n# methods that are invoked on entry to and exit from the body of the\n# with statement.",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
The Snail v 2
"Buy the dips! ... then wait"
STRATEGY
1. Selects coins that are X% (percent_below) below their X day (LIMIT) maximum
2. ** NEW ** Finds movement (MOVEMENT) range over X Days
- if MOVEMENT* > TAKE_PROFIT coins pass to 3
3. Check coins are not already owned
4. Uses MACD to check if coins are currently on an uptrend
5. Adds coins that pass all above tests to Signal file for the Bot to buy (ordered by Potential Profit from High to Low)
* MOVEMENT
Looks at the fluctuation in price over LIMIT days and compares to your TAKE_PROFIT settings.
e.g. if your TAKE_PROFIT is 3% but the movement is only 1%, you won't hit TP and will be left holding the coin
This can be turned off if you want.
STRATEGY SETTINGS
LIMIT = 4
INTERVAL = '1d'
profit_min = 15
profit_max = 100 # only required if you want to limit max profit
percent_below = 0.6 # change risk level: 0.7 = 70% below high_price, 0.5 = 50% below high_price
MOVEMENT = True
OTHER SETTINGS
BVT or OLORIN Fork.
Set True / False for compatibility
WINDOWS (WINDOWS OS)
Set True / False for compatibility
DISCORD
send message to Discord - Set True / False
CONFIG.YML SETTINGS
CHANGE_IN_PRICE: 100 REQUIRED
Do NOT use pausebotmod as it will prevent the_snail from buying - The Snail buys the dips
Developed by scoobie
Thanks to
@vyacheslav for optimising the code with async and adding list sorting,
@Kevin.Butters for the meticulous testing and reporting,
@OlorinSledge for the coding advice and a great fork
DISCLAIMER
CHECK YOU HAVE ALL THE REQUIRED IMPORTS INSTALLED
Developed for OlorinSledge fork - no support for any others as I don't use them.
Troubleshooting and help - please use the #troubleshooting channel
Settings - the settings in this file are what I currently use, please don't DM me for the 'best' settings - for me, these are the best so far.
There are a lot of options to adjust the strategy; test them out and share your results in #bot-strategies so others can learn from them too
Hope the Snail makes you rich!
"""
import os
import re
import aiohttp
import asyncio
import time
import json
from datetime import datetime, timedelta
from kucoin.client import Client
from helpers.parameters import parse_args, load_config
import pandas as pd
import pandas_ta as ta
import ccxt
from tradingview_ta import TA_Handler, Interval, Exchange
import requests
# Load creds modules
from helpers.handle_creds import (
load_correct_creds, load_discord_creds
)
# Settings
args = parse_args()
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_creds = load_config(creds_file)
parsed_config = load_config(config_file)
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
EX_PAIRS = parsed_config['trading_options']['FIATS']
TEST_MODE = parsed_config['script_options']['TEST_MODE']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
DISCORD_WEBHOOK = load_discord_creds(parsed_creds)
# Load creds for correct environment
access_key, secret_key, passphrase_key = load_correct_creds(parsed_creds)
client = Client(access_key, secret_key, passphrase_key)
# If True, an updated list of coins will be generated from the site - http://edgesforledges.com/watchlists/binance.
# If False, then the list you create in TICKERS_LIST = 'tickers.txt' will be used.
CREATE_TICKER_LIST = True
# When creating a ticker list from the source site:
# http://edgesforledges.com you can use the parameter (all or innovation-zone).
# ticker_type = 'innovation-zone'
ticker_type = 'all'
if CREATE_TICKER_LIST:
TICKERS_LIST = 'tickers_all_USDT.txt'
else:
TICKERS_LIST = 'tickers_all_USDT.txt'
# System Settings
BVT = False
OLORIN = True # if not using Olorin Sledge Fork set to False
if OLORIN:
signal_file_type = '.buy'
else:
signal_file_type = '.exs'
# if using Windows OS set to True, else set to False
WINDOWS = True
# send message to discord
DISCORD = True
# Strategy Settings
LIMIT = 4
INTERVAL = '1day'
profit_min = 15
profit_max = 100 # only required if you want to limit max profit
percent_below = 0.7 # change risk level: 0.7 = 70% below high_price, 0.5 = 50% below high_price
MOVEMENT = True
# Display Setttings
all_info = False
class TextColors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
YELLOW = '\033[33m'
TURQUOISE = '\033[36m'
UNDERLINE = '\033[4m'
END = '\033[0m'
ITALICS = '\033[3m'
def msg_discord(msg):
message = msg + '\n\n'
mUrl = "https://discordapp.com/api/webhooks/"+DISCORD_WEBHOOK
data = {"content": message}
response = requests.post(mUrl, json=data)
def get_price(client_api):
initial_price = {}
tickers = [line.strip() for line in open(TICKERS_LIST)]
prices = client_api.get_ticker()
for coin in prices['ticker']:
for item in tickers:
if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):
initial_price[coin['symbol']] = {'symbol': coin['symbol'],
'price': coin['last'],
'time': datetime.now(),
'price_list': [],
'change_price': 0.0,
'cov': 0.0}
return initial_price
async def create_urls(ticker_list, interval) -> dict:
coins_urls = {}
if INTERVAL == '1day':
st = datetime.now() - timedelta(days=float(LIMIT))
et = datetime.now()
start_time = int(st.timestamp())
stop_time = int(et.timestamp())
for coin in ticker_list:
if type(coin) == dict:
if all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):
coins_urls[coin['symbol']] = {'symbol': coin['symbol'],
'url': f"https://api.kucoin.com/api/v1/market/candles?symbol"
f"{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}"}
else:
coins_urls[coin] = {'symbol': coin,
'url': f"https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}"}
return coins_urls
async def get(session: aiohttp.ClientSession, url) -> dict:
data = {}
symbol = re.findall(r'=\w+', url)[0][1:]
try:
resp = await session.request('GET', url=url)
data['symbol'] = symbol
# data['last_price'] = await get_last_price(session=session, symbol=symbol)
data['data'] = await resp.json()
except Exception as e:
print(e)
return data
async def get_historical_data(ticker_list, interval):
urls = await create_urls(ticker_list=ticker_list, interval=interval)
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
async with aiohttp.ClientSession() as session:
tasks = []
for url in urls:
link = urls[url]['url']
tasks.append(get(session=session, url=link))
response = await asyncio.gather(*tasks, return_exceptions=True)
return response
def get_prices_high_low(list_coins, interval):
if WINDOWS:
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
prices_low_high = {}
hist_data = asyncio.run(get_historical_data(ticker_list=list_coins, interval=interval))
for item in hist_data:
coin_symbol = item['symbol']
h_p = []
l_p = []
try:
for i in item['data']['data']:
close_time = i[0]
open_price = float(i[1])
close_price = float(i[2])
high_price = float(i[3])
low_price = float(i[4])
volume = float(i[5])
quote_volume = i[6]
h_p.append(high_price)
l_p.append(low_price)
except Exception as e:
print(f'Exception {e}')
continue
prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price': h_p, 'low_price': l_p, 'current_potential': 0.0}
return prices_low_high
def do_work():
while True:
init_price = get_price(client)
coins = get_prices_high_low(init_price, INTERVAL)
print(f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}')
if os.path.exists(f'signals/snail_scan{signal_file_type}'):
os.remove(f'signals/snail_scan{signal_file_type}')
current_potential_list = []
held_coins_list = {}
if TEST_MODE:
coin_path = 'test_coins_bought.json'
elif BVT:
coin_path = 'coins_bought.json'
else:
coin_path = 'live_coins_bought.json'
if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:
with open(coin_path) as file:
held_coins_list = json.load(file)
for coin in coins:
if len(coins[coin]['high_price']) == LIMIT:
high_price = float(max(coins[coin]['high_price']))
low_price = float(min(coins[coin]['low_price']))
last_price = float(init_price[coin + PAIR_WITH]['price'])
# Calculation
range = high_price - low_price
potential = (low_price / high_price) * 100
buy_above = low_price * 1.00
buy_below = high_price - (range * percent_below) # percent below affects Risk
max_potential = potential * 0.98
min_potential = potential * 0.6
safe_potential = potential - 12
current_range = high_price - last_price
current_potential = ((high_price / last_price) * 100) - 100
coins[coin]['current_potential'] = current_potential
movement = (low_price / range)
# print(f'{coin} {potential:.2f}% {movement:.2f}%')
if MOVEMENT:
if profit_min < current_potential < profit_max and last_price < buy_below and movement >= TAKE_PROFIT and coin not in held_coins_list:
current_potential_list.append(coins[coin])
else:
if profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:
current_potential_list.append(coins[coin])
if current_potential_list:
# print(current_potential_list)
exchange = ccxt.binance()
macd_list = []
for i in current_potential_list:
coin = i['symbol'] + PAIR_WITH
current_potential = i['current_potential']
macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)
macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)
macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)
try:
macd1day = exchange.fetch_ohlcv(coin, timeframe='1d', limit=36)
except Exception as e:
print(f'{coin} Exception {e}')
continue
macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m', limit=36)
df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
df1day = pd.DataFrame(macd1day, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
# Wait for 1 sec to prevent kucoin query limit
time.sleep(1)
try:
macd1 = df1.ta.macd(fast=12, slow=26)
macd5 = df5.ta.macd(fast=12, slow=26)
macd15 = df15.ta.macd(fast=12, slow=26)
macd1day = df1day.ta.macd(fast=12, slow=26)
macdbtc = dfbtc.ta.macd(fast=12, slow=26)
get_hist1 = macd1.iloc[35, 1]
get_hist5 = macd5.iloc[35, 1]
get_hist15 = macd15.iloc[35, 1]
get_hist1day = macd1day.iloc[35, 1]
get_histbtc = macdbtc.iloc[35, 1]
except Exception as e:
print(f'{coin} Exception {e}')
continue
if all_info:
if get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and get_hist1day >= 0 and get_histbtc >= 0:
print(f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}')
else:
print(f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}')
if get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and get_hist1day >= 0 and get_histbtc >= 0:
# Add to coins for Snail to scan
print(f'{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\n')
                    macd_list.append(i)  # i is already the coins[...] dict for this symbol
# else:
# print(f'Do NOT buy {coin}')
if macd_list:
# print(macd_list)
sort_list = sorted(macd_list, key=lambda x: x[f'current_potential'], reverse=True)
for i in sort_list:
coin = i['symbol']
current_potential = i['current_potential']
last_price = float(init_price[coin + PAIR_WITH]['price'])
# print(f'list {coin} {last_price}')
high_price = float(max(coins[coin]['high_price']))
# print(f'list {coin} {high_price}')
low_price = float(min(coins[coin]['low_price']))
# print(f'list {coin} {low_price}')
range = high_price - low_price
potential = (low_price / high_price) * 100
buy_above = low_price * 1.00
buy_below = high_price - (range * percent_below)
current_range = high_price - last_price
if all_info:
print(f'\nPrice: ${last_price:.3f}\n'
f'High: ${high_price:.3f}\n'
# f'Plan: TP {TP}% TTP {TTP}%\n'
f'Day Max Range: ${range:.3f}\n'
f'Current Range: ${current_range:.3f} \n'
# f'Daily Range: ${range:.3f}\n'
# f'Current Range ${current_range:.3f} \n'
# f'Potential profit before safety: {potential:.0f}%\n'
# f'Buy above: ${buy_above:.3f}\n'
f'Buy Below: ${buy_below:.3f}\n'
f'Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}'
# f'Max Profit {max_potential:.2f}%\n'
# f'Min Profit {min_potential:.2f}%\n'
)
# print(f'Adding {TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} to buy list')
# add to signal
with open(f'signals/snail_scan{signal_file_type}', 'a+') as f:
f.write(str(coin + PAIR_WITH) + '\n')
# else:
# print(f'{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} may not be profitable at this time')
snail_coins = len(current_potential_list)
macd_coins = len(macd_list)
snail_discord = f'Snail found {snail_coins} coins and MACD approved {macd_coins}'
if DISCORD:
msg_discord(snail_discord)
print(f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}')
time.sleep(180)
|
normal
|
{
"blob_id": "77f94ecd205ae9f240f25d959a6d5cd9cf844d86",
"index": 844,
"step-1": "<mask token>\n\n\nclass TextColors:\n BUY = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n SELL_LOSS = '\\x1b[91m'\n SELL_PROFIT = '\\x1b[32m'\n DIM = '\\x1b[2m\\x1b[35m'\n DEFAULT = '\\x1b[39m'\n YELLOW = '\\x1b[33m'\n TURQUOISE = '\\x1b[36m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n ITALICS = '\\x1b[3m'\n\n\n<mask token>\n\n\ndef get_price(client_api):\n initial_price = {}\n tickers = [line.strip() for line in open(TICKERS_LIST)]\n prices = client_api.get_ticker()\n for coin in prices['ticker']:\n for item in tickers:\n if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH\n not in coin['symbol'] for item in EX_PAIRS):\n initial_price[coin['symbol']] = {'symbol': coin['symbol'],\n 'price': coin['last'], 'time': datetime.now(),\n 'price_list': [], 'change_price': 0.0, 'cov': 0.0}\n return initial_price\n\n\n<mask token>\n\n\ndef get_prices_high_low(list_coins, interval):\n if WINDOWS:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n prices_low_high = {}\n hist_data = asyncio.run(get_historical_data(ticker_list=list_coins,\n interval=interval))\n for item in hist_data:\n coin_symbol = item['symbol']\n h_p = []\n l_p = []\n try:\n for i in item['data']['data']:\n close_time = i[0]\n open_price = float(i[1])\n close_price = float(i[2])\n high_price = float(i[3])\n low_price = float(i[4])\n volume = float(i[5])\n quote_volume = i[6]\n h_p.append(high_price)\n l_p.append(low_price)\n except Exception as e:\n print(f'Exception {e}')\n continue\n prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price':\n h_p, 'low_price': l_p, 'current_potential': 0.0}\n return prices_low_high\n\n\ndef do_work():\n while True:\n init_price = get_price(client)\n coins = get_prices_high_low(init_price, INTERVAL)\n print(\n f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}'\n )\n if os.path.exists(f'signals/snail_scan{signal_file_type}'):\n os.remove(f'signals/snail_scan{signal_file_type}')\n current_potential_list = []\n held_coins_list = {}\n if TEST_MODE:\n coin_path = 'test_coins_bought.json'\n elif BVT:\n coin_path = 'coins_bought.json'\n else:\n coin_path = 'live_coins_bought.json'\n if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:\n with open(coin_path) as file:\n held_coins_list = json.load(file)\n for coin in coins:\n if len(coins[coin]['high_price']) == LIMIT:\n high_price = float(max(coins[coin]['high_price']))\n low_price = float(min(coins[coin]['low_price']))\n last_price = float(init_price[coin + PAIR_WITH]['price'])\n range = high_price - low_price\n potential = low_price / high_price * 100\n buy_above = low_price * 1.0\n buy_below = high_price - range * percent_below\n max_potential = potential * 0.98\n min_potential = potential * 0.6\n safe_potential = potential - 12\n current_range = high_price - last_price\n current_potential = high_price / last_price * 100 - 100\n coins[coin]['current_potential'] = current_potential\n movement = low_price / range\n if MOVEMENT:\n if (profit_min < current_potential < profit_max and \n last_price < buy_below and movement >= TAKE_PROFIT and\n coin not in held_coins_list):\n current_potential_list.append(coins[coin])\n elif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:\n current_potential_list.append(coins[coin])\n if current_potential_list:\n exchange = ccxt.binance()\n macd_list = []\n for i in current_potential_list:\n coin = i['symbol'] + PAIR_WITH\n current_potential = 
i['current_potential']\n macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)\n macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)\n macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)\n try:\n macd1day = exchange.fetch_ohlcv(coin, timeframe='1d',\n limit=36)\n except Exception as e:\n print(f'{coin} Exception {e}')\n continue\n macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m',\n limit=36)\n df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df1day = pd.DataFrame(macd1day, columns=['time', 'open',\n 'high', 'low', 'close', 'volume'])\n dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open',\n 'high', 'low', 'close', 'volume'])\n time.sleep(1)\n try:\n macd1 = df1.ta.macd(fast=12, slow=26)\n macd5 = df5.ta.macd(fast=12, slow=26)\n macd15 = df15.ta.macd(fast=12, slow=26)\n macd1day = df1day.ta.macd(fast=12, slow=26)\n macdbtc = dfbtc.ta.macd(fast=12, slow=26)\n get_hist1 = macd1.iloc[35, 1]\n get_hist5 = macd5.iloc[35, 1]\n get_hist15 = macd15.iloc[35, 1]\n get_hist1day = macd1day.iloc[35, 1]\n get_histbtc = macdbtc.iloc[35, 1]\n except Exception as e:\n print(f'{coin} Exception {e}')\n continue\n if all_info:\n if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >=\n 0 and get_hist1day >= 0 and get_histbtc >= 0):\n print(\n f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}'\n )\n else:\n print(\n f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}'\n )\n if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and\n get_hist1day >= 0 and get_histbtc >= 0):\n print(\n f\"\"\"{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\n\"\"\"\n )\n macd_list.append(coins[coin])\n if macd_list:\n sort_list = sorted(macd_list, key=lambda x: x[\n f'current_potential'], reverse=True)\n for i in sort_list:\n coin = i['symbol']\n current_potential = i['current_potential']\n last_price = float(init_price[coin + PAIR_WITH]['price'])\n high_price = float(max(coins[coin]['high_price']))\n low_price = float(min(coins[coin]['low_price']))\n range = high_price - low_price\n potential = low_price / high_price * 100\n buy_above = low_price * 1.0\n buy_below = high_price - range * percent_below\n current_range = high_price - last_price\n if all_info:\n print(\n f\"\"\"\nPrice: ${last_price:.3f}\nHigh: ${high_price:.3f}\nDay Max Range: ${range:.3f}\nCurrent Range: ${current_range:.3f} \nBuy Below: ${buy_below:.3f}\nPotential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\"\"\"\n )\n with open(f'signals/snail_scan{signal_file_type}', 'a+'\n ) as f:\n f.write(str(coin + PAIR_WITH) + '\\n')\n snail_coins = len(current_potential_list)\n macd_coins = len(macd_list)\n snail_discord = (\n f'Snail found {snail_coins} coins and MACD approved {macd_coins}'\n )\n if DISCORD:\n msg_discord(snail_discord)\n print(\n f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}'\n )\n time.sleep(180)\n",
"step-2": "<mask token>\nif CREATE_TICKER_LIST:\n TICKERS_LIST = 'tickers_all_USDT.txt'\nelse:\n TICKERS_LIST = 'tickers_all_USDT.txt'\n<mask token>\nif OLORIN:\n signal_file_type = '.buy'\nelse:\n signal_file_type = '.exs'\n<mask token>\n\n\nclass TextColors:\n BUY = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n SELL_LOSS = '\\x1b[91m'\n SELL_PROFIT = '\\x1b[32m'\n DIM = '\\x1b[2m\\x1b[35m'\n DEFAULT = '\\x1b[39m'\n YELLOW = '\\x1b[33m'\n TURQUOISE = '\\x1b[36m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n ITALICS = '\\x1b[3m'\n\n\ndef msg_discord(msg):\n message = msg + '\\n\\n'\n mUrl = 'https://discordapp.com/api/webhooks/' + DISCORD_WEBHOOK\n data = {'content': message}\n response = requests.post(mUrl, json=data)\n\n\ndef get_price(client_api):\n initial_price = {}\n tickers = [line.strip() for line in open(TICKERS_LIST)]\n prices = client_api.get_ticker()\n for coin in prices['ticker']:\n for item in tickers:\n if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH\n not in coin['symbol'] for item in EX_PAIRS):\n initial_price[coin['symbol']] = {'symbol': coin['symbol'],\n 'price': coin['last'], 'time': datetime.now(),\n 'price_list': [], 'change_price': 0.0, 'cov': 0.0}\n return initial_price\n\n\nasync def create_urls(ticker_list, interval) ->dict:\n coins_urls = {}\n if INTERVAL == '1day':\n st = datetime.now() - timedelta(days=float(LIMIT))\n et = datetime.now()\n start_time = int(st.timestamp())\n stop_time = int(et.timestamp())\n for coin in ticker_list:\n if type(coin) == dict:\n if all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS\n ):\n coins_urls[coin['symbol']] = {'symbol': coin['symbol'],\n 'url':\n f\"https://api.kucoin.com/api/v1/market/candles?symbol{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}\"\n }\n else:\n coins_urls[coin] = {'symbol': coin, 'url':\n f'https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}'\n }\n return coins_urls\n\n\nasync def get(session: aiohttp.ClientSession, url) ->dict:\n data = {}\n symbol = re.findall('=\\\\w+', url)[0][1:]\n try:\n resp = await session.request('GET', url=url)\n data['symbol'] = symbol\n data['data'] = await resp.json()\n except Exception as e:\n print(e)\n return data\n\n\nasync def get_historical_data(ticker_list, interval):\n urls = await create_urls(ticker_list=ticker_list, interval=interval)\n if WINDOWS:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n async with aiohttp.ClientSession() as session:\n tasks = []\n for url in urls:\n link = urls[url]['url']\n tasks.append(get(session=session, url=link))\n response = await asyncio.gather(*tasks, return_exceptions=True)\n return response\n\n\ndef get_prices_high_low(list_coins, interval):\n if WINDOWS:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n prices_low_high = {}\n hist_data = asyncio.run(get_historical_data(ticker_list=list_coins,\n interval=interval))\n for item in hist_data:\n coin_symbol = item['symbol']\n h_p = []\n l_p = []\n try:\n for i in item['data']['data']:\n close_time = i[0]\n open_price = float(i[1])\n close_price = float(i[2])\n high_price = float(i[3])\n low_price = float(i[4])\n volume = float(i[5])\n quote_volume = i[6]\n h_p.append(high_price)\n l_p.append(low_price)\n except Exception as e:\n print(f'Exception {e}')\n continue\n prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price':\n h_p, 'low_price': l_p, 'current_potential': 0.0}\n return prices_low_high\n\n\ndef 
do_work():\n while True:\n init_price = get_price(client)\n coins = get_prices_high_low(init_price, INTERVAL)\n print(\n f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}'\n )\n if os.path.exists(f'signals/snail_scan{signal_file_type}'):\n os.remove(f'signals/snail_scan{signal_file_type}')\n current_potential_list = []\n held_coins_list = {}\n if TEST_MODE:\n coin_path = 'test_coins_bought.json'\n elif BVT:\n coin_path = 'coins_bought.json'\n else:\n coin_path = 'live_coins_bought.json'\n if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:\n with open(coin_path) as file:\n held_coins_list = json.load(file)\n for coin in coins:\n if len(coins[coin]['high_price']) == LIMIT:\n high_price = float(max(coins[coin]['high_price']))\n low_price = float(min(coins[coin]['low_price']))\n last_price = float(init_price[coin + PAIR_WITH]['price'])\n range = high_price - low_price\n potential = low_price / high_price * 100\n buy_above = low_price * 1.0\n buy_below = high_price - range * percent_below\n max_potential = potential * 0.98\n min_potential = potential * 0.6\n safe_potential = potential - 12\n current_range = high_price - last_price\n current_potential = high_price / last_price * 100 - 100\n coins[coin]['current_potential'] = current_potential\n movement = low_price / range\n if MOVEMENT:\n if (profit_min < current_potential < profit_max and \n last_price < buy_below and movement >= TAKE_PROFIT and\n coin not in held_coins_list):\n current_potential_list.append(coins[coin])\n elif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:\n current_potential_list.append(coins[coin])\n if current_potential_list:\n exchange = ccxt.binance()\n macd_list = []\n for i in current_potential_list:\n coin = i['symbol'] + PAIR_WITH\n current_potential = i['current_potential']\n macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)\n macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)\n macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)\n try:\n macd1day = exchange.fetch_ohlcv(coin, timeframe='1d',\n limit=36)\n except Exception as e:\n print(f'{coin} Exception {e}')\n continue\n macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m',\n limit=36)\n df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df1day = pd.DataFrame(macd1day, columns=['time', 'open',\n 'high', 'low', 'close', 'volume'])\n dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open',\n 'high', 'low', 'close', 'volume'])\n time.sleep(1)\n try:\n macd1 = df1.ta.macd(fast=12, slow=26)\n macd5 = df5.ta.macd(fast=12, slow=26)\n macd15 = df15.ta.macd(fast=12, slow=26)\n macd1day = df1day.ta.macd(fast=12, slow=26)\n macdbtc = dfbtc.ta.macd(fast=12, slow=26)\n get_hist1 = macd1.iloc[35, 1]\n get_hist5 = macd5.iloc[35, 1]\n get_hist15 = macd15.iloc[35, 1]\n get_hist1day = macd1day.iloc[35, 1]\n get_histbtc = macdbtc.iloc[35, 1]\n except Exception as e:\n print(f'{coin} Exception {e}')\n continue\n if all_info:\n if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >=\n 0 and get_hist1day >= 0 and get_histbtc >= 0):\n print(\n f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}'\n )\n else:\n 
print(\n f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}'\n )\n if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and\n get_hist1day >= 0 and get_histbtc >= 0):\n print(\n f\"\"\"{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\n\"\"\"\n )\n macd_list.append(coins[coin])\n if macd_list:\n sort_list = sorted(macd_list, key=lambda x: x[\n f'current_potential'], reverse=True)\n for i in sort_list:\n coin = i['symbol']\n current_potential = i['current_potential']\n last_price = float(init_price[coin + PAIR_WITH]['price'])\n high_price = float(max(coins[coin]['high_price']))\n low_price = float(min(coins[coin]['low_price']))\n range = high_price - low_price\n potential = low_price / high_price * 100\n buy_above = low_price * 1.0\n buy_below = high_price - range * percent_below\n current_range = high_price - last_price\n if all_info:\n print(\n f\"\"\"\nPrice: ${last_price:.3f}\nHigh: ${high_price:.3f}\nDay Max Range: ${range:.3f}\nCurrent Range: ${current_range:.3f} \nBuy Below: ${buy_below:.3f}\nPotential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\"\"\"\n )\n with open(f'signals/snail_scan{signal_file_type}', 'a+'\n ) as f:\n f.write(str(coin + PAIR_WITH) + '\\n')\n snail_coins = len(current_potential_list)\n macd_coins = len(macd_list)\n snail_discord = (\n f'Snail found {snail_coins} coins and MACD approved {macd_coins}'\n )\n if DISCORD:\n msg_discord(snail_discord)\n print(\n f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}'\n )\n time.sleep(180)\n",
"step-3": "<mask token>\nargs = parse_args()\nDEFAULT_CONFIG_FILE = 'config.yml'\nDEFAULT_CREDS_FILE = 'creds.yml'\nconfig_file = args.config if args.config else DEFAULT_CONFIG_FILE\ncreds_file = args.creds if args.creds else DEFAULT_CREDS_FILE\nparsed_creds = load_config(creds_file)\nparsed_config = load_config(config_file)\nPAIR_WITH = parsed_config['trading_options']['PAIR_WITH']\nEX_PAIRS = parsed_config['trading_options']['FIATS']\nTEST_MODE = parsed_config['script_options']['TEST_MODE']\nTAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']\nDISCORD_WEBHOOK = load_discord_creds(parsed_creds)\naccess_key, secret_key, passphrase_key = load_correct_creds(parsed_creds)\nclient = Client(access_key, secret_key, passphrase_key)\nCREATE_TICKER_LIST = True\nticker_type = 'all'\nif CREATE_TICKER_LIST:\n TICKERS_LIST = 'tickers_all_USDT.txt'\nelse:\n TICKERS_LIST = 'tickers_all_USDT.txt'\nBVT = False\nOLORIN = True\nif OLORIN:\n signal_file_type = '.buy'\nelse:\n signal_file_type = '.exs'\nWINDOWS = True\nDISCORD = True\nLIMIT = 4\nINTERVAL = '1day'\nprofit_min = 15\nprofit_max = 100\npercent_below = 0.7\nMOVEMENT = True\nall_info = False\n\n\nclass TextColors:\n BUY = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n SELL_LOSS = '\\x1b[91m'\n SELL_PROFIT = '\\x1b[32m'\n DIM = '\\x1b[2m\\x1b[35m'\n DEFAULT = '\\x1b[39m'\n YELLOW = '\\x1b[33m'\n TURQUOISE = '\\x1b[36m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n ITALICS = '\\x1b[3m'\n\n\ndef msg_discord(msg):\n message = msg + '\\n\\n'\n mUrl = 'https://discordapp.com/api/webhooks/' + DISCORD_WEBHOOK\n data = {'content': message}\n response = requests.post(mUrl, json=data)\n\n\ndef get_price(client_api):\n initial_price = {}\n tickers = [line.strip() for line in open(TICKERS_LIST)]\n prices = client_api.get_ticker()\n for coin in prices['ticker']:\n for item in tickers:\n if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH\n not in coin['symbol'] for item in EX_PAIRS):\n initial_price[coin['symbol']] = {'symbol': coin['symbol'],\n 'price': coin['last'], 'time': datetime.now(),\n 'price_list': [], 'change_price': 0.0, 'cov': 0.0}\n return initial_price\n\n\nasync def create_urls(ticker_list, interval) ->dict:\n coins_urls = {}\n if INTERVAL == '1day':\n st = datetime.now() - timedelta(days=float(LIMIT))\n et = datetime.now()\n start_time = int(st.timestamp())\n stop_time = int(et.timestamp())\n for coin in ticker_list:\n if type(coin) == dict:\n if all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS\n ):\n coins_urls[coin['symbol']] = {'symbol': coin['symbol'],\n 'url':\n f\"https://api.kucoin.com/api/v1/market/candles?symbol{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}\"\n }\n else:\n coins_urls[coin] = {'symbol': coin, 'url':\n f'https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}'\n }\n return coins_urls\n\n\nasync def get(session: aiohttp.ClientSession, url) ->dict:\n data = {}\n symbol = re.findall('=\\\\w+', url)[0][1:]\n try:\n resp = await session.request('GET', url=url)\n data['symbol'] = symbol\n data['data'] = await resp.json()\n except Exception as e:\n print(e)\n return data\n\n\nasync def get_historical_data(ticker_list, interval):\n urls = await create_urls(ticker_list=ticker_list, interval=interval)\n if WINDOWS:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n async with aiohttp.ClientSession() as session:\n tasks = []\n for url in urls:\n link = urls[url]['url']\n 
tasks.append(get(session=session, url=link))\n response = await asyncio.gather(*tasks, return_exceptions=True)\n return response\n\n\ndef get_prices_high_low(list_coins, interval):\n if WINDOWS:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n prices_low_high = {}\n hist_data = asyncio.run(get_historical_data(ticker_list=list_coins,\n interval=interval))\n for item in hist_data:\n coin_symbol = item['symbol']\n h_p = []\n l_p = []\n try:\n for i in item['data']['data']:\n close_time = i[0]\n open_price = float(i[1])\n close_price = float(i[2])\n high_price = float(i[3])\n low_price = float(i[4])\n volume = float(i[5])\n quote_volume = i[6]\n h_p.append(high_price)\n l_p.append(low_price)\n except Exception as e:\n print(f'Exception {e}')\n continue\n prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price':\n h_p, 'low_price': l_p, 'current_potential': 0.0}\n return prices_low_high\n\n\ndef do_work():\n while True:\n init_price = get_price(client)\n coins = get_prices_high_low(init_price, INTERVAL)\n print(\n f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}'\n )\n if os.path.exists(f'signals/snail_scan{signal_file_type}'):\n os.remove(f'signals/snail_scan{signal_file_type}')\n current_potential_list = []\n held_coins_list = {}\n if TEST_MODE:\n coin_path = 'test_coins_bought.json'\n elif BVT:\n coin_path = 'coins_bought.json'\n else:\n coin_path = 'live_coins_bought.json'\n if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:\n with open(coin_path) as file:\n held_coins_list = json.load(file)\n for coin in coins:\n if len(coins[coin]['high_price']) == LIMIT:\n high_price = float(max(coins[coin]['high_price']))\n low_price = float(min(coins[coin]['low_price']))\n last_price = float(init_price[coin + PAIR_WITH]['price'])\n range = high_price - low_price\n potential = low_price / high_price * 100\n buy_above = low_price * 1.0\n buy_below = high_price - range * percent_below\n max_potential = potential * 0.98\n min_potential = potential * 0.6\n safe_potential = potential - 12\n current_range = high_price - last_price\n current_potential = high_price / last_price * 100 - 100\n coins[coin]['current_potential'] = current_potential\n movement = low_price / range\n if MOVEMENT:\n if (profit_min < current_potential < profit_max and \n last_price < buy_below and movement >= TAKE_PROFIT and\n coin not in held_coins_list):\n current_potential_list.append(coins[coin])\n elif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:\n current_potential_list.append(coins[coin])\n if current_potential_list:\n exchange = ccxt.binance()\n macd_list = []\n for i in current_potential_list:\n coin = i['symbol'] + PAIR_WITH\n current_potential = i['current_potential']\n macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)\n macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)\n macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)\n try:\n macd1day = exchange.fetch_ohlcv(coin, timeframe='1d',\n limit=36)\n except Exception as e:\n print(f'{coin} Exception {e}')\n continue\n macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m',\n limit=36)\n df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df1day = 
pd.DataFrame(macd1day, columns=['time', 'open',\n 'high', 'low', 'close', 'volume'])\n dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open',\n 'high', 'low', 'close', 'volume'])\n time.sleep(1)\n try:\n macd1 = df1.ta.macd(fast=12, slow=26)\n macd5 = df5.ta.macd(fast=12, slow=26)\n macd15 = df15.ta.macd(fast=12, slow=26)\n macd1day = df1day.ta.macd(fast=12, slow=26)\n macdbtc = dfbtc.ta.macd(fast=12, slow=26)\n get_hist1 = macd1.iloc[35, 1]\n get_hist5 = macd5.iloc[35, 1]\n get_hist15 = macd15.iloc[35, 1]\n get_hist1day = macd1day.iloc[35, 1]\n get_histbtc = macdbtc.iloc[35, 1]\n except Exception as e:\n print(f'{coin} Exception {e}')\n continue\n if all_info:\n if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >=\n 0 and get_hist1day >= 0 and get_histbtc >= 0):\n print(\n f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}'\n )\n else:\n print(\n f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}'\n )\n if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and\n get_hist1day >= 0 and get_histbtc >= 0):\n print(\n f\"\"\"{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\n\"\"\"\n )\n macd_list.append(coins[coin])\n if macd_list:\n sort_list = sorted(macd_list, key=lambda x: x[\n f'current_potential'], reverse=True)\n for i in sort_list:\n coin = i['symbol']\n current_potential = i['current_potential']\n last_price = float(init_price[coin + PAIR_WITH]['price'])\n high_price = float(max(coins[coin]['high_price']))\n low_price = float(min(coins[coin]['low_price']))\n range = high_price - low_price\n potential = low_price / high_price * 100\n buy_above = low_price * 1.0\n buy_below = high_price - range * percent_below\n current_range = high_price - last_price\n if all_info:\n print(\n f\"\"\"\nPrice: ${last_price:.3f}\nHigh: ${high_price:.3f}\nDay Max Range: ${range:.3f}\nCurrent Range: ${current_range:.3f} \nBuy Below: ${buy_below:.3f}\nPotential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\"\"\"\n )\n with open(f'signals/snail_scan{signal_file_type}', 'a+'\n ) as f:\n f.write(str(coin + PAIR_WITH) + '\\n')\n snail_coins = len(current_potential_list)\n macd_coins = len(macd_list)\n snail_discord = (\n f'Snail found {snail_coins} coins and MACD approved {macd_coins}'\n )\n if DISCORD:\n msg_discord(snail_discord)\n print(\n f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}'\n )\n time.sleep(180)\n",
"step-4": "<mask token>\nimport os\nimport re\nimport aiohttp\nimport asyncio\nimport time\nimport json\nfrom datetime import datetime, timedelta\nfrom kucoin.client import Client\nfrom helpers.parameters import parse_args, load_config\nimport pandas as pd\nimport pandas_ta as ta\nimport ccxt\nfrom tradingview_ta import TA_Handler, Interval, Exchange\nimport requests\nfrom helpers.handle_creds import load_correct_creds, load_discord_creds\nargs = parse_args()\nDEFAULT_CONFIG_FILE = 'config.yml'\nDEFAULT_CREDS_FILE = 'creds.yml'\nconfig_file = args.config if args.config else DEFAULT_CONFIG_FILE\ncreds_file = args.creds if args.creds else DEFAULT_CREDS_FILE\nparsed_creds = load_config(creds_file)\nparsed_config = load_config(config_file)\nPAIR_WITH = parsed_config['trading_options']['PAIR_WITH']\nEX_PAIRS = parsed_config['trading_options']['FIATS']\nTEST_MODE = parsed_config['script_options']['TEST_MODE']\nTAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']\nDISCORD_WEBHOOK = load_discord_creds(parsed_creds)\naccess_key, secret_key, passphrase_key = load_correct_creds(parsed_creds)\nclient = Client(access_key, secret_key, passphrase_key)\nCREATE_TICKER_LIST = True\nticker_type = 'all'\nif CREATE_TICKER_LIST:\n TICKERS_LIST = 'tickers_all_USDT.txt'\nelse:\n TICKERS_LIST = 'tickers_all_USDT.txt'\nBVT = False\nOLORIN = True\nif OLORIN:\n signal_file_type = '.buy'\nelse:\n signal_file_type = '.exs'\nWINDOWS = True\nDISCORD = True\nLIMIT = 4\nINTERVAL = '1day'\nprofit_min = 15\nprofit_max = 100\npercent_below = 0.7\nMOVEMENT = True\nall_info = False\n\n\nclass TextColors:\n BUY = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n SELL_LOSS = '\\x1b[91m'\n SELL_PROFIT = '\\x1b[32m'\n DIM = '\\x1b[2m\\x1b[35m'\n DEFAULT = '\\x1b[39m'\n YELLOW = '\\x1b[33m'\n TURQUOISE = '\\x1b[36m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n ITALICS = '\\x1b[3m'\n\n\ndef msg_discord(msg):\n message = msg + '\\n\\n'\n mUrl = 'https://discordapp.com/api/webhooks/' + DISCORD_WEBHOOK\n data = {'content': message}\n response = requests.post(mUrl, json=data)\n\n\ndef get_price(client_api):\n initial_price = {}\n tickers = [line.strip() for line in open(TICKERS_LIST)]\n prices = client_api.get_ticker()\n for coin in prices['ticker']:\n for item in tickers:\n if item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH\n not in coin['symbol'] for item in EX_PAIRS):\n initial_price[coin['symbol']] = {'symbol': coin['symbol'],\n 'price': coin['last'], 'time': datetime.now(),\n 'price_list': [], 'change_price': 0.0, 'cov': 0.0}\n return initial_price\n\n\nasync def create_urls(ticker_list, interval) ->dict:\n coins_urls = {}\n if INTERVAL == '1day':\n st = datetime.now() - timedelta(days=float(LIMIT))\n et = datetime.now()\n start_time = int(st.timestamp())\n stop_time = int(et.timestamp())\n for coin in ticker_list:\n if type(coin) == dict:\n if all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS\n ):\n coins_urls[coin['symbol']] = {'symbol': coin['symbol'],\n 'url':\n f\"https://api.kucoin.com/api/v1/market/candles?symbol{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}\"\n }\n else:\n coins_urls[coin] = {'symbol': coin, 'url':\n f'https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}'\n }\n return coins_urls\n\n\nasync def get(session: aiohttp.ClientSession, url) ->dict:\n data = {}\n symbol = re.findall('=\\\\w+', url)[0][1:]\n try:\n resp = await session.request('GET', url=url)\n data['symbol'] = symbol\n data['data'] = await 
resp.json()\n except Exception as e:\n print(e)\n return data\n\n\nasync def get_historical_data(ticker_list, interval):\n urls = await create_urls(ticker_list=ticker_list, interval=interval)\n if WINDOWS:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n async with aiohttp.ClientSession() as session:\n tasks = []\n for url in urls:\n link = urls[url]['url']\n tasks.append(get(session=session, url=link))\n response = await asyncio.gather(*tasks, return_exceptions=True)\n return response\n\n\ndef get_prices_high_low(list_coins, interval):\n if WINDOWS:\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n prices_low_high = {}\n hist_data = asyncio.run(get_historical_data(ticker_list=list_coins,\n interval=interval))\n for item in hist_data:\n coin_symbol = item['symbol']\n h_p = []\n l_p = []\n try:\n for i in item['data']['data']:\n close_time = i[0]\n open_price = float(i[1])\n close_price = float(i[2])\n high_price = float(i[3])\n low_price = float(i[4])\n volume = float(i[5])\n quote_volume = i[6]\n h_p.append(high_price)\n l_p.append(low_price)\n except Exception as e:\n print(f'Exception {e}')\n continue\n prices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price':\n h_p, 'low_price': l_p, 'current_potential': 0.0}\n return prices_low_high\n\n\ndef do_work():\n while True:\n init_price = get_price(client)\n coins = get_prices_high_low(init_price, INTERVAL)\n print(\n f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}'\n )\n if os.path.exists(f'signals/snail_scan{signal_file_type}'):\n os.remove(f'signals/snail_scan{signal_file_type}')\n current_potential_list = []\n held_coins_list = {}\n if TEST_MODE:\n coin_path = 'test_coins_bought.json'\n elif BVT:\n coin_path = 'coins_bought.json'\n else:\n coin_path = 'live_coins_bought.json'\n if os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:\n with open(coin_path) as file:\n held_coins_list = json.load(file)\n for coin in coins:\n if len(coins[coin]['high_price']) == LIMIT:\n high_price = float(max(coins[coin]['high_price']))\n low_price = float(min(coins[coin]['low_price']))\n last_price = float(init_price[coin + PAIR_WITH]['price'])\n range = high_price - low_price\n potential = low_price / high_price * 100\n buy_above = low_price * 1.0\n buy_below = high_price - range * percent_below\n max_potential = potential * 0.98\n min_potential = potential * 0.6\n safe_potential = potential - 12\n current_range = high_price - last_price\n current_potential = high_price / last_price * 100 - 100\n coins[coin]['current_potential'] = current_potential\n movement = low_price / range\n if MOVEMENT:\n if (profit_min < current_potential < profit_max and \n last_price < buy_below and movement >= TAKE_PROFIT and\n coin not in held_coins_list):\n current_potential_list.append(coins[coin])\n elif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:\n current_potential_list.append(coins[coin])\n if current_potential_list:\n exchange = ccxt.binance()\n macd_list = []\n for i in current_potential_list:\n coin = i['symbol'] + PAIR_WITH\n current_potential = i['current_potential']\n macd1 = exchange.fetch_ohlcv(coin, timeframe='1m', limit=36)\n macd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)\n macd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)\n try:\n macd1day = exchange.fetch_ohlcv(coin, timeframe='1d',\n limit=36)\n except Exception as e:\n print(f'{coin} Exception 
{e}')\n continue\n macdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m',\n limit=36)\n df1 = pd.DataFrame(macd1, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df5 = pd.DataFrame(macd5, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df15 = pd.DataFrame(macd15, columns=['time', 'open', 'high',\n 'low', 'close', 'volume'])\n df1day = pd.DataFrame(macd1day, columns=['time', 'open',\n 'high', 'low', 'close', 'volume'])\n dfbtc = pd.DataFrame(macdbtc, columns=['time', 'open',\n 'high', 'low', 'close', 'volume'])\n time.sleep(1)\n try:\n macd1 = df1.ta.macd(fast=12, slow=26)\n macd5 = df5.ta.macd(fast=12, slow=26)\n macd15 = df15.ta.macd(fast=12, slow=26)\n macd1day = df1day.ta.macd(fast=12, slow=26)\n macdbtc = dfbtc.ta.macd(fast=12, slow=26)\n get_hist1 = macd1.iloc[35, 1]\n get_hist5 = macd5.iloc[35, 1]\n get_hist15 = macd15.iloc[35, 1]\n get_hist1day = macd1day.iloc[35, 1]\n get_histbtc = macdbtc.iloc[35, 1]\n except Exception as e:\n print(f'{coin} Exception {e}')\n continue\n if all_info:\n if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >=\n 0 and get_hist1day >= 0 and get_histbtc >= 0):\n print(\n f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}'\n )\n else:\n print(\n f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}'\n )\n if (get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and\n get_hist1day >= 0 and get_histbtc >= 0):\n print(\n f\"\"\"{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\n\"\"\"\n )\n macd_list.append(coins[coin])\n if macd_list:\n sort_list = sorted(macd_list, key=lambda x: x[\n f'current_potential'], reverse=True)\n for i in sort_list:\n coin = i['symbol']\n current_potential = i['current_potential']\n last_price = float(init_price[coin + PAIR_WITH]['price'])\n high_price = float(max(coins[coin]['high_price']))\n low_price = float(min(coins[coin]['low_price']))\n range = high_price - low_price\n potential = low_price / high_price * 100\n buy_above = low_price * 1.0\n buy_below = high_price - range * percent_below\n current_range = high_price - last_price\n if all_info:\n print(\n f\"\"\"\nPrice: ${last_price:.3f}\nHigh: ${high_price:.3f}\nDay Max Range: ${range:.3f}\nCurrent Range: ${current_range:.3f} \nBuy Below: ${buy_below:.3f}\nPotential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\"\"\"\n )\n with open(f'signals/snail_scan{signal_file_type}', 'a+'\n ) as f:\n f.write(str(coin + PAIR_WITH) + '\\n')\n snail_coins = len(current_potential_list)\n macd_coins = len(macd_list)\n snail_discord = (\n f'Snail found {snail_coins} coins and MACD approved {macd_coins}'\n )\n if DISCORD:\n msg_discord(snail_discord)\n print(\n f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}'\n )\n time.sleep(180)\n",
"step-5": "\"\"\"\nThe Snail v 2\n\"Buy the dips! ... then wait\"\n\nSTRATEGY\n1. Selects coins that are X% (percent_below) below their X day (LIMIT) maximum\n2. ** NEW ** Finds movement (MOVEMENT) range over X Days\n - if MOVEMENT* > TAKE_PROFIT coins pass to 3\n3. Check coins are not already owned\n4. Uses MACD to check if coins are currently on an uptrend\n5. Adds coins that pass all above tests to Signal file for the Bot to buy (ordered by Potential Profit from High to Low)\n\n* MOVEMENT\n Looks at the fluctuation in price over LIMIT days and compares to your TAKE_PROFIT settings.\n i.e. if your TAKE_PROFIT is 3%, but the movement is only 1%, then you wont hit TP and will be left holding the coin\n This can be turned off if you want.\n\n\nSTRATEGY SETTINGS\nLIMIT = 4\nINTERVAL = '1d'\nprofit_min = 15\nprofit_max = 100 # only required if you want to limit max profit\npercent_below = 0.6 # change risk level: 0.7 = 70% below high_price, 0.5 = 50% below high_price\nMOVEMENT = True #\n\nOTHER SETTINGS\nBVT or OLORIN Fork.\nSet True / False for compatibility\n\nWINDOWS (WINDOWS OS)\nSet True / False for compatibility\n\nDISCORD\nsend message to Discord - Set True / False\n\n\nCONFIG.YML SETTINGS\nCHANGE_IN_PRICE: 100 REQUIRED\nDo NOT use pausebotmod as it will prevent the_snail from buying - The Snail buys the dips\n\nDeveloped by scoobie\nThanks to\n@vyacheslav for optimising the code with async and adding list sorting,\n@Kevin.Butters for the meticulous testing and reporting,\n@OlorinSledge for the coding advice and a great fork\n\nDISCLAIMER\nCHECK YOU HAVE ALL THE REQUIRED IMPORTS INSTALLED\nDeveloped for OlorinSledge fork - no support for any others as I don't use them.\nTroubleshooting and help - please use the #troubleshooting channel\nSettings - the settings in this file are what I currently use, please don't DM me for the 'best' settings - for me, these are the best so far.\nThere's a lot of options to adjust the strategy, test them out and share your results in #bot-strategies so others can learn from them too\n\nHope the Snail makes you rich!\n\n\"\"\"\n\nimport os\nimport re\nimport aiohttp\nimport asyncio\nimport time\nimport json\nfrom datetime import datetime, timedelta\nfrom kucoin.client import Client\nfrom helpers.parameters import parse_args, load_config\nimport pandas as pd\nimport pandas_ta as ta\nimport ccxt\nfrom tradingview_ta import TA_Handler, Interval, Exchange\nimport requests\n\n# Load creds modules\nfrom helpers.handle_creds import (\n\tload_correct_creds, load_discord_creds\n)\n\n# Settings\nargs = parse_args()\nDEFAULT_CONFIG_FILE = 'config.yml'\nDEFAULT_CREDS_FILE = 'creds.yml'\n\n\nconfig_file = args.config if args.config else DEFAULT_CONFIG_FILE\ncreds_file = args.creds if args.creds else DEFAULT_CREDS_FILE\nparsed_creds = load_config(creds_file)\nparsed_config = load_config(config_file)\n\n# Load trading vars\nPAIR_WITH = parsed_config['trading_options']['PAIR_WITH']\nEX_PAIRS = parsed_config['trading_options']['FIATS']\nTEST_MODE = parsed_config['script_options']['TEST_MODE']\nTAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']\nDISCORD_WEBHOOK = load_discord_creds(parsed_creds)\n\n# Load creds for correct environment\naccess_key, secret_key, passphrase_key = load_correct_creds(parsed_creds)\nclient = Client(access_key, secret_key, passphrase_key)\n\n\n\n# If True, an updated list of coins will be generated from the site - http://edgesforledges.com/watchlists/binance.\n# If False, then the list you create in TICKERS_LIST = 'tickers.txt' will be 
used.\nCREATE_TICKER_LIST = True\n\n# When creating a ticker list from the source site:\n# http://edgesforledges.com you can use the parameter (all or innovation-zone).\n# ticker_type = 'innovation-zone'\nticker_type = 'all'\nif CREATE_TICKER_LIST:\n\tTICKERS_LIST = 'tickers_all_USDT.txt'\nelse:\n\tTICKERS_LIST = 'tickers_all_USDT.txt'\n\n# System Settings\nBVT = False\nOLORIN = True # if not using Olorin Sledge Fork set to False\nif OLORIN:\n\tsignal_file_type = '.buy'\nelse:\n\tsignal_file_type = '.exs'\n\n# if using Windows OS set to True, else set to False\nWINDOWS = True\n# send message to discord\nDISCORD = True\n\n# Strategy Settings\nLIMIT = 4\nINTERVAL = '1day'\nprofit_min = 15\nprofit_max = 100 # only required if you want to limit max profit\npercent_below = 0.7 # change risk level: 0.7 = 70% below high_price, 0.5 = 50% below high_price\nMOVEMENT = True\n\n# Display Setttings\nall_info = False\n\n\nclass TextColors:\n\tBUY = '\\033[92m'\n\tWARNING = '\\033[93m'\n\tSELL_LOSS = '\\033[91m'\n\tSELL_PROFIT = '\\033[32m'\n\tDIM = '\\033[2m\\033[35m'\n\tDEFAULT = '\\033[39m'\n\tYELLOW = '\\033[33m'\n\tTURQUOISE = '\\033[36m'\n\tUNDERLINE = '\\033[4m'\n\tEND = '\\033[0m'\n\tITALICS = '\\033[3m'\n\n\ndef msg_discord(msg):\n\n\tmessage = msg + '\\n\\n'\n\n\tmUrl = \"https://discordapp.com/api/webhooks/\"+DISCORD_WEBHOOK\n\tdata = {\"content\": message}\n\tresponse = requests.post(mUrl, json=data)\n\n\ndef get_price(client_api):\n\tinitial_price = {}\n\ttickers = [line.strip() for line in open(TICKERS_LIST)]\n\tprices = client_api.get_ticker()\n\n\tfor coin in prices['ticker']:\n\t\tfor item in tickers:\n\t\t\tif item + PAIR_WITH == coin['symbol'] and all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):\n\t\t\t\tinitial_price[coin['symbol']] = {'symbol': coin['symbol'],\n\t\t\t\t\t\t\t\t\t\t\t\t 'price': coin['last'],\n\t\t\t\t\t\t\t\t\t\t\t\t 'time': datetime.now(),\n\t\t\t\t\t\t\t\t\t\t\t\t 'price_list': [],\n\t\t\t\t\t\t\t\t\t\t\t\t 'change_price': 0.0,\n\t\t\t\t\t\t\t\t\t\t\t\t 'cov': 0.0}\n\treturn initial_price\n\n\nasync def create_urls(ticker_list, interval) -> dict:\n\tcoins_urls = {}\n\n\tif INTERVAL == '1day':\n \t\tst = datetime.now() - timedelta(days=float(LIMIT))\n\t\t\t\n\tet = datetime.now()\n\tstart_time = int(st.timestamp())\n\tstop_time = int(et.timestamp())\n\n\tfor coin in ticker_list:\n\t\tif type(coin) == dict:\n\t\t\tif all(item + PAIR_WITH not in coin['symbol'] for item in EX_PAIRS):\n\t\t\t\tcoins_urls[coin['symbol']] = {'symbol': coin['symbol'],\n\t\t\t\t\t\t\t\t\t\t\t 'url': f\"https://api.kucoin.com/api/v1/market/candles?symbol\"\n f\"{coin['symbol']}&type={interval}&startAt={start_time}&endAt={stop_time}\"}\n\t\telse:\n\t\t\tcoins_urls[coin] = {'symbol': coin,\n\t\t\t\t\t\t\t\t'url': f\"https://api.kucoin.com/api/v1/market/candles?symbol={coin}&type={interval}&startAt={start_time}&endAt={stop_time}\"}\n\n\treturn coins_urls\n\n\nasync def get(session: aiohttp.ClientSession, url) -> dict:\n\tdata = {}\n\tsymbol = re.findall(r'=\\w+', url)[0][1:]\n\ttry:\n\t\tresp = await session.request('GET', url=url)\n\t\tdata['symbol'] = symbol\n\t\t# data['last_price'] = await get_last_price(session=session, symbol=symbol)\n\t\tdata['data'] = await resp.json()\n\texcept Exception as e:\n\t\tprint(e)\n\treturn data\n\n\nasync def get_historical_data(ticker_list, interval):\n\turls = await create_urls(ticker_list=ticker_list, interval=interval)\n\tif WINDOWS:\n\t\tasyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\tasync with 
aiohttp.ClientSession() as session:\n\t\ttasks = []\n\t\tfor url in urls:\n\t\t\tlink = urls[url]['url']\n\t\t\ttasks.append(get(session=session, url=link))\n\t\tresponse = await asyncio.gather(*tasks, return_exceptions=True)\n\t\treturn response\n\n\ndef get_prices_high_low(list_coins, interval):\n\tif WINDOWS:\n\t\tasyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\t\n\tprices_low_high = {}\n\thist_data = asyncio.run(get_historical_data(ticker_list=list_coins, interval=interval))\n\tfor item in hist_data:\n\t\tcoin_symbol = item['symbol']\n\t\th_p = []\n\t\tl_p = []\n\t\ttry:\n\t\t\tfor i in item['data']['data']:\n\t\t\t\tclose_time = i[0]\n\t\t\t\topen_price = float(i[1])\n\t\t\t\tclose_price = float(i[2])\n\t\t\t\thigh_price = float(i[3])\n\t\t\t\tlow_price = float(i[4])\n\t\t\t\tvolume = float(i[5])\n\t\t\t\tquote_volume = i[6]\n\t\t\t\th_p.append(high_price)\n\t\t\t\tl_p.append(low_price)\n\t\texcept Exception as e:\n\t\t\t\t\tprint(f'Exception {e}')\n\t\t\t\t\tcontinue\n\n\t\tprices_low_high[coin_symbol] = {'symbol': coin_symbol, 'high_price': h_p, 'low_price': l_p, 'current_potential': 0.0}\n\n\treturn prices_low_high\n\n\ndef do_work(): \t\n\twhile True:\n \t\n\t\tinit_price = get_price(client)\n\t\tcoins = get_prices_high_low(init_price, INTERVAL)\n\t\tprint(f'{TextColors.TURQUOISE}The Snail is checking for potential profit and buy signals{TextColors.DEFAULT}')\n\t\tif os.path.exists(f'signals/snail_scan{signal_file_type}'):\n\t\t\tos.remove(f'signals/snail_scan{signal_file_type}')\n\n\t\tcurrent_potential_list = []\n\t\theld_coins_list = {}\n\n\t\tif TEST_MODE:\n\t\t\tcoin_path = 'test_coins_bought.json'\n\t\telif BVT:\n\t\t\tcoin_path = 'coins_bought.json'\n\t\telse:\n\t\t\tcoin_path = 'live_coins_bought.json'\n\t\tif os.path.isfile(coin_path) and os.stat(coin_path).st_size != 0:\n\t\t\twith open(coin_path) as file:\n\t\t\t\theld_coins_list = json.load(file)\n\n\t\tfor coin in coins:\n\t\t\tif len(coins[coin]['high_price']) == LIMIT:\n\t\t\t\thigh_price = float(max(coins[coin]['high_price']))\n\t\t\t\tlow_price = float(min(coins[coin]['low_price']))\n\t\t\t\tlast_price = float(init_price[coin + PAIR_WITH]['price'])\n\n\t\t\t\t# Calculation\n\t\t\t\trange = high_price - low_price\n\t\t\t\tpotential = (low_price / high_price) * 100\n\t\t\t\tbuy_above = low_price * 1.00\n\t\t\t\tbuy_below = high_price - (range * percent_below) # percent below affects Risk\n\t\t\t\tmax_potential = potential * 0.98\n\t\t\t\tmin_potential = potential * 0.6\n\t\t\t\tsafe_potential = potential - 12\n\t\t\t\tcurrent_range = high_price - last_price\n\t\t\t\tcurrent_potential = ((high_price / last_price) * 100) - 100\n\t\t\t\tcoins[coin]['current_potential'] = current_potential\n\t\t\t\tmovement = (low_price / range)\n#\t\t\t\tprint(f'{coin} {potential:.2f}% {movement:.2f}%')\n\n\t\t\t\tif MOVEMENT:\n\t\t\t\t\tif profit_min < current_potential < profit_max and last_price < buy_below and movement >= TAKE_PROFIT and coin not in held_coins_list:\n\t\t\t\t\t\tcurrent_potential_list.append(coins[coin])\n\t\t\t\telse:\n\t\t\t\t\tif profit_min < current_potential < profit_max and last_price < buy_below and coin not in held_coins_list:\n\t\t\t\t\t\tcurrent_potential_list.append(coins[coin])\n\n\t\tif current_potential_list:\n\t\t\t# print(current_potential_list)\n\t\t\texchange = ccxt.binance()\n\t\t\tmacd_list = []\n\n\t\t\tfor i in current_potential_list:\n\t\t\t\tcoin = i['symbol'] + PAIR_WITH\n\t\t\t\tcurrent_potential = i['current_potential']\n\t\t\t\tmacd1 = exchange.fetch_ohlcv(coin, 
timeframe='1m', limit=36)\n\t\t\t\tmacd5 = exchange.fetch_ohlcv(coin, timeframe='5m', limit=36)\n\t\t\t\tmacd15 = exchange.fetch_ohlcv(coin, timeframe='15m', limit=36)\n\t\t\t\ttry:\n\t\t\t\t\tmacd1day = exchange.fetch_ohlcv(coin, timeframe='1d', limit=36)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(f'{coin} Exception {e}')\n\t\t\t\t\tcontinue\n\t\t\t\tmacdbtc = exchange.fetch_ohlcv('BTCUSDT', timeframe='1m', limit=36)\n\n\t\t\t\tdf1 = pd.DataFrame(macd1, columns=['time', 'open', 'high', 'low', 'close', 'volume'])\n\t\t\t\tdf5 = pd.DataFrame(macd5, columns=['time', 'open', 'high', 'low', 'close', 'volume'])\n\t\t\t\tdf15 = pd.DataFrame(macd15, columns=['time', 'open', 'high', 'low', 'close', 'volume'])\n\t\t\t\tdf1day = pd.DataFrame(macd1day, columns=['time', 'open', 'high', 'low', 'close', 'volume'])\n\t\t\t\tdfbtc = pd.DataFrame(macdbtc, columns=['time', 'open', 'high', 'low', 'close', 'volume'])\n\n\t\t\t\t# Wait for 1 sec to prevent kucoin query limit\n\t\t\t\ttime.sleep(1)\n\n\t\t\t\ttry:\n\t\t\t\t\tmacd1 = df1.ta.macd(fast=12, slow=26)\n\t\t\t\t\tmacd5 = df5.ta.macd(fast=12, slow=26)\n\t\t\t\t\tmacd15 = df15.ta.macd(fast=12, slow=26)\n\t\t\t\t\tmacd1day = df1day.ta.macd(fast=12, slow=26)\n\t\t\t\t\tmacdbtc = dfbtc.ta.macd(fast=12, slow=26)\n\n\t\t\t\t\tget_hist1 = macd1.iloc[35, 1]\n\t\t\t\t\tget_hist5 = macd5.iloc[35, 1]\n\t\t\t\t\tget_hist15 = macd15.iloc[35, 1]\t\t\t\t\n\t\t\t\t\tget_hist1day = macd1day.iloc[35, 1]\n\t\t\t\t\tget_histbtc = macdbtc.iloc[35, 1]\n\t\t\t\t\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(f'{coin} Exception {e}')\n\t\t\t\t\tcontinue\n\n\t\t\t\tif all_info:\n\t\t\t\t\tif get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and get_hist1day >= 0 and get_histbtc >= 0:\n\t\t\t\t\t\tprint(f'MACD HIST {coin} {current_potential:2f}% {TextColors.SELL_PROFIT}{get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}{TextColors.DEFAULT}')\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(f'MACD HIST {coin} {current_potential:2f}% {get_hist1} {get_hist5} {get_hist15} {get_hist1day} {get_histbtc}')\n\n\t\t\t\tif get_hist1 >= 0 and get_hist5 >= 0 and get_hist15 >= 0 and get_hist1day >= 0 and get_histbtc >= 0:\n\t\t\t\t\t# Add to coins for Snail to scan\n\t\t\t\t\tprint(f'{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}\\n')\n\t\t\t\t\tmacd_list.append(coins[coin])\n\t\t\t\t# else:\n\t\t\t\t# print(f'Do NOT buy {coin}')\n\n\t\t\tif macd_list:\n\n\t\t\t\t# print(macd_list)\n\t\t\t\tsort_list = sorted(macd_list, key=lambda x: x[f'current_potential'], reverse=True)\n\t\t\t\tfor i in sort_list:\n\t\t\t\t\tcoin = i['symbol']\n\t\t\t\t\tcurrent_potential = i['current_potential']\n\t\t\t\t\tlast_price = float(init_price[coin + PAIR_WITH]['price'])\n\t\t\t\t\t# print(f'list {coin} {last_price}')\n\t\t\t\t\thigh_price = float(max(coins[coin]['high_price']))\n\t\t\t\t\t# print(f'list {coin} {high_price}')\n\t\t\t\t\tlow_price = float(min(coins[coin]['low_price']))\n\t\t\t\t\t# print(f'list {coin} {low_price}')\n\t\t\t\t\trange = high_price - low_price\n\t\t\t\t\tpotential = (low_price / high_price) * 100\n\t\t\t\t\tbuy_above = low_price * 1.00\n\t\t\t\t\tbuy_below = high_price - (range * percent_below)\n\t\t\t\t\tcurrent_range = high_price - last_price\n\n\t\t\t\t\tif all_info:\n\t\t\t\t\t\tprint(f'\\nPrice: ${last_price:.3f}\\n'\n\t\t\t\t\t\t\tf'High: ${high_price:.3f}\\n'\n\t\t\t\t\t\t\t# f'Plan: TP {TP}% TTP {TTP}%\\n'\n\t\t\t\t\t\t\tf'Day Max Range: 
${range:.3f}\\n'\n\t\t\t\t\t\t\tf'Current Range: ${current_range:.3f} \\n'\n\t\t\t\t\t\t\t# f'Daily Range: ${range:.3f}\\n'\n\t\t\t\t\t\t\t# f'Current Range ${current_range:.3f} \\n'\n\t\t\t\t\t\t\t# f'Potential profit before safety: {potential:.0f}%\\n'\n\t\t\t\t\t\t\t# f'Buy above: ${buy_above:.3f}\\n'\n\t\t\t\t\t\t\tf'Buy Below: ${buy_below:.3f}\\n'\n\t\t\t\t\t\t\tf'Potential profit: {TextColors.TURQUOISE}{current_potential:.0f}%{TextColors.DEFAULT}'\n\t\t\t\t\t\t\t# f'Max Profit {max_potential:.2f}%\\n'\n\t\t\t\t\t\t\t# f'Min Profit {min_potential:.2f}%\\n'\n\t\t\t\t\t\t\t)\n\t\t\t\t\t# print(f'Adding {TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} to buy list')\n\n\t\t\t\t\t# add to signal\n\t\t\t\t\twith open(f'signals/snail_scan{signal_file_type}', 'a+') as f:\n\t\t\t\t\t\tf.write(str(coin + PAIR_WITH) + '\\n')\n\n\t\t\t# else:\n\t\t\t# print(f'{TextColors.TURQUOISE}{coin}{TextColors.DEFAULT} may not be profitable at this time')\n\t\t\tsnail_coins = len(current_potential_list)\n\t\t\tmacd_coins = len(macd_list)\n\t\t\tsnail_discord = f'Snail found {snail_coins} coins and MACD approved {macd_coins}'\n\t\t\tif DISCORD:\n\t\t\t\tmsg_discord(snail_discord)\n\t\t\tprint(f'{TextColors.TURQUOISE}Snail found {snail_coins} coins and MACD approved {macd_coins} coins. L: {LIMIT}days Min: {profit_min}% Risk: {percent_below * 100}% {TextColors.DEFAULT}')\n\t\t\ttime.sleep(180)\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
import torch
import torch.nn as nn
from tqdm import tqdm
import torch.nn.functional as F
import torch.multiprocessing as mp
from policy_network import Policy_Network
from util import safe_log
from util import index2word, rearrange_vector_list, get_num_gpus, set_seed
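# TestWorker: one evaluation process pinned to a GPU chosen by worker id. It
# restores a trained Policy_Network checkpoint and runs beam-search inference
# over its dataset, reporting hits@1 back through the shared d_results dict.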
class TestWorker(mp.Process):
def __init__(self, args, worker_id, env, d_entity_neighours, d_entity2bucketid, d_action_space_buckets, d_entity2id, d_relation2id, reqa_checkpoint_path, d_results, word_num, entity_num, relation_num, keqa_checkpoint_path, return_trace = False):
super().__init__(name='test-worker-%02d' % (worker_id))
self.args = args
self.seed = args.seed + worker_id
self.fix_batch_size = args.batch_size
self.use_keqa_vector = args.use_keqa_vector
self.max_hop = args.max_hop
self.beam_size = args.beam_size
self.return_trace = return_trace
self.d_entity_neighours = d_entity_neighours
self.d_entity2bucketid = d_entity2bucketid
self.d_action_space_buckets = d_action_space_buckets
self.id2entity = index2word(d_entity2id)
self.id2relation = index2word(d_relation2id)
self.worker_id = worker_id
self.gpu_id = self.worker_id % get_num_gpus()
self.env = env
self.d_results = d_results
self.reqa_checkpoint_path = reqa_checkpoint_path
self.word_num = word_num
self.entity_num = entity_num
self.relation_num = relation_num
self.keqa_checkpoint_path = keqa_checkpoint_path
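    # Per-process entry point: seed, build and load the model on this worker's
    # GPU, then evaluate the dataset in fixed-size batches and store hits@1.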
def run(self):
set_seed(self.seed)
self.model = Policy_Network(self.args, self.word_num, self.entity_num, self.relation_num, self.keqa_checkpoint_path, self.gpu_id)
self.model.load(self.reqa_checkpoint_path)
self.model.cuda(self.gpu_id)
self.model.eval()
self.env.set_model(self.model)
self.env.set_gpu_id(self.gpu_id)
total_data_num = len(self.env.d_dataset)
hits_1_num = 0
with torch.no_grad():
for example_id in tqdm(range(0, len(self.env.d_dataset), self.fix_batch_size), desc=self.name, position=self.worker_id):
idx = range(example_id, example_id + self.fix_batch_size)
self.env.reset(idx)
self.batch_size = self.env.batch_size
batch_hits1 = self.rollout()
hits_1_num += batch_hits1
hits_1_result = 1.0 * hits_1_num / total_data_num
self.d_results['hits@1'] = hits_1_result
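    # One batch of beam-search inference: expand (relation, entity) actions for
    # max_hop steps and count rank-1 predictions that are correct answers.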
def rollout(self):
batch_question, batch_question_len, batch_head, batch_answers = self.env.return_batch_data()
if self.return_trace:
l_search_trace = []
l_log_action_probs = []
batch_pred_vector = None
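        # Optionally condition the policy on the anticipated answer-entity
        # vector produced by the pretrained KEQA component.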
if self.use_keqa_vector:
batch_pred_vector = self.model.get_anticipated_entity_vector(batch_head, batch_question, batch_question_len, self.d_entity_neighours)
log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)
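        # log_action_prob accumulates the log-probability of each beam entry.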
for t in range(self.max_hop):
path_trace, path_hidden = self.env.observe()
last_r, e_t = path_trace[-1]
batch_path_hidden = path_hidden[-1][0][-1, :, :]
k = int(e_t.size()[0] / self.batch_size)
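            # k is the current beam width; repeat the per-question tensors so
            # each beam entry sees its own copy of the question (and KEQA vector).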
beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1)
beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k).view(self.batch_size * k)
beam_pred_vector = None
if self.use_keqa_vector:
beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1)
db_outcomes, _, _, inv_offset = self.model.transit(t, e_t, beam_question, beam_question_len, batch_path_hidden, self.d_entity2bucketid, self.d_action_space_buckets, last_r, False, beam_pred_vector)
db_action_spaces = [action_space for action_space, _ in db_outcomes]
db_action_dist = [action_dist for _, action_dist in db_outcomes]
action_space = self.pad_and_cat_action_space(db_action_spaces, inv_offset)
action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[inv_offset]
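            # Path score so far plus the log-probability of each candidate action.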
log_action_dist = log_action_prob.view(-1, 1) + safe_log(action_dist)
if self.return_trace:
print(t)
print(last_r, e_t)
print("----")
print(action_space[0])
print(F.softmax(log_action_dist.view(-1)).view(self.batch_size * k, -1))
print("------------------------")
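            # Final hop keeps only the best beam entry per distinct answer entity;
            # intermediate hops use plain top-k over the beam x action space.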
if t == self.max_hop - 1:
action, log_action_prob, action_offset = self.top_k_answer_unique(log_action_dist, action_space)
else:
action, log_action_prob, action_offset = self.top_k_action(log_action_dist, action_space)
path_list, (h_t, c_t) = self.model.update_path(action, path_hidden, offset = action_offset)
self.env.step(action, path_list, (h_t, c_t))
if self.return_trace:
rearrange_vector_list(l_log_action_probs, action_offset)
l_log_action_probs.append(log_action_prob)
self.adjust_search_trace(l_search_trace, action_offset)
l_search_trace.append(action)
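        # Take the highest-scoring entity per question; gathering batch_answers
        # (assumed to be a per-entity 0/1 answer mask) at that index counts hits@1.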
batch_pred_e2 = action[1].view(self.batch_size, -1)
batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)
batch_hits1 = torch.sum(torch.gather(batch_answers, 1, batch_pred_e2_top1).view(-1)).item()
if self.return_trace:
self.print_search_trace(batch_head, l_search_trace, l_log_action_probs)
return batch_hits1
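    # Standard beam step: top-k actions over the flattened beam x action space,
    # plus the offsets needed to reorder the path histories of surviving beams.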
def top_k_action(self, log_action_dist, action_space):
full_size = len(log_action_dist)
last_k = int(full_size / self.batch_size)
(r_space, e_space), _ = action_space
action_space_size = r_space.size()[1]
log_action_dist = log_action_dist.view(self.batch_size, -1)
beam_action_space_size = log_action_dist.size()[1]
k = min(self.beam_size, beam_action_space_size)
log_action_prob, action_ind = torch.topk(log_action_dist, k)
next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind).view(-1)
next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind).view(-1)
log_action_prob = log_action_prob.view(-1)
action_beam_offset = action_ind // action_space_size
action_batch_offset = (torch.arange(self.batch_size).cuda(self.gpu_id) * last_k).unsqueeze(1)
action_offset = (action_batch_offset + action_beam_offset).view(-1)
return (next_r, next_e), log_action_prob, action_offset
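    # Final-hop beam step: deduplicate target entities per question (keeping the
    # best score for each) before taking top-k, so answers are not repeated.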
def top_k_answer_unique(self, log_action_dist, action_space):
full_size = len(log_action_dist)
last_k = int(full_size / self.batch_size)
(r_space, e_space), _ = action_space
action_space_size = r_space.size()[1]
r_space = r_space.view(self.batch_size, -1)
e_space = e_space.view(self.batch_size, -1)
log_action_dist = log_action_dist.view(self.batch_size, -1)
beam_action_space_size = log_action_dist.size()[1]
k = min(self.beam_size, beam_action_space_size)
next_r_list, next_e_list = [], []
log_action_prob_list = []
action_offset_list = []
for i in range(self.batch_size):
log_action_dist_b = log_action_dist[i]
r_space_b = r_space[i]
e_space_b = e_space[i]
unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self.gpu_id)
unique_log_action_dist, unique_idx = self.unique_max(unique_e_space_b, e_space_b, log_action_dist_b)
k_prime = min(len(unique_e_space_b), k)
top_unique_log_action_dist, top_unique_idx2 = torch.topk(unique_log_action_dist, k_prime)
top_unique_idx = unique_idx[top_unique_idx2]
top_unique_beam_offset = top_unique_idx // action_space_size
top_r = r_space_b[top_unique_idx]
top_e = e_space_b[top_unique_idx]
next_r_list.append(top_r.unsqueeze(0))
next_e_list.append(top_e.unsqueeze(0))
log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0))
top_unique_batch_offset = i * last_k
top_unique_action_offset = top_unique_batch_offset + top_unique_beam_offset
action_offset_list.append(top_unique_action_offset.unsqueeze(0))
next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)
next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)
log_action_prob = self.pad_and_cat(log_action_prob_list, padding_value = -float("inf"))
action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)
return (next_r, next_e), log_action_prob.view(-1), action_offset.view(-1)
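    # Copies weights from self.shared_model (parameter-sharing style); note that
    # shared_model is never assigned in this worker, so the method is unused here.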
def sync_model(self):
self.model.load_state_dict(self.shared_model.state_dict())
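    # Re-assemble bucketed action spaces: pad every bucket to the same width and
    # restore the original example order via inv_offset.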
def pad_and_cat_action_space(self, action_spaces, inv_offset):
db_r_space, db_e_space, db_action_mask = [], [], []
for (r_space, e_space), action_mask in action_spaces:
db_r_space.append(r_space)
db_e_space.append(e_space)
db_action_mask.append(action_mask)
r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]
e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]
action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]
action_space = ((r_space, e_space), action_mask)
return action_space
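    # Pad 2-D tensors along dim 1 to a common width, concatenate along dim 0 and
    # move the result to this worker's GPU.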
def pad_and_cat(self, a, padding_value, padding_dim=1):
max_dim_size = max([x.size()[padding_dim] for x in a])
padded_a = []
for x in a:
if x.size()[padding_dim] < max_dim_size:
res_len = max_dim_size - x.size()[1]
pad = nn.ConstantPad1d((0, res_len), padding_value)
padded_a.append(pad(x))
else:
padded_a.append(x)
return torch.cat(padded_a, dim=0).cuda(self.gpu_id)
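    # For every unique entity, take the maximum score among its occurrences in x;
    # processed in chunks of `unique_interval` to bound the marker-matrix size.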
def unique_max(self, unique_x, x, values, marker_2D=None):
unique_interval = 100
HUGE_INT = 1e31
unique_values, unique_indices = [], []
for i in range(0, len(unique_x), unique_interval):
unique_x_b = unique_x[i:i+unique_interval]
marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()
values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D) * HUGE_INT
unique_values_b, unique_idx_b = values_2D.max(dim=1)
unique_values.append(unique_values_b)
unique_indices.append(unique_idx_b)
unique_values = torch.cat(unique_values).cuda(self.gpu_id)
unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)
return unique_values, unique_idx
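    # Reorder the stored (relation, entity) trace so it follows the beams that
    # survived the latest top-k selection.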
def adjust_search_trace(self, search_trace, action_offset):
for i, (r, e) in enumerate(search_trace):
new_r = r[action_offset]
new_e = e[action_offset]
search_trace[i] = (new_r, new_e)
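    # Debug helper: reconstruct and print the top-k reasoning paths with their
    # log action probabilities; relations ending in '_inverse' are shown as
    # reversed edges.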
def print_search_trace(self, batch_head, l_search_trace, l_log_action_probs):
for i in range(self.batch_size):
top_k_edge_labels = []
for k, log_action_prob in enumerate(l_log_action_probs):
beam_size = len(log_action_prob)
for j in range(beam_size):
ind = i * beam_size + j
r = self.id2relation[int(l_search_trace[k][0][ind])]
e = self.id2entity[int(l_search_trace[k][1][ind])]
if r.endswith('_inverse'):
edge_label = '<-{}-{} {}'.format(r[:-8], e, float(log_action_prob[ind]))
else:
edge_label = '-{}->{} {}'.format(r, e, float(log_action_prob[ind]))
if k == 0:
edge_label = self.id2entity[int(batch_head[i])] + edge_label
top_k_edge_labels.append(edge_label)
else:
top_k_edge_labels[j] += edge_label
for i, edge_label in enumerate(top_k_edge_labels):
print(i, edge_label)
print("*****************************")
|
normal
|
{
"blob_id": "c7333d838b87d4c275d9dbb6d7e3047c313b4bc0",
"index": 9212,
"step-1": "<mask token>\n\n\nclass TestWorker(mp.Process):\n <mask token>\n <mask token>\n\n def rollout(self):\n batch_question, batch_question_len, batch_head, batch_answers = (self\n .env.return_batch_data())\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(\n batch_head, batch_question, batch_question_len, self.\n d_entity_neighours)\n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1]\n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n k = int(e_t.size()[0] / self.batch_size)\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(\n self.batch_size * k, -1)\n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k\n ).view(self.batch_size * k)\n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1,\n k, 1).view(self.batch_size * k, -1)\n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t,\n beam_question, beam_question_len, batch_path_hidden, self.\n d_entity2bucketid, self.d_action_space_buckets, last_r, \n False, beam_pred_vector)\n db_action_spaces = [action_space for action_space, _ in db_outcomes\n ]\n db_action_dist = [action_dist for _, action_dist in db_outcomes]\n action_space = self.pad_and_cat_action_space(db_action_spaces,\n inv_offset)\n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[\n inv_offset]\n log_action_dist = log_action_prob.view(-1, 1) + safe_log(\n action_dist)\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print('----')\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.\n batch_size * k, -1))\n print('------------------------')\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = (self.\n top_k_answer_unique(log_action_dist, action_space))\n else:\n action, log_action_prob, action_offset = self.top_k_action(\n log_action_dist, action_space)\n path_list, (h_t, c_t) = self.model.update_path(action,\n path_hidden, offset=action_offset)\n self.env.step(action, path_list, (h_t, c_t))\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob)\n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n batch_pred_e2 = action[1].view(self.batch_size, -1)\n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n batch_hits1 = torch.sum(torch.gather(batch_answers, 1,\n batch_pred_e2_top1).view(-1)).item()\n if self.return_trace:\n self.print_search_trace(batch_head, l_search_trace,\n l_log_action_probs)\n return batch_hits1\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n log_action_prob, action_ind = torch.topk(log_action_dist, k)\n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n log_action_prob = log_action_prob.view(-1)\n action_beam_offset = action_ind // action_space_size\n 
action_batch_offset = (torch.arange(self.batch_size).cuda(self.\n gpu_id) * last_k).unsqueeze(1)\n action_offset = (action_batch_offset + action_beam_offset).view(-1)\n return (next_r, next_e), log_action_prob, action_offset\n\n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n r_space = r_space.view(self.batch_size, -1)\n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self\n .gpu_id)\n unique_log_action_dist, unique_idx = self.unique_max(\n unique_e_space_b, e_space_b, log_action_dist_b)\n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(\n unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0)\n )\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = (top_unique_batch_offset +\n top_unique_beam_offset)\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list,\n padding_value=-float('inf'))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(\n -1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n\n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[\n inv_offset]\n action_space = (r_space, e_space), action_mask\n return action_space\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestWorker(mp.Process):\n <mask token>\n\n def run(self):\n set_seed(self.seed)\n self.model = Policy_Network(self.args, self.word_num, self.\n entity_num, self.relation_num, self.keqa_checkpoint_path, self.\n gpu_id)\n self.model.load(self.reqa_checkpoint_path)\n self.model.cuda(self.gpu_id)\n self.model.eval()\n self.env.set_model(self.model)\n self.env.set_gpu_id(self.gpu_id)\n total_data_num = len(self.env.d_dataset)\n hits_1_num = 0\n with torch.no_grad():\n for example_id in tqdm(range(0, len(self.env.d_dataset), self.\n fix_batch_size), desc=self.name, position=self.worker_id):\n idx = range(example_id, example_id + self.fix_batch_size)\n self.env.reset(idx)\n self.batch_size = self.env.batch_size\n batch_hits1 = self.rollout()\n hits_1_num += batch_hits1\n hits_1_result = 1.0 * hits_1_num / total_data_num\n self.d_results['hits@1'] = hits_1_result\n\n def rollout(self):\n batch_question, batch_question_len, batch_head, batch_answers = (self\n .env.return_batch_data())\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(\n batch_head, batch_question, batch_question_len, self.\n d_entity_neighours)\n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1]\n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n k = int(e_t.size()[0] / self.batch_size)\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(\n self.batch_size * k, -1)\n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k\n ).view(self.batch_size * k)\n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1,\n k, 1).view(self.batch_size * k, -1)\n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t,\n beam_question, beam_question_len, batch_path_hidden, self.\n d_entity2bucketid, self.d_action_space_buckets, last_r, \n False, beam_pred_vector)\n db_action_spaces = [action_space for action_space, _ in db_outcomes\n ]\n db_action_dist = [action_dist for _, action_dist in db_outcomes]\n action_space = self.pad_and_cat_action_space(db_action_spaces,\n inv_offset)\n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[\n inv_offset]\n log_action_dist = log_action_prob.view(-1, 1) + safe_log(\n action_dist)\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print('----')\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.\n batch_size * k, -1))\n print('------------------------')\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = (self.\n top_k_answer_unique(log_action_dist, action_space))\n else:\n action, log_action_prob, action_offset = self.top_k_action(\n log_action_dist, action_space)\n path_list, (h_t, c_t) = self.model.update_path(action,\n path_hidden, offset=action_offset)\n self.env.step(action, path_list, (h_t, c_t))\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob)\n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n batch_pred_e2 = action[1].view(self.batch_size, -1)\n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n batch_hits1 = torch.sum(torch.gather(batch_answers, 1,\n batch_pred_e2_top1).view(-1)).item()\n if self.return_trace:\n 
self.print_search_trace(batch_head, l_search_trace,\n l_log_action_probs)\n return batch_hits1\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n log_action_prob, action_ind = torch.topk(log_action_dist, k)\n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n log_action_prob = log_action_prob.view(-1)\n action_beam_offset = action_ind // action_space_size\n action_batch_offset = (torch.arange(self.batch_size).cuda(self.\n gpu_id) * last_k).unsqueeze(1)\n action_offset = (action_batch_offset + action_beam_offset).view(-1)\n return (next_r, next_e), log_action_prob, action_offset\n\n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n r_space = r_space.view(self.batch_size, -1)\n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self\n .gpu_id)\n unique_log_action_dist, unique_idx = self.unique_max(\n unique_e_space_b, e_space_b, log_action_dist_b)\n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(\n unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0)\n )\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = (top_unique_batch_offset +\n top_unique_beam_offset)\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list,\n padding_value=-float('inf'))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(\n -1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n\n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[\n inv_offset]\n action_space = 
(r_space, e_space), action_mask\n return action_space\n <mask token>\n\n def unique_max(self, unique_x, x, values, marker_2D=None):\n unique_interval = 100\n HUGE_INT = 1e+31\n unique_values, unique_indices = [], []\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i + unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()\n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D\n ) * HUGE_INT\n unique_values_b, unique_idx_b = values_2D.max(dim=1)\n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values).cuda(self.gpu_id)\n unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)\n return unique_values, unique_idx\n\n def adjust_search_trace(self, search_trace, action_offset):\n for i, (r, e) in enumerate(search_trace):\n new_r = r[action_offset]\n new_e = e[action_offset]\n search_trace[i] = new_r, new_e\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestWorker(mp.Process):\n <mask token>\n\n def run(self):\n set_seed(self.seed)\n self.model = Policy_Network(self.args, self.word_num, self.\n entity_num, self.relation_num, self.keqa_checkpoint_path, self.\n gpu_id)\n self.model.load(self.reqa_checkpoint_path)\n self.model.cuda(self.gpu_id)\n self.model.eval()\n self.env.set_model(self.model)\n self.env.set_gpu_id(self.gpu_id)\n total_data_num = len(self.env.d_dataset)\n hits_1_num = 0\n with torch.no_grad():\n for example_id in tqdm(range(0, len(self.env.d_dataset), self.\n fix_batch_size), desc=self.name, position=self.worker_id):\n idx = range(example_id, example_id + self.fix_batch_size)\n self.env.reset(idx)\n self.batch_size = self.env.batch_size\n batch_hits1 = self.rollout()\n hits_1_num += batch_hits1\n hits_1_result = 1.0 * hits_1_num / total_data_num\n self.d_results['hits@1'] = hits_1_result\n\n def rollout(self):\n batch_question, batch_question_len, batch_head, batch_answers = (self\n .env.return_batch_data())\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(\n batch_head, batch_question, batch_question_len, self.\n d_entity_neighours)\n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1]\n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n k = int(e_t.size()[0] / self.batch_size)\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(\n self.batch_size * k, -1)\n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k\n ).view(self.batch_size * k)\n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1,\n k, 1).view(self.batch_size * k, -1)\n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t,\n beam_question, beam_question_len, batch_path_hidden, self.\n d_entity2bucketid, self.d_action_space_buckets, last_r, \n False, beam_pred_vector)\n db_action_spaces = [action_space for action_space, _ in db_outcomes\n ]\n db_action_dist = [action_dist for _, action_dist in db_outcomes]\n action_space = self.pad_and_cat_action_space(db_action_spaces,\n inv_offset)\n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[\n inv_offset]\n log_action_dist = log_action_prob.view(-1, 1) + safe_log(\n action_dist)\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print('----')\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.\n batch_size * k, -1))\n print('------------------------')\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = (self.\n top_k_answer_unique(log_action_dist, action_space))\n else:\n action, log_action_prob, action_offset = self.top_k_action(\n log_action_dist, action_space)\n path_list, (h_t, c_t) = self.model.update_path(action,\n path_hidden, offset=action_offset)\n self.env.step(action, path_list, (h_t, c_t))\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob)\n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n batch_pred_e2 = action[1].view(self.batch_size, -1)\n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n batch_hits1 = torch.sum(torch.gather(batch_answers, 1,\n batch_pred_e2_top1).view(-1)).item()\n if self.return_trace:\n 
self.print_search_trace(batch_head, l_search_trace,\n l_log_action_probs)\n return batch_hits1\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n log_action_prob, action_ind = torch.topk(log_action_dist, k)\n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n log_action_prob = log_action_prob.view(-1)\n action_beam_offset = action_ind // action_space_size\n action_batch_offset = (torch.arange(self.batch_size).cuda(self.\n gpu_id) * last_k).unsqueeze(1)\n action_offset = (action_batch_offset + action_beam_offset).view(-1)\n return (next_r, next_e), log_action_prob, action_offset\n\n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n r_space = r_space.view(self.batch_size, -1)\n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self\n .gpu_id)\n unique_log_action_dist, unique_idx = self.unique_max(\n unique_e_space_b, e_space_b, log_action_dist_b)\n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(\n unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0)\n )\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = (top_unique_batch_offset +\n top_unique_beam_offset)\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list,\n padding_value=-float('inf'))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(\n -1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n\n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[\n inv_offset]\n action_space = 
(r_space, e_space), action_mask\n return action_space\n\n def pad_and_cat(self, a, padding_value, padding_dim=1):\n max_dim_size = max([x.size()[padding_dim] for x in a])\n padded_a = []\n for x in a:\n if x.size()[padding_dim] < max_dim_size:\n res_len = max_dim_size - x.size()[1]\n pad = nn.ConstantPad1d((0, res_len), padding_value)\n padded_a.append(pad(x))\n else:\n padded_a.append(x)\n return torch.cat(padded_a, dim=0).cuda(self.gpu_id)\n\n def unique_max(self, unique_x, x, values, marker_2D=None):\n unique_interval = 100\n HUGE_INT = 1e+31\n unique_values, unique_indices = [], []\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i + unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()\n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D\n ) * HUGE_INT\n unique_values_b, unique_idx_b = values_2D.max(dim=1)\n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values).cuda(self.gpu_id)\n unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)\n return unique_values, unique_idx\n\n def adjust_search_trace(self, search_trace, action_offset):\n for i, (r, e) in enumerate(search_trace):\n new_r = r[action_offset]\n new_e = e[action_offset]\n search_trace[i] = new_r, new_e\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass TestWorker(mp.Process):\n\n def __init__(self, args, worker_id, env, d_entity_neighours,\n d_entity2bucketid, d_action_space_buckets, d_entity2id,\n d_relation2id, reqa_checkpoint_path, d_results, word_num,\n entity_num, relation_num, keqa_checkpoint_path, return_trace=False):\n super().__init__(name='test-worker-%02d' % worker_id)\n self.args = args\n self.seed = args.seed + worker_id\n self.fix_batch_size = args.batch_size\n self.use_keqa_vector = args.use_keqa_vector\n self.max_hop = args.max_hop\n self.beam_size = args.beam_size\n self.return_trace = return_trace\n self.d_entity_neighours = d_entity_neighours\n self.d_entity2bucketid = d_entity2bucketid\n self.d_action_space_buckets = d_action_space_buckets\n self.id2entity = index2word(d_entity2id)\n self.id2relation = index2word(d_relation2id)\n self.worker_id = worker_id\n self.gpu_id = self.worker_id % get_num_gpus()\n self.env = env\n self.d_results = d_results\n self.reqa_checkpoint_path = reqa_checkpoint_path\n self.word_num = word_num\n self.entity_num = entity_num\n self.relation_num = relation_num\n self.keqa_checkpoint_path = keqa_checkpoint_path\n\n def run(self):\n set_seed(self.seed)\n self.model = Policy_Network(self.args, self.word_num, self.\n entity_num, self.relation_num, self.keqa_checkpoint_path, self.\n gpu_id)\n self.model.load(self.reqa_checkpoint_path)\n self.model.cuda(self.gpu_id)\n self.model.eval()\n self.env.set_model(self.model)\n self.env.set_gpu_id(self.gpu_id)\n total_data_num = len(self.env.d_dataset)\n hits_1_num = 0\n with torch.no_grad():\n for example_id in tqdm(range(0, len(self.env.d_dataset), self.\n fix_batch_size), desc=self.name, position=self.worker_id):\n idx = range(example_id, example_id + self.fix_batch_size)\n self.env.reset(idx)\n self.batch_size = self.env.batch_size\n batch_hits1 = self.rollout()\n hits_1_num += batch_hits1\n hits_1_result = 1.0 * hits_1_num / total_data_num\n self.d_results['hits@1'] = hits_1_result\n\n def rollout(self):\n batch_question, batch_question_len, batch_head, batch_answers = (self\n .env.return_batch_data())\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(\n batch_head, batch_question, batch_question_len, self.\n d_entity_neighours)\n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1]\n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n k = int(e_t.size()[0] / self.batch_size)\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(\n self.batch_size * k, -1)\n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k\n ).view(self.batch_size * k)\n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1,\n k, 1).view(self.batch_size * k, -1)\n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t,\n beam_question, beam_question_len, batch_path_hidden, self.\n d_entity2bucketid, self.d_action_space_buckets, last_r, \n False, beam_pred_vector)\n db_action_spaces = [action_space for action_space, _ in db_outcomes\n ]\n db_action_dist = [action_dist for _, action_dist in db_outcomes]\n action_space = self.pad_and_cat_action_space(db_action_spaces,\n inv_offset)\n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[\n inv_offset]\n log_action_dist = log_action_prob.view(-1, 
1) + safe_log(\n action_dist)\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print('----')\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.\n batch_size * k, -1))\n print('------------------------')\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = (self.\n top_k_answer_unique(log_action_dist, action_space))\n else:\n action, log_action_prob, action_offset = self.top_k_action(\n log_action_dist, action_space)\n path_list, (h_t, c_t) = self.model.update_path(action,\n path_hidden, offset=action_offset)\n self.env.step(action, path_list, (h_t, c_t))\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob)\n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n batch_pred_e2 = action[1].view(self.batch_size, -1)\n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n batch_hits1 = torch.sum(torch.gather(batch_answers, 1,\n batch_pred_e2_top1).view(-1)).item()\n if self.return_trace:\n self.print_search_trace(batch_head, l_search_trace,\n l_log_action_probs)\n return batch_hits1\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n log_action_prob, action_ind = torch.topk(log_action_dist, k)\n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind\n ).view(-1)\n log_action_prob = log_action_prob.view(-1)\n action_beam_offset = action_ind // action_space_size\n action_batch_offset = (torch.arange(self.batch_size).cuda(self.\n gpu_id) * last_k).unsqueeze(1)\n action_offset = (action_batch_offset + action_beam_offset).view(-1)\n return (next_r, next_e), log_action_prob, action_offset\n\n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n r_space = r_space.view(self.batch_size, -1)\n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self\n .gpu_id)\n unique_log_action_dist, unique_idx = self.unique_max(\n unique_e_space_b, e_space_b, log_action_dist_b)\n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(\n unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0)\n )\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = 
(top_unique_batch_offset +\n top_unique_beam_offset)\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list,\n padding_value=-float('inf'))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(\n -1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n\n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[\n inv_offset]\n action_space = (r_space, e_space), action_mask\n return action_space\n\n def pad_and_cat(self, a, padding_value, padding_dim=1):\n max_dim_size = max([x.size()[padding_dim] for x in a])\n padded_a = []\n for x in a:\n if x.size()[padding_dim] < max_dim_size:\n res_len = max_dim_size - x.size()[1]\n pad = nn.ConstantPad1d((0, res_len), padding_value)\n padded_a.append(pad(x))\n else:\n padded_a.append(x)\n return torch.cat(padded_a, dim=0).cuda(self.gpu_id)\n\n def unique_max(self, unique_x, x, values, marker_2D=None):\n unique_interval = 100\n HUGE_INT = 1e+31\n unique_values, unique_indices = [], []\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i + unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float()\n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D\n ) * HUGE_INT\n unique_values_b, unique_idx_b = values_2D.max(dim=1)\n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values).cuda(self.gpu_id)\n unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)\n return unique_values, unique_idx\n\n def adjust_search_trace(self, search_trace, action_offset):\n for i, (r, e) in enumerate(search_trace):\n new_r = r[action_offset]\n new_e = e[action_offset]\n search_trace[i] = new_r, new_e\n\n def print_search_trace(self, batch_head, l_search_trace, l_log_action_probs\n ):\n for i in range(self.batch_size):\n top_k_edge_labels = []\n for k, log_action_prob in enumerate(l_log_action_probs):\n beam_size = len(log_action_prob)\n for j in range(beam_size):\n ind = i * beam_size + j\n r = self.id2relation[int(l_search_trace[k][0][ind])]\n e = self.id2entity[int(l_search_trace[k][1][ind])]\n if r.endswith('_inverse'):\n edge_label = '<-{}-{} {}'.format(r[:-8], e, float(\n log_action_prob[ind]))\n else:\n edge_label = '-{}->{} {}'.format(r, e, float(\n log_action_prob[ind]))\n if k == 0:\n edge_label = self.id2entity[int(batch_head[i])\n ] + edge_label\n top_k_edge_labels.append(edge_label)\n else:\n top_k_edge_labels[j] += edge_label\n for i, edge_label in enumerate(top_k_edge_labels):\n print(i, edge_label)\n print('*****************************')\n",
"step-5": "import torch\nimport torch.nn as nn\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nimport torch.multiprocessing as mp\nfrom policy_network import Policy_Network\nfrom util import safe_log\nfrom util import index2word, rearrange_vector_list, get_num_gpus, set_seed\n\nclass TestWorker(mp.Process):\n def __init__(self, args, worker_id, env, d_entity_neighours, d_entity2bucketid, d_action_space_buckets, d_entity2id, d_relation2id, reqa_checkpoint_path, d_results, word_num, entity_num, relation_num, keqa_checkpoint_path, return_trace = False):\n super().__init__(name='test-worker-%02d' % (worker_id))\n self.args = args\n self.seed = args.seed + worker_id\n self.fix_batch_size = args.batch_size\n self.use_keqa_vector = args.use_keqa_vector\n self.max_hop = args.max_hop\n self.beam_size = args.beam_size\n self.return_trace = return_trace\n\n self.d_entity_neighours = d_entity_neighours\n self.d_entity2bucketid = d_entity2bucketid\n self.d_action_space_buckets = d_action_space_buckets\n self.id2entity = index2word(d_entity2id)\n self.id2relation = index2word(d_relation2id)\n self.worker_id = worker_id\n self.gpu_id = self.worker_id % get_num_gpus()\n self.env = env\n self.d_results = d_results\n self.reqa_checkpoint_path = reqa_checkpoint_path\n self.word_num = word_num \n self.entity_num = entity_num\n self.relation_num = relation_num\n self.keqa_checkpoint_path = keqa_checkpoint_path\n\n def run(self):\n set_seed(self.seed)\n self.model = Policy_Network(self.args, self.word_num, self.entity_num, self.relation_num, self.keqa_checkpoint_path, self.gpu_id)\n self.model.load(self.reqa_checkpoint_path)\n self.model.cuda(self.gpu_id)\n self.model.eval()\n self.env.set_model(self.model)\n self.env.set_gpu_id(self.gpu_id)\n total_data_num = len(self.env.d_dataset)\n\n hits_1_num = 0\n with torch.no_grad():\n for example_id in tqdm(range(0, len(self.env.d_dataset), self.fix_batch_size), desc=self.name, position=self.worker_id): \n idx = range(example_id, example_id + self.fix_batch_size) \n \n self.env.reset(idx)\n self.batch_size = self.env.batch_size \n \n batch_hits1 = self.rollout() \n hits_1_num += batch_hits1\n\n \n hits_1_result = 1.0 * hits_1_num / total_data_num\n self.d_results['hits@1'] = hits_1_result\n\n def rollout(self): \n batch_question, batch_question_len, batch_head, batch_answers = self.env.return_batch_data()\n\n if self.return_trace:\n l_search_trace = []\n l_log_action_probs = []\n\n batch_pred_vector = None\n if self.use_keqa_vector:\n batch_pred_vector = self.model.get_anticipated_entity_vector(batch_head, batch_question, batch_question_len, self.d_entity_neighours)\n \n log_action_prob = torch.zeros(self.batch_size).cuda(self.gpu_id)\n for t in range(self.max_hop):\n path_trace, path_hidden = self.env.observe()\n last_r, e_t = path_trace[-1] \n \n batch_path_hidden = path_hidden[-1][0][-1, :, :]\n \n k = int(e_t.size()[0] / self.batch_size) \n\n beam_question = batch_question.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1) \n beam_question_len = batch_question_len.unsqueeze(1).repeat(1, k).view(self.batch_size * k) \n \n beam_pred_vector = None\n if self.use_keqa_vector:\n beam_pred_vector = batch_pred_vector.unsqueeze(1).repeat(1, k, 1).view(self.batch_size * k, -1) \n \n db_outcomes, _, _, inv_offset = self.model.transit(t, e_t, beam_question, beam_question_len, batch_path_hidden, self.d_entity2bucketid, self.d_action_space_buckets, last_r, False, beam_pred_vector) \n db_action_spaces = [action_space for action_space, _ in db_outcomes]\n 
db_action_dist = [action_dist for _, action_dist in db_outcomes]\n \n action_space = self.pad_and_cat_action_space(db_action_spaces, inv_offset) \n action_dist = self.pad_and_cat(db_action_dist, padding_value=0)[inv_offset]\n\n log_action_dist = log_action_prob.view(-1, 1) + safe_log(action_dist) \n\n if self.return_trace:\n print(t)\n print(last_r, e_t)\n print(\"----\")\n print(action_space[0])\n print(F.softmax(log_action_dist.view(-1)).view(self.batch_size * k, -1))\n print(\"------------------------\")\n\n if t == self.max_hop - 1:\n action, log_action_prob, action_offset = self.top_k_answer_unique(log_action_dist, action_space)\n else:\n action, log_action_prob, action_offset = self.top_k_action(log_action_dist, action_space)\n \n path_list, (h_t, c_t) = self.model.update_path(action, path_hidden, offset = action_offset) \n self.env.step(action, path_list, (h_t, c_t))\n\n if self.return_trace:\n rearrange_vector_list(l_log_action_probs, action_offset)\n l_log_action_probs.append(log_action_prob) \n self.adjust_search_trace(l_search_trace, action_offset)\n l_search_trace.append(action)\n \n batch_pred_e2 = action[1].view(self.batch_size, -1) \n batch_pred_e2_top1 = batch_pred_e2[:, 0].view(self.batch_size, -1)\n \n batch_hits1 = torch.sum(torch.gather(batch_answers, 1, batch_pred_e2_top1).view(-1)).item()\n\n if self.return_trace:\n self.print_search_trace(batch_head, l_search_trace, l_log_action_probs)\n \n return batch_hits1\n\n\n def top_k_action(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n\n log_action_dist = log_action_dist.view(self.batch_size, -1) \n beam_action_space_size = log_action_dist.size()[1]\n k = min(self.beam_size, beam_action_space_size) \n\n log_action_prob, action_ind = torch.topk(log_action_dist, k) \n next_r = torch.gather(r_space.view(self.batch_size, -1), 1, action_ind).view(-1) \n next_e = torch.gather(e_space.view(self.batch_size, -1), 1, action_ind).view(-1) \n log_action_prob = log_action_prob.view(-1) \n action_beam_offset = action_ind // action_space_size \n action_batch_offset = (torch.arange(self.batch_size).cuda(self.gpu_id) * last_k).unsqueeze(1) \n action_offset = (action_batch_offset + action_beam_offset).view(-1) \n\n return (next_r, next_e), log_action_prob, action_offset \n \n def top_k_answer_unique(self, log_action_dist, action_space):\n full_size = len(log_action_dist)\n last_k = int(full_size / self.batch_size)\n (r_space, e_space), _ = action_space\n action_space_size = r_space.size()[1]\n\n r_space = r_space.view(self.batch_size, -1) \n e_space = e_space.view(self.batch_size, -1)\n log_action_dist = log_action_dist.view(self.batch_size, -1)\n beam_action_space_size = log_action_dist.size()[1]\n \n k = min(self.beam_size, beam_action_space_size)\n next_r_list, next_e_list = [], []\n log_action_prob_list = []\n action_offset_list = []\n\n for i in range(self.batch_size):\n log_action_dist_b = log_action_dist[i]\n r_space_b = r_space[i]\n e_space_b = e_space[i]\n unique_e_space_b = torch.unique(e_space_b.data.cpu()).cuda(self.gpu_id) \n unique_log_action_dist, unique_idx = self.unique_max(unique_e_space_b, e_space_b, log_action_dist_b) \n k_prime = min(len(unique_e_space_b), k)\n top_unique_log_action_dist, top_unique_idx2 = torch.topk(unique_log_action_dist, k_prime)\n top_unique_idx = unique_idx[top_unique_idx2]\n top_unique_beam_offset = top_unique_idx // action_space_size\n top_r = 
r_space_b[top_unique_idx]\n top_e = e_space_b[top_unique_idx]\n next_r_list.append(top_r.unsqueeze(0))\n next_e_list.append(top_e.unsqueeze(0))\n log_action_prob_list.append(top_unique_log_action_dist.unsqueeze(0))\n top_unique_batch_offset = i * last_k\n top_unique_action_offset = top_unique_batch_offset + top_unique_beam_offset\n action_offset_list.append(top_unique_action_offset.unsqueeze(0))\n next_r = self.pad_and_cat(next_r_list, padding_value=0).view(-1)\n next_e = self.pad_and_cat(next_e_list, padding_value=0).view(-1)\n log_action_prob = self.pad_and_cat(log_action_prob_list, padding_value = -float(\"inf\"))\n action_offset = self.pad_and_cat(action_offset_list, padding_value=-1)\n return (next_r, next_e), log_action_prob.view(-1), action_offset.view(-1)\n\n def sync_model(self):\n self.model.load_state_dict(self.shared_model.state_dict())\n \n def pad_and_cat_action_space(self, action_spaces, inv_offset):\n db_r_space, db_e_space, db_action_mask = [], [], []\n for (r_space, e_space), action_mask in action_spaces:\n db_r_space.append(r_space)\n db_e_space.append(e_space)\n db_action_mask.append(action_mask)\n r_space = self.pad_and_cat(db_r_space, padding_value=0)[inv_offset]\n e_space = self.pad_and_cat(db_e_space, padding_value=0)[inv_offset]\n action_mask = self.pad_and_cat(db_action_mask, padding_value=0)[inv_offset]\n action_space = ((r_space, e_space), action_mask)\n return action_space\n \n def pad_and_cat(self, a, padding_value, padding_dim=1):\n max_dim_size = max([x.size()[padding_dim] for x in a])\n padded_a = []\n for x in a:\n if x.size()[padding_dim] < max_dim_size:\n res_len = max_dim_size - x.size()[1]\n pad = nn.ConstantPad1d((0, res_len), padding_value)\n padded_a.append(pad(x))\n else:\n padded_a.append(x)\n return torch.cat(padded_a, dim=0).cuda(self.gpu_id)\n \n\n def unique_max(self, unique_x, x, values, marker_2D=None):\n unique_interval = 100\n HUGE_INT = 1e31\n\n unique_values, unique_indices = [], []\n for i in range(0, len(unique_x), unique_interval):\n unique_x_b = unique_x[i:i+unique_interval]\n marker_2D = (unique_x_b.unsqueeze(1) == x.unsqueeze(0)).float() \n values_2D = marker_2D * values.unsqueeze(0) - (1 - marker_2D) * HUGE_INT \n unique_values_b, unique_idx_b = values_2D.max(dim=1) \n unique_values.append(unique_values_b)\n unique_indices.append(unique_idx_b)\n unique_values = torch.cat(unique_values).cuda(self.gpu_id)\n unique_idx = torch.cat(unique_indices).cuda(self.gpu_id)\n return unique_values, unique_idx\n \n def adjust_search_trace(self, search_trace, action_offset):\n for i, (r, e) in enumerate(search_trace):\n new_r = r[action_offset]\n new_e = e[action_offset]\n search_trace[i] = (new_r, new_e)\n \n def print_search_trace(self, batch_head, l_search_trace, l_log_action_probs):\n for i in range(self.batch_size):\n top_k_edge_labels = []\n for k, log_action_prob in enumerate(l_log_action_probs):\n beam_size = len(log_action_prob)\n for j in range(beam_size): \n ind = i * beam_size + j\n r = self.id2relation[int(l_search_trace[k][0][ind])]\n e = self.id2entity[int(l_search_trace[k][1][ind])]\n if r.endswith('_inverse'):\n edge_label = '<-{}-{} {}'.format(r[:-8], e, float(log_action_prob[ind]))\n else:\n edge_label = '-{}->{} {}'.format(r, e, float(log_action_prob[ind]))\n \n if k == 0:\n edge_label = self.id2entity[int(batch_head[i])] + edge_label\n top_k_edge_labels.append(edge_label) \n else:\n top_k_edge_labels[j] += edge_label \n \n for i, edge_label in enumerate(top_k_edge_labels):\n print(i, edge_label)\n 
print(\"*****************************\")\n\n\n",
"step-ids": [
6,
9,
10,
12,
14
]
}
|
[
6,
9,
10,
12,
14
] |
def html_print(text, title=''):
from IPython.core.display import display, HTML
# create title for the content
display(HTML("<h4>" + str(title) + "</h4>"))
# create content
html = display(HTML("<font size=2 face=Verdana>" + text + "</font>"))
return html
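# A minimal usage sketch, assuming an IPython/Jupyter session so display() can
# actually render HTML; the text and title are made-up placeholders.
if __name__ == "__main__":
    html_print("Report is <b>ready</b>.", title="Status")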
|
normal
|
{
"blob_id": "84a63f60a45f1f8fc1efec8f30345a43c3c30c63",
"index": 7332,
"step-1": "<mask token>\n",
"step-2": "def html_print(text, title=''):\n from IPython.core.display import display, HTML\n display(HTML('<h4>' + str(title) + '</h4>'))\n html = display(HTML('<font size=2 face=Verdana>' + text + '</font>'))\n return html\n",
"step-3": "def html_print(text, title=''):\n\n from IPython.core.display import display, HTML\n\n # create title for the content\n display(HTML(\"<h4>\" + str(title) + \"</h4>\"))\n\n # create content\n html = display(HTML(\"<font size=2 face=Verdana>\" + text + \"</font>\"))\n\n return html\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import torch
import torch.nn as nn
from utils import *
from collections import OrderedDict
from torchsummary import summary
class Model(nn.Module):
"""Example usage:
model = Model()
outputs = model(pov_tensor, feat_tensor)
"""
def __init__(self):
super(Model, self).__init__()
# Convolutional network architecture
self.image_embed = nn.Sequential(
nn.BatchNorm2d(3),
nn.Conv2d(3, 16, 5, stride=2),
nn.MaxPool2d(2, 2),
nn.LeakyReLU(True),
nn.BatchNorm2d(16),
nn.Conv2d(16, 24, 3),
nn.MaxPool2d(2, 2),
nn.LeakyReLU(True),
nn.BatchNorm2d(24),
nn.Conv2d(24, 24, 3),
nn.MaxPool2d(2, 2),
nn.LeakyReLU(True),
nn.BatchNorm2d(24),
nn.Flatten(),
nn.Linear(96, 50),
)
        # Fully connected head that merges the image embedding with the extra features
self.l1 = nn.Linear(50 + 2, 50)
self.r1 = nn.LeakyReLU()
self.out = nn.Linear(50, 11)
"""Model to approximate Q values.
Input
-----
pov: (batch_size, 3, 64, 64) tensor of player view
input_size: (batch_size, 2)
Returns
-------
action: (batch_size, 9) tensor with indicies:
0: attack probability
1-5: CAMERA_OPTIONS[0-4]
6: forward probability
7: jump probability
8: place probability
"""
def forward(self, pov, feats):
pov = self.image_embed(pov)
full_embed = self.l1(torch.cat((pov, feats), dim=1))
full_embed = self.r1(full_embed)
out = self.out(full_embed)
return out
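# A minimal usage sketch with dummy zero tensors; shapes follow the docstring
# above: pov is (batch, 3, 64, 64) and feats is (batch, 2).
if __name__ == "__main__":
    model = Model()
    pov_tensor = torch.zeros(4, 3, 64, 64)
    feat_tensor = torch.zeros(4, 2)
    outputs = model(pov_tensor, feat_tensor)
    print(outputs.shape)  # torch.Size([4, 11]), since the final layer is Linear(50, 11)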
|
normal
|
{
"blob_id": "981cfecdb50b5f3ae326bf3103163f6e814ccc95",
"index": 6857,
"step-1": "<mask token>\n\n\nclass Model(nn.Module):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Model(nn.Module):\n <mask token>\n <mask token>\n <mask token>\n\n def forward(self, pov, feats):\n pov = self.image_embed(pov)\n full_embed = self.l1(torch.cat((pov, feats), dim=1))\n full_embed = self.r1(full_embed)\n out = self.out(full_embed)\n return out\n",
"step-3": "<mask token>\n\n\nclass Model(nn.Module):\n \"\"\"Example usage:\n\n model = Model()\n outputs = model(pov_tensor, feat_tensor)\n \"\"\"\n\n def __init__(self):\n super(Model, self).__init__()\n self.image_embed = nn.Sequential(nn.BatchNorm2d(3), nn.Conv2d(3, 16,\n 5, stride=2), nn.MaxPool2d(2, 2), nn.LeakyReLU(True), nn.\n BatchNorm2d(16), nn.Conv2d(16, 24, 3), nn.MaxPool2d(2, 2), nn.\n LeakyReLU(True), nn.BatchNorm2d(24), nn.Conv2d(24, 24, 3), nn.\n MaxPool2d(2, 2), nn.LeakyReLU(True), nn.BatchNorm2d(24), nn.\n Flatten(), nn.Linear(96, 50))\n self.l1 = nn.Linear(50 + 2, 50)\n self.r1 = nn.LeakyReLU()\n self.out = nn.Linear(50, 11)\n \"\"\"Model to approximate Q values.\n\n Input\n -----\n pov: (batch_size, 3, 64, 64) tensor of player view\n input_size: (batch_size, 2)\n\n Returns\n -------\n action: (batch_size, 9) tensor with indicies:\n 0: attack probability\n 1-5: CAMERA_OPTIONS[0-4]\n 6: forward probability\n 7: jump probability\n 8: place probability\n\n \"\"\"\n\n def forward(self, pov, feats):\n pov = self.image_embed(pov)\n full_embed = self.l1(torch.cat((pov, feats), dim=1))\n full_embed = self.r1(full_embed)\n out = self.out(full_embed)\n return out\n",
"step-4": "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom utils import *\nfrom collections import OrderedDict\nfrom torchsummary import summary\n\n\nclass Model(nn.Module):\n \"\"\"Example usage:\n\n model = Model()\n outputs = model(pov_tensor, feat_tensor)\n \"\"\"\n\n def __init__(self):\n super(Model, self).__init__()\n self.image_embed = nn.Sequential(nn.BatchNorm2d(3), nn.Conv2d(3, 16,\n 5, stride=2), nn.MaxPool2d(2, 2), nn.LeakyReLU(True), nn.\n BatchNorm2d(16), nn.Conv2d(16, 24, 3), nn.MaxPool2d(2, 2), nn.\n LeakyReLU(True), nn.BatchNorm2d(24), nn.Conv2d(24, 24, 3), nn.\n MaxPool2d(2, 2), nn.LeakyReLU(True), nn.BatchNorm2d(24), nn.\n Flatten(), nn.Linear(96, 50))\n self.l1 = nn.Linear(50 + 2, 50)\n self.r1 = nn.LeakyReLU()\n self.out = nn.Linear(50, 11)\n \"\"\"Model to approximate Q values.\n\n Input\n -----\n pov: (batch_size, 3, 64, 64) tensor of player view\n input_size: (batch_size, 2)\n\n Returns\n -------\n action: (batch_size, 9) tensor with indicies:\n 0: attack probability\n 1-5: CAMERA_OPTIONS[0-4]\n 6: forward probability\n 7: jump probability\n 8: place probability\n\n \"\"\"\n\n def forward(self, pov, feats):\n pov = self.image_embed(pov)\n full_embed = self.l1(torch.cat((pov, feats), dim=1))\n full_embed = self.r1(full_embed)\n out = self.out(full_embed)\n return out\n",
"step-5": "import numpy as np\nimport torch\nimport torch.nn as nn\nfrom utils import *\nfrom collections import OrderedDict\nfrom torchsummary import summary\n\n\nclass Model(nn.Module):\n \"\"\"Example usage:\n\n model = Model()\n outputs = model(pov_tensor, feat_tensor)\n \"\"\"\n def __init__(self):\n super(Model, self).__init__()\n # Convolutional network architecture\n self.image_embed = nn.Sequential(\n nn.BatchNorm2d(3),\n nn.Conv2d(3, 16, 5, stride=2),\n nn.MaxPool2d(2, 2),\n nn.LeakyReLU(True),\n nn.BatchNorm2d(16),\n nn.Conv2d(16, 24, 3),\n nn.MaxPool2d(2, 2),\n nn.LeakyReLU(True),\n nn.BatchNorm2d(24),\n nn.Conv2d(24, 24, 3),\n nn.MaxPool2d(2, 2),\n nn.LeakyReLU(True),\n nn.BatchNorm2d(24),\n nn.Flatten(),\n nn.Linear(96, 50),\n )\n # Regularization layer\n self.l1 = nn.Linear(50 + 2, 50)\n self.r1 = nn.LeakyReLU()\n self.out = nn.Linear(50, 11)\n\n \"\"\"Model to approximate Q values.\n\n Input\n -----\n pov: (batch_size, 3, 64, 64) tensor of player view\n input_size: (batch_size, 2)\n\n Returns\n -------\n action: (batch_size, 9) tensor with indicies:\n 0: attack probability\n 1-5: CAMERA_OPTIONS[0-4]\n 6: forward probability\n 7: jump probability\n 8: place probability\n\n \"\"\"\n def forward(self, pov, feats):\n pov = self.image_embed(pov)\n full_embed = self.l1(torch.cat((pov, feats), dim=1))\n full_embed = self.r1(full_embed)\n out = self.out(full_embed)\n return out\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
from django.db.models import Count
from django.utils.text import slugify
from rest_framework.serializers import ModelSerializer, SerializerMethodField, Serializer
from rest_framework import serializers
from category.models import Category
from product.models import Product, GalleryProduct, Stone, Color, Size
from category.api.serializers import CategorySerializer
from extensions.calculations import calculating_gold_jewelry
from taggit_serializer.serializers import (
TagListSerializerField,
TaggitSerializer
)
def _create_custom_uuid():
max_id = 1
ex_last_product = Product.objects.last()
if ex_last_product:
max_id = ex_last_product.id
my_id = '{}{:07d}'.format('EUA', max_id if max_id is not None else 1)
return my_id
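# Worked example of the format above: a last Product id of 42 yields 'EUA0000042';
# an empty table falls back to 'EUA0000001'.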
class ColorSerializer(ModelSerializer):
class Meta:
model = Color
fields = ['id', 'color']
class SizeSerializer(ModelSerializer):
class Meta:
model = Size
fields = ['id', 'size']
class StoneSerilizer(ModelSerializer):
class Meta:
model = Stone
fields = '__all__'
class ImageCreateProductSerializer(serializers.ModelSerializer):  # ModelSerializer is required for the Meta model/fields below to take effect
class Meta:
model = GalleryProduct
fields = ['image']
class ProductListSerializer(serializers.ModelSerializer):
gallery = serializers.SerializerMethodField()
category = serializers.SerializerMethodField()
price = serializers.SerializerMethodField()
class Meta:
model = Product
fields = [
'id',
'rating',
'title',
'slug',
'image',
'gallery',
'category',
'price'
]
def get_category(self, obj):
result = obj.category
return CategorySerializer(instance=result).data
def get_gallery(self, obj):
result = GalleryProduct.objects.filter(product_id=obj)
return ImageProductSerializer(instance=result, many=True).data
def get_price(self, obj):
return obj.price
class ProductsOrderCartSerializer(ModelSerializer):
class Meta:
model = Product
fields = ['id', 'title', 'slug', 'image']
class ProductDetailSerializer(TaggitSerializer, ModelSerializer):
tags = TagListSerializerField()
gallery = SerializerMethodField()
color = SerializerMethodField()
size = SerializerMethodField()
category = SerializerMethodField()
price = serializers.SerializerMethodField()
class Meta:
model = Product
exclude = [
'site_rate',
'is_rate_fixed',
'provider_gold_rate',
'provider_diamond_price',
]
def get_color(self, obj):
result = obj.color.all()
return ColorSerializer(instance=result, many=True).data
def get_size(self, obj):
result = obj.size.all()
return SizeSerializer(instance=result, many=True).data
def get_category(self, obj):
return CategorySerializer(instance=obj.category).data
def get_gallery(self, obj):
result = GalleryProduct.objects.filter(product_id=obj)
return ImageProductSerializer(instance=result, many=True).data
def get_price(self, obj):
return obj.price
class ImageProductSerializer(ModelSerializer):
class Meta:
model = GalleryProduct
fields = ['image', 'product']
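# A minimal sketch of calling the list serializer from a view; select_related('category')
# assumes Product has a 'category' foreign key, as the serializers above imply.
def serialize_product_list():
    products = Product.objects.select_related('category').all()
    return ProductListSerializer(products, many=True).data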
|
normal
|
{
"blob_id": "8be6031caad26ec6b6b99b8d8b8f80d16ad243d4",
"index": 7706,
"step-1": "<mask token>\n\n\nclass ProductsOrderCartSerializer(ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['id', 'title', 'slug', 'image']\n\n\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\n tags = TagListSerializerField()\n gallery = SerializerMethodField()\n color = SerializerMethodField()\n size = SerializerMethodField()\n category = SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n exclude = ['site_rate', 'is_rate_fixed', 'provider_gold_rate',\n 'provider_diamond_price']\n\n def get_color(self, obj):\n result = obj.color.all()\n return ColorSerializer(instance=result, many=True).data\n\n def get_size(self, obj):\n result = obj.size.all()\n return SizeSerializer(instance=result, many=True).data\n\n def get_category(self, obj):\n return CategorySerializer(instance=obj.category).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ImageProductSerializer(ModelSerializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image', 'product']\n",
"step-2": "<mask token>\n\n\nclass ImageCreateProductSerializer(serializers.Serializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image']\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n gallery = serializers.SerializerMethodField()\n category = serializers.SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n fields = ['id', 'rating', 'title', 'slug', 'image', 'gallery',\n 'category', 'price']\n\n def get_category(self, obj):\n result = obj.category\n return CategorySerializer(instance=result).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ProductsOrderCartSerializer(ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['id', 'title', 'slug', 'image']\n\n\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\n tags = TagListSerializerField()\n gallery = SerializerMethodField()\n color = SerializerMethodField()\n size = SerializerMethodField()\n category = SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n exclude = ['site_rate', 'is_rate_fixed', 'provider_gold_rate',\n 'provider_diamond_price']\n\n def get_color(self, obj):\n result = obj.color.all()\n return ColorSerializer(instance=result, many=True).data\n\n def get_size(self, obj):\n result = obj.size.all()\n return SizeSerializer(instance=result, many=True).data\n\n def get_category(self, obj):\n return CategorySerializer(instance=obj.category).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ImageProductSerializer(ModelSerializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image', 'product']\n",
"step-3": "<mask token>\n\n\nclass StoneSerilizer(ModelSerializer):\n\n\n class Meta:\n model = Stone\n fields = '__all__'\n\n\nclass ImageCreateProductSerializer(serializers.Serializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image']\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n gallery = serializers.SerializerMethodField()\n category = serializers.SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n fields = ['id', 'rating', 'title', 'slug', 'image', 'gallery',\n 'category', 'price']\n\n def get_category(self, obj):\n result = obj.category\n return CategorySerializer(instance=result).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ProductsOrderCartSerializer(ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['id', 'title', 'slug', 'image']\n\n\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\n tags = TagListSerializerField()\n gallery = SerializerMethodField()\n color = SerializerMethodField()\n size = SerializerMethodField()\n category = SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n exclude = ['site_rate', 'is_rate_fixed', 'provider_gold_rate',\n 'provider_diamond_price']\n\n def get_color(self, obj):\n result = obj.color.all()\n return ColorSerializer(instance=result, many=True).data\n\n def get_size(self, obj):\n result = obj.size.all()\n return SizeSerializer(instance=result, many=True).data\n\n def get_category(self, obj):\n return CategorySerializer(instance=obj.category).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ImageProductSerializer(ModelSerializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image', 'product']\n",
"step-4": "<mask token>\n\n\nclass SizeSerializer(ModelSerializer):\n\n\n class Meta:\n model = Size\n fields = ['id', 'size']\n\n\nclass StoneSerilizer(ModelSerializer):\n\n\n class Meta:\n model = Stone\n fields = '__all__'\n\n\nclass ImageCreateProductSerializer(serializers.Serializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image']\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n gallery = serializers.SerializerMethodField()\n category = serializers.SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n fields = ['id', 'rating', 'title', 'slug', 'image', 'gallery',\n 'category', 'price']\n\n def get_category(self, obj):\n result = obj.category\n return CategorySerializer(instance=result).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ProductsOrderCartSerializer(ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = ['id', 'title', 'slug', 'image']\n\n\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\n tags = TagListSerializerField()\n gallery = SerializerMethodField()\n color = SerializerMethodField()\n size = SerializerMethodField()\n category = SerializerMethodField()\n price = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Product\n exclude = ['site_rate', 'is_rate_fixed', 'provider_gold_rate',\n 'provider_diamond_price']\n\n def get_color(self, obj):\n result = obj.color.all()\n return ColorSerializer(instance=result, many=True).data\n\n def get_size(self, obj):\n result = obj.size.all()\n return SizeSerializer(instance=result, many=True).data\n\n def get_category(self, obj):\n return CategorySerializer(instance=obj.category).data\n\n def get_gallery(self, obj):\n result = GalleryProduct.objects.filter(product_id=obj)\n return ImageProductSerializer(instance=result, many=True).data\n\n def get_price(self, obj):\n return obj.price\n\n\nclass ImageProductSerializer(ModelSerializer):\n\n\n class Meta:\n model = GalleryProduct\n fields = ['image', 'product']\n",
"step-5": "from django.db.models import Count\r\nfrom django.utils.text import slugify\r\n\r\nfrom rest_framework.serializers import ModelSerializer, SerializerMethodField, Serializer\r\nfrom rest_framework import serializers\r\n\r\nfrom category.models import Category\r\nfrom product.models import Product, GalleryProduct, Stone, Color, Size\r\nfrom category.api.serializers import CategorySerializer\r\nfrom extensions.calculations import calculating_gold_jewelry\r\nfrom taggit_serializer.serializers import (\r\n\tTagListSerializerField,\r\n\tTaggitSerializer\r\n\t)\r\n\r\n\r\ndef _create_custom_uuid():\r\n\tmax_id = 1\r\n\tex_last_product = Product.objects.last()\r\n\tif ex_last_product:\r\n\t\tmax_id = ex_last_product.id\r\n\r\n\tmy_id = '{}{:07d}'.format('EUA', max_id if max_id is not None else 1)\r\n\treturn my_id\r\n\r\n\r\nclass ColorSerializer(ModelSerializer):\r\n\tclass Meta:\r\n\t\tmodel = Color\r\n\t\tfields = ['id', 'color']\r\n\r\n\r\nclass SizeSerializer(ModelSerializer):\r\n\tclass Meta:\r\n\t\tmodel = Size\r\n\t\tfields = ['id', 'size']\r\n\r\n\r\nclass StoneSerilizer(ModelSerializer):\r\n\tclass Meta:\r\n\t\tmodel = Stone\r\n\t\tfields = '__all__'\r\n\t\t\r\n\r\nclass ImageCreateProductSerializer(serializers.Serializer):\r\n\tclass Meta:\r\n\t\tmodel = GalleryProduct\r\n\t\tfields = ['image']\r\n\t\r\n\r\nclass ProductListSerializer(serializers.ModelSerializer):\r\n\tgallery = serializers.SerializerMethodField()\r\n\tcategory = serializers.SerializerMethodField()\r\n\tprice = serializers.SerializerMethodField()\r\n\r\n\tclass Meta:\r\n\t\tmodel = Product\r\n\t\tfields = [\r\n\t\t\t'id',\r\n\t\t\t'rating',\r\n\t\t\t'title',\r\n\t\t\t'slug',\r\n\t\t\t'image',\r\n\t\t\t'gallery',\r\n\t\t\t'category',\r\n\t\t\t'price'\r\n\t\t]\r\n\r\n\tdef get_category(self, obj):\r\n\t\tresult = obj.category\r\n\t\treturn CategorySerializer(instance=result).data\r\n\r\n\tdef get_gallery(self, obj):\r\n\t\tresult = GalleryProduct.objects.filter(product_id=obj)\r\n\t\treturn ImageProductSerializer(instance=result, many=True).data\r\n\r\n\tdef get_price(self, obj):\r\n\t\treturn obj.price\r\n\r\n\r\nclass ProductsOrderCartSerializer(ModelSerializer):\r\n\r\n\tclass Meta:\r\n\t\tmodel = Product\r\n\t\tfields = ['id', 'title', 'slug', 'image']\r\n\r\n\r\nclass ProductDetailSerializer(TaggitSerializer, ModelSerializer):\r\n\ttags = TagListSerializerField()\r\n\tgallery = SerializerMethodField()\r\n\tcolor = SerializerMethodField()\r\n\tsize = SerializerMethodField()\r\n\tcategory = SerializerMethodField()\r\n\tprice = serializers.SerializerMethodField()\r\n\r\n\tclass Meta:\r\n\t\tmodel = Product\r\n\t\texclude = [\r\n\t\t\t'site_rate',\r\n\t\t\t'is_rate_fixed',\r\n\t\t\t'provider_gold_rate',\r\n\t\t\t'provider_diamond_price',\r\n\t\t]\r\n\r\n\tdef get_color(self, obj):\r\n\t\tresult = obj.color.all()\r\n\t\treturn ColorSerializer(instance=result, many=True).data\r\n\r\n\tdef get_size(self, obj):\r\n\t\tresult = obj.size.all()\r\n\t\treturn SizeSerializer(instance=result, many=True).data\r\n\r\n\tdef get_category(self, obj):\r\n\t\treturn CategorySerializer(instance=obj.category).data\r\n\r\n\tdef get_gallery(self, obj):\r\n\t\tresult = GalleryProduct.objects.filter(product_id=obj)\r\n\t\treturn ImageProductSerializer(instance=result, many=True).data\r\n\r\n\tdef get_price(self, obj):\r\n\t\treturn obj.price\r\n\r\n\r\nclass ImageProductSerializer(ModelSerializer):\r\n\tclass Meta:\r\n\t\tmodel = GalleryProduct\r\n\t\tfields = ['image', 'product']\r\n",
"step-ids": [
9,
15,
16,
17,
21
]
}
|
[
9,
15,
16,
17,
21
] |
import os
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "day66.settings")
import django
django.setup()
from applistions.models import MyClass,Student,Teacher,Employee
from django.db.models import Avg, Sum, Max, Min, Count
    # 1. Find the highest salary among all employees
ret = Employee.objects.all().aggregate(Max('salary'))
print(ret) # {'salary__max': 80909}
    # # Specify the key name used in the returned dict
ret = Employee.objects.all().aggregate(max_salary=Max('salary'))
print(ret) # {'max_salary': 80909}
    # # Average salary across all employees
ret = Employee.objects.all().aggregate(Avg('salary'))
print(ret) # {'salary__avg': 20855.1667}
    # Use the ORM to query the average salary of each department
ret = Employee.objects.values('dept').aggregate(Avg('salary'))
    print(ret) # this is still the overall average salary, so this query is wrong
    # annotate() states what to compute after grouping
    # rows are grouped by whatever values() comes before annotate()
ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list('dept','salary__avg')
print(ret) # <QuerySet [('财务部', 2111.0), ('技术部', 17000.0), ('人事部', 6000.0), ('管理部', 80909.0)]>
    # # Grouping in the ORM is done with annotate
    # # 1. annotate() states what to compute after grouping
    # # 2. rows are grouped by whatever values() comes before annotate()
# ret = Employee.objects.values('dept').annotate(avg_price=Avg('salary')).values('dept', 'avg_price')
# print(ret)
#
    # # Average age of each department
ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')).values_list('dept','avg_age')
print(ret) # <QuerySet [('财务部', 27.5), ('技术部', 300.0), ('人事部', 45.0), ('管理部', 45.0)]>
    # # Number of students in each class
ret = Student.objects.values('myclass').annotate(s_count=Count('id'))
print(ret) # <QuerySet [{'myclass': 1, 's_count': 1}, {'myclass': 2, 's_count': 3}, {'myclass': 3, 's_count': 2}, {'myclass': 4, 's_count': 1}, {'myclass': 5, 's_count': 1}, {'myclass': 6, 's_count': 1}, {'myclass': 7, 's_count': 1}]>
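    # An extra sketch: the annotation alias can be filtered and ordered like a
    # normal field ('dept' and 'salary' are the same model fields used above).
    ret = Employee.objects.values('dept').annotate(avg_salary=Avg('salary')).filter(avg_salary__gt=10000).order_by('-avg_salary')
    print(ret)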
|
normal
|
{
"blob_id": "ee72262fb29b46784fb357269dd5160192968c1b",
"index": 1713,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day66.settings')\n import django\n django.setup()\n from applistions.models import MyClass, Student, Teacher, Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list(\n 'dept', 'salary__avg')\n print(ret)\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')\n ).values_list('dept', 'avg_age')\n print(ret)\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret)\n",
"step-3": "import os\nif __name__ == '__main__':\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day66.settings')\n import django\n django.setup()\n from applistions.models import MyClass, Student, Teacher, Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list(\n 'dept', 'salary__avg')\n print(ret)\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')\n ).values_list('dept', 'avg_age')\n print(ret)\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret)\n",
"step-4": "import os\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"day66.settings\")\n\n import django\n django.setup()\n\n from applistions.models import MyClass,Student,Teacher,Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n\n # 1.求所有人里面工资最高的\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret) # {'salary__max': 80909}\n\n # # 指定返回字典中key的值\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret) # {'max_salary': 80909}\n\n # # 求所有人的平均价格\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret) # {'salary__avg': 20855.1667}\n\n # 使用ORM查询每个部门的平均工资\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret) # 查询的是每个人的平均工资,此条查询错误\n # annotate中要写上分住之后要做的事情\n # anntate前面查询的是什么就按什么分组\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list('dept','salary__avg')\n print(ret) # <QuerySet [('财务部', 2111.0), ('技术部', 17000.0), ('人事部', 6000.0), ('管理部', 80909.0)]>\n\n # # ORM中分组使用annotate\n # # 1. annotate中要写上分组之后要做的事情\n # # 2. annotate前面查询的是什么就按什么分组\n # ret = Employee.objects.values('dept').annotate(avg_price=Avg('salary')).values('dept', 'avg_price')\n # print(ret)\n #\n # # 每个部门的平均年龄\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')).values_list('dept','avg_age')\n print(ret) # <QuerySet [('财务部', 27.5), ('技术部', 300.0), ('人事部', 45.0), ('管理部', 45.0)]>\n\n # # 求每个班级的学生的数量\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret) # <QuerySet [{'myclass': 1, 's_count': 1}, {'myclass': 2, 's_count': 3}, {'myclass': 3, 's_count': 2}, {'myclass': 4, 's_count': 1}, {'myclass': 5, 's_count': 1}, {'myclass': 6, 's_count': 1}, {'myclass': 7, 's_count': 1}]>\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
from django.http import HttpResponse
try:
import simplejson as json
except ImportError:
import json
from api import *
def index(request):
data = parse_signed_request(request)
if not data.has_key('user_id'):
request_url = oauth_request_url()
return HttpResponse("<script>top.location.href='%s';</script>" % request_url)
return HttpResponse("Welcome %s" % data['user_id'])
|
normal
|
{
"blob_id": "17f76c2b53b36c81cea7f7616859f5257790cd73",
"index": 9298,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n data = parse_signed_request(request)\n if not data.has_key('user_id'):\n request_url = oauth_request_url()\n return HttpResponse(\"<script>top.location.href='%s';</script>\" %\n request_url)\n return HttpResponse('Welcome %s' % data['user_id'])\n",
"step-3": "<mask token>\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n<mask token>\n\n\ndef index(request):\n data = parse_signed_request(request)\n if not data.has_key('user_id'):\n request_url = oauth_request_url()\n return HttpResponse(\"<script>top.location.href='%s';</script>\" %\n request_url)\n return HttpResponse('Welcome %s' % data['user_id'])\n",
"step-4": "from django.http import HttpResponse\ntry:\n import simplejson as json\nexcept ImportError:\n import json\nfrom api import *\n\n\ndef index(request):\n data = parse_signed_request(request)\n if not data.has_key('user_id'):\n request_url = oauth_request_url()\n return HttpResponse(\"<script>top.location.href='%s';</script>\" %\n request_url)\n return HttpResponse('Welcome %s' % data['user_id'])\n",
"step-5": "#!/usr/bin/env python\r\nfrom django.http import HttpResponse\r\n\r\ntry:\r\n import simplejson as json\r\nexcept ImportError:\r\n import json\r\n \r\nfrom api import *\r\n\r\ndef index(request):\r\n data = parse_signed_request(request)\r\n\r\n if not data.has_key('user_id'):\r\n request_url = oauth_request_url()\r\n\r\n return HttpResponse(\"<script>top.location.href='%s';</script>\" % request_url)\r\n\r\n return HttpResponse(\"Welcome %s\" % data['user_id'])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import matplotlib.pyplot as plt
x_list = []
y_list = []
file1 = open("pos_data_x.txt", "r")
for line in file1:
#x_list.append(float(file1.readline(line)))
x_list.append(float(line))
file2 = open("pos_data_y.txt", "r")
for line in file2:
#y_list.append(float(file1.readline(line)))
y_list.append(float(line))
file2.close
file1.close
desired_x = [0.0, 0.5, 0.5]
desired_y = [0.0, 0.0, 0.5]
desired_pos_x_list = [1.0, 1.0, 0.0, 0.0] #[0.5, 0.5, 0.0, 0.0]
desired_pos_y_list = [0.0, 0.7, 0.7, 0.0] #[0.0, 0.5, 0.5, 0.0]
plt.plot(x_list, y_list, label = 'robot trajectory')
#plt.plot(desired_x, desired_y, marker = 'x', label = 'desired position')
plt.plot(desired_pos_x_list, desired_pos_y_list, marker = 'x', label = 'desired position')
plt.title("Robot trajectory based on the wheel encoders ")
plt.xlabel("x [m]")
plt.ylabel("y [m]")
#plt.axis("square")
plt.legend()
plt.show()
|
normal
|
{
"blob_id": "d869aa32cb9793ce11a5b6a782cc66c2dd0be309",
"index": 6176,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in file1:\n x_list.append(float(line))\n<mask token>\nfor line in file2:\n y_list.append(float(line))\nfile2.close\nfile1.close\n<mask token>\nplt.plot(x_list, y_list, label='robot trajectory')\nplt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label=\n 'desired position')\nplt.title('Robot trajectory based on the wheel encoders ')\nplt.xlabel('x [m]')\nplt.ylabel('y [m]')\nplt.legend()\nplt.show()\n",
"step-3": "<mask token>\nx_list = []\ny_list = []\nfile1 = open('pos_data_x.txt', 'r')\nfor line in file1:\n x_list.append(float(line))\nfile2 = open('pos_data_y.txt', 'r')\nfor line in file2:\n y_list.append(float(line))\nfile2.close\nfile1.close\ndesired_x = [0.0, 0.5, 0.5]\ndesired_y = [0.0, 0.0, 0.5]\ndesired_pos_x_list = [1.0, 1.0, 0.0, 0.0]\ndesired_pos_y_list = [0.0, 0.7, 0.7, 0.0]\nplt.plot(x_list, y_list, label='robot trajectory')\nplt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label=\n 'desired position')\nplt.title('Robot trajectory based on the wheel encoders ')\nplt.xlabel('x [m]')\nplt.ylabel('y [m]')\nplt.legend()\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nx_list = []\ny_list = []\nfile1 = open('pos_data_x.txt', 'r')\nfor line in file1:\n x_list.append(float(line))\nfile2 = open('pos_data_y.txt', 'r')\nfor line in file2:\n y_list.append(float(line))\nfile2.close\nfile1.close\ndesired_x = [0.0, 0.5, 0.5]\ndesired_y = [0.0, 0.0, 0.5]\ndesired_pos_x_list = [1.0, 1.0, 0.0, 0.0]\ndesired_pos_y_list = [0.0, 0.7, 0.7, 0.0]\nplt.plot(x_list, y_list, label='robot trajectory')\nplt.plot(desired_pos_x_list, desired_pos_y_list, marker='x', label=\n 'desired position')\nplt.title('Robot trajectory based on the wheel encoders ')\nplt.xlabel('x [m]')\nplt.ylabel('y [m]')\nplt.legend()\nplt.show()\n",
"step-5": "import numpy as np\n\nimport matplotlib.pyplot as plt\n\nx_list = []\ny_list = []\n\nfile1 = open(\"pos_data_x.txt\", \"r\")\nfor line in file1:\n\t#x_list.append(float(file1.readline(line)))\n\tx_list.append(float(line))\n\t\nfile2 = open(\"pos_data_y.txt\", \"r\")\nfor line in file2:\n\t#y_list.append(float(file1.readline(line)))\n\ty_list.append(float(line))\n\t\n\nfile2.close\nfile1.close\n\ndesired_x = [0.0, 0.5, 0.5]\ndesired_y = [0.0, 0.0, 0.5]\n\ndesired_pos_x_list = [1.0, 1.0, 0.0, 0.0] #[0.5, 0.5, 0.0, 0.0]\ndesired_pos_y_list = [0.0, 0.7, 0.7, 0.0] #[0.0, 0.5, 0.5, 0.0]\n\nplt.plot(x_list, y_list, label = 'robot trajectory')\n#plt.plot(desired_x, desired_y, marker = 'x', label = 'desired position')\nplt.plot(desired_pos_x_list, desired_pos_y_list, marker = 'x', label = 'desired position')\nplt.title(\"Robot trajectory based on the wheel encoders \")\nplt.xlabel(\"x [m]\")\nplt.ylabel(\"y [m]\")\n#plt.axis(\"square\")\nplt.legend()\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django_evolution.mutations import ChangeField
MUTATIONS = [
ChangeField('ReviewRequest', 'depends_on', initial=None, null=False),
ChangeField('ReviewRequestDraft', 'depends_on', initial=None, null=False),
]
|
normal
|
{
"blob_id": "286953e381d03c0817d57f9ee4e15f2a0ce808a9",
"index": 9776,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nMUTATIONS = [ChangeField('ReviewRequest', 'depends_on', initial=None, null=\n False), ChangeField('ReviewRequestDraft', 'depends_on', initial=None,\n null=False)]\n",
"step-3": "from django_evolution.mutations import ChangeField\nMUTATIONS = [ChangeField('ReviewRequest', 'depends_on', initial=None, null=\n False), ChangeField('ReviewRequestDraft', 'depends_on', initial=None,\n null=False)]\n",
"step-4": "from django_evolution.mutations import ChangeField\n\n\nMUTATIONS = [\n ChangeField('ReviewRequest', 'depends_on', initial=None, null=False),\n ChangeField('ReviewRequestDraft', 'depends_on', initial=None, null=False),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from functools import reduce
from collections import defaultdict
def memory(count: int, start_numbers: list):
numbers = defaultdict(lambda: tuple(2 * [None]), { el: (idx,None ) for idx,el in enumerate(start_numbers) })
last = start_numbers[-1]
for idx in range(len(numbers), count):
last = 0 if None in numbers[last] else reduce(lambda a,b:a-b, numbers[last])
numbers[last] = ( idx, numbers[last][0] )
print(f"For starting numbers: {start_numbers}, the {count}th number is: {last}")
[ memory(count, [8,0,17,4,1,12]) for count in [ 2020, 30000000 ] ]
|
normal
|
{
"blob_id": "0f0adde7241898d2efe7e2b5cc218e42ed7b73d8",
"index": 5475,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n[memory(count, [8, 0, 17, 4, 1, 12]) for count in [2020, 30000000]]\n",
"step-4": "from functools import reduce\nfrom collections import defaultdict\n\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda : tuple(2 * [None]), {el: (idx, None) for \n idx, el in enumerate(start_numbers)})\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a, b: a - b,\n numbers[last])\n numbers[last] = idx, numbers[last][0]\n print(\n f'For starting numbers: {start_numbers}, the {count}th number is: {last}'\n )\n\n\n[memory(count, [8, 0, 17, 4, 1, 12]) for count in [2020, 30000000]]\n",
"step-5": "from functools import reduce\nfrom collections import defaultdict\n\ndef memory(count: int, start_numbers: list):\n numbers = defaultdict(lambda: tuple(2 * [None]), { el: (idx,None ) for idx,el in enumerate(start_numbers) })\n last = start_numbers[-1]\n for idx in range(len(numbers), count):\n last = 0 if None in numbers[last] else reduce(lambda a,b:a-b, numbers[last])\n numbers[last] = ( idx, numbers[last][0] )\n print(f\"For starting numbers: {start_numbers}, the {count}th number is: {last}\")\n[ memory(count, [8,0,17,4,1,12]) for count in [ 2020, 30000000 ] ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# 导入包
import matplotlib.pyplot as plt
import numpy as np
# 显示中文和显示负号
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# X轴和Y轴数据,票房单位亿
a = ["战狼2","速度与激情8","功夫瑜伽","西游伏妖篇","变形金刚5:最后的骑士","摔跤吧!爸爸","加勒比海盗5:死无对证","金刚:骷髅岛","极限特工:终极回归","生化危机6:终章","乘风破浪","神偷奶爸3","智取威虎山","大闹天竺","金刚狼3:殊死一战","蜘蛛侠:英雄归来","悟空传","银河护卫队2","情圣","新木乃伊",]
b = [56.01,26.94,17.53,16.49,15.45,12.96,11.8,11.61,11.28,11.12,10.49,10.3,8.75,7.55,7.32,6.99,6.88,6.86,6.58,6.23]
# 设置图形的大小
plt.figure(figsize=(20, 8), dpi=128)
# 绘制横置条形图,x轴参数是一个可迭代对象,一般为列表
# 竖直条形图,用的是width设置宽度
plt.barh(a, b, height=0.5, color='red')
# 设置图片,X轴,Y轴标题
plt.title("2018年电影票房纪录", fontsize=24)
plt.xlabel("票房(亿元)", fontsize=14)
# 设置坐标轴刻度,刻度间隔,range不能设置步长
my_x_ticks = np.arange(0, 61, 5)
plt.xticks(my_x_ticks)
# 设置网格
plt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)
# 显示图形
plt.show()
|
normal
|
{
"blob_id": "16d86c48c45ab0441046e968ea364d27f6dcfd12",
"index": 3066,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.figure(figsize=(20, 8), dpi=128)\nplt.barh(a, b, height=0.5, color='red')\nplt.title('2018年电影票房纪录', fontsize=24)\nplt.xlabel('票房(亿元)', fontsize=14)\n<mask token>\nplt.xticks(my_x_ticks)\nplt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)\nplt.show()\n",
"step-3": "<mask token>\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\na = ['战狼2', '速度与激情8', '功夫瑜伽', '西游伏妖篇', '变形金刚5:最后的骑士', '摔跤吧!爸爸',\n '加勒比海盗5:死无对证', '金刚:骷髅岛', '极限特工:终极回归', '生化危机6:终章', '乘风破浪', '神偷奶爸3',\n '智取威虎山', '大闹天竺', '金刚狼3:殊死一战', '蜘蛛侠:英雄归来', '悟空传', '银河护卫队2', '情圣', '新木乃伊']\nb = [56.01, 26.94, 17.53, 16.49, 15.45, 12.96, 11.8, 11.61, 11.28, 11.12, \n 10.49, 10.3, 8.75, 7.55, 7.32, 6.99, 6.88, 6.86, 6.58, 6.23]\nplt.figure(figsize=(20, 8), dpi=128)\nplt.barh(a, b, height=0.5, color='red')\nplt.title('2018年电影票房纪录', fontsize=24)\nplt.xlabel('票房(亿元)', fontsize=14)\nmy_x_ticks = np.arange(0, 61, 5)\nplt.xticks(my_x_ticks)\nplt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\na = ['战狼2', '速度与激情8', '功夫瑜伽', '西游伏妖篇', '变形金刚5:最后的骑士', '摔跤吧!爸爸',\n '加勒比海盗5:死无对证', '金刚:骷髅岛', '极限特工:终极回归', '生化危机6:终章', '乘风破浪', '神偷奶爸3',\n '智取威虎山', '大闹天竺', '金刚狼3:殊死一战', '蜘蛛侠:英雄归来', '悟空传', '银河护卫队2', '情圣', '新木乃伊']\nb = [56.01, 26.94, 17.53, 16.49, 15.45, 12.96, 11.8, 11.61, 11.28, 11.12, \n 10.49, 10.3, 8.75, 7.55, 7.32, 6.99, 6.88, 6.86, 6.58, 6.23]\nplt.figure(figsize=(20, 8), dpi=128)\nplt.barh(a, b, height=0.5, color='red')\nplt.title('2018年电影票房纪录', fontsize=24)\nplt.xlabel('票房(亿元)', fontsize=14)\nmy_x_ticks = np.arange(0, 61, 5)\nplt.xticks(my_x_ticks)\nplt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)\nplt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# 导入包\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 显示中文和显示负号\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\n# X轴和Y轴数据,票房单位亿\na = [\"战狼2\",\"速度与激情8\",\"功夫瑜伽\",\"西游伏妖篇\",\"变形金刚5:最后的骑士\",\"摔跤吧!爸爸\",\"加勒比海盗5:死无对证\",\"金刚:骷髅岛\",\"极限特工:终极回归\",\"生化危机6:终章\",\"乘风破浪\",\"神偷奶爸3\",\"智取威虎山\",\"大闹天竺\",\"金刚狼3:殊死一战\",\"蜘蛛侠:英雄归来\",\"悟空传\",\"银河护卫队2\",\"情圣\",\"新木乃伊\",]\nb = [56.01,26.94,17.53,16.49,15.45,12.96,11.8,11.61,11.28,11.12,10.49,10.3,8.75,7.55,7.32,6.99,6.88,6.86,6.58,6.23]\n\n# 设置图形的大小\nplt.figure(figsize=(20, 8), dpi=128)\n\n# 绘制横置条形图,x轴参数是一个可迭代对象,一般为列表\n# 竖直条形图,用的是width设置宽度\nplt.barh(a, b, height=0.5, color='red')\n\n# 设置图片,X轴,Y轴标题\nplt.title(\"2018年电影票房纪录\", fontsize=24)\nplt.xlabel(\"票房(亿元)\", fontsize=14)\n\n# 设置坐标轴刻度,刻度间隔,range不能设置步长\nmy_x_ticks = np.arange(0, 61, 5)\nplt.xticks(my_x_ticks)\n\n# 设置网格\nplt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)\n\n# 显示图形\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding=utf-8
"""Advent of Code 2018, Day 7"""
import networkx
import re
G = networkx.DiGraph()
with open("puzzle_input") as f:
for line in f.read().split("\n"):
match = re.search("Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])", line)
G.add_edge(match.group("pre"), match.group("post"))
def part_one():
"""Solution to Part 1"""
return "".join(networkx.lexicographical_topological_sort(G))
def part_two():
"""Solution to Part 2"""
tasks = {}
current_time = 0
while G.nodes():
# noinspection PyCallingNonCallable
candidate_next_tasks = [task for task in G.nodes()
if task not in tasks.keys() and G.in_degree(task) == 0]
if candidate_next_tasks and len(tasks) < 5:
next_task = sorted(candidate_next_tasks)[0]
tasks[next_task] = ord(next_task) - 4
else:
min_task_time = min(tasks.values())
current_time += min_task_time
completed_task = dict(zip(tasks.values(), tasks.keys()))[min_task_time]
tasks = {k: v - min_task_time for k, v in tasks.items() if k != completed_task}
G.remove_node(completed_task)
return current_time
|
normal
|
{
"blob_id": "1c5884c10ac0b6a3335f8e677007fc52311245e2",
"index": 7603,
"step-1": "<mask token>\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return ''.join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n candidate_next_tasks = [task for task in G.nodes() if task not in\n tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[\n min_task_time]\n tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=\n completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-2": "<mask token>\nwith open('puzzle_input') as f:\n for line in f.read().split('\\n'):\n match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)\n G.add_edge(match.group('pre'), match.group('post'))\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return ''.join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n candidate_next_tasks = [task for task in G.nodes() if task not in\n tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[\n min_task_time]\n tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=\n completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-3": "<mask token>\nG = networkx.DiGraph()\nwith open('puzzle_input') as f:\n for line in f.read().split('\\n'):\n match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)\n G.add_edge(match.group('pre'), match.group('post'))\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return ''.join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n candidate_next_tasks = [task for task in G.nodes() if task not in\n tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[\n min_task_time]\n tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=\n completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-4": "<mask token>\nimport networkx\nimport re\nG = networkx.DiGraph()\nwith open('puzzle_input') as f:\n for line in f.read().split('\\n'):\n match = re.search('Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])', line)\n G.add_edge(match.group('pre'), match.group('post'))\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return ''.join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n candidate_next_tasks = [task for task in G.nodes() if task not in\n tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[\n min_task_time]\n tasks = {k: (v - min_task_time) for k, v in tasks.items() if k !=\n completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-5": "# coding=utf-8\n\"\"\"Advent of Code 2018, Day 7\"\"\"\n\nimport networkx\nimport re\n\nG = networkx.DiGraph()\nwith open(\"puzzle_input\") as f:\n for line in f.read().split(\"\\n\"):\n match = re.search(\"Step (?P<pre>[A-Z]).*step (?P<post>[A-Z])\", line)\n G.add_edge(match.group(\"pre\"), match.group(\"post\"))\n\n\ndef part_one():\n \"\"\"Solution to Part 1\"\"\"\n return \"\".join(networkx.lexicographical_topological_sort(G))\n\n\ndef part_two():\n \"\"\"Solution to Part 2\"\"\"\n tasks = {}\n current_time = 0\n while G.nodes():\n # noinspection PyCallingNonCallable\n candidate_next_tasks = [task for task in G.nodes()\n if task not in tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[min_task_time]\n tasks = {k: v - min_task_time for k, v in tasks.items() if k != completed_task}\n G.remove_node(completed_task)\n return current_time\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
'''
Created on 5 Mar 2010
@author: oppianmatt
'''
# hook to find setup tools if not installed
try:
from ez_setup import use_setuptools
use_setuptools()
except ImportError:
pass
from setuptools import setup, find_packages
setup(
name = "django-defaultsite",
version = "1.1",
packages = find_packages('src'),
package_dir = {'': 'src'},
package_data={'': ['LICENSE']},
include_package_data=True,
zip_safe=False,
# metadata for upload to PyPI
author = "Oppian System Ltd",
author_email = "matt@oppian.com",
description = "django-defaultsiteSets the Site object in django to something better then example.com.",
license = 'LICENSE.txt',
keywords = "django site example.com",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
url = "http://oppian.com/labs/django-defaultsite/",
long_description=open('README.txt').read(),
)
|
normal
|
{
"blob_id": "5580e5942370c925b759b09675306cdfbc7dd4f1",
"index": 3633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\n<mask token>\nsetup(name='django-defaultsite', version='1.1', packages=find_packages(\n 'src'), package_dir={'': 'src'}, package_data={'': ['LICENSE']},\n include_package_data=True, zip_safe=False, author='Oppian System Ltd',\n author_email='matt@oppian.com', description=\n 'django-defaultsiteSets the Site object in django to something better then example.com.'\n , license='LICENSE.txt', keywords='django site example.com',\n classifiers=['Development Status :: 3 - Alpha',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Framework :: Django'], url=\n 'http://oppian.com/labs/django-defaultsite/', long_description=open(\n 'README.txt').read())\n",
"step-3": "<mask token>\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\nfrom setuptools import setup, find_packages\nsetup(name='django-defaultsite', version='1.1', packages=find_packages(\n 'src'), package_dir={'': 'src'}, package_data={'': ['LICENSE']},\n include_package_data=True, zip_safe=False, author='Oppian System Ltd',\n author_email='matt@oppian.com', description=\n 'django-defaultsiteSets the Site object in django to something better then example.com.'\n , license='LICENSE.txt', keywords='django site example.com',\n classifiers=['Development Status :: 3 - Alpha',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Framework :: Django'], url=\n 'http://oppian.com/labs/django-defaultsite/', long_description=open(\n 'README.txt').read())\n",
"step-4": "'''\nCreated on 5 Mar 2010\n\n@author: oppianmatt\n'''\n\n# hook to find setup tools if not installed\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\n\nfrom setuptools import setup, find_packages\nsetup(\n name = \"django-defaultsite\",\n version = \"1.1\",\n packages = find_packages('src'),\n package_dir = {'': 'src'},\n package_data={'': ['LICENSE']},\n include_package_data=True,\n zip_safe=False,\n \n # metadata for upload to PyPI\n author = \"Oppian System Ltd\",\n author_email = \"matt@oppian.com\",\n description = \"django-defaultsiteSets the Site object in django to something better then example.com.\",\n license = 'LICENSE.txt',\n keywords = \"django site example.com\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Framework :: Django',\n ],\n url = \"http://oppian.com/labs/django-defaultsite/\",\n long_description=open('README.txt').read(),\n)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
"""
Load API client for a Tool Registry Service (TRS) endpoint based
either on the GA4GH specification or an existing client library.
"""
import logging
from bravado.requests_client import RequestsClient
from ga4ghtest.core.config import trs_config
from .client import TRSClient
logger = logging.getLogger(__name__)
def _get_trs_opts(service_id):
"""
Look up stored parameters for tool registry services.
"""
return trs_config()[service_id]
def _init_http_client(service_id=None, opts=None):
"""
Initialize and configure HTTP requests client for selected service.
"""
if service_id:
opts = _get_trs_opts(service_id)
http_client = RequestsClient()
http_client.set_api_key(host=opts['host'],
api_key=opts['auth'],
param_in='header')
return http_client
class TRSInterface:
def toolsGet(self):
raise NotImplementedError
def metadataGet(self):
raise NotImplementedError
def toolsIdGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionGet(self, tool_id, tool_version):
raise NotImplementedError
def toolsIdVersionsGet(self, tool_id):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):
raise NotImplementedError
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):
raise NotImplementedError
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
raise NotImplementedError
class TRSAdapter(TRSInterface):
"""
Adapter class for TRS client functionality.
Args:
trs_client: ...
"""
def __init__(self, trs_client):
self.trs_client = trs_client
def toolsGet(self):
return self.trs_client.get_tools()
def metadataGet(self):
raise self.trs_client.get_tool_types()
def toolsIdGet(self, tool_id):
return self.trs_client.get_tool(tool_id)
def toolsIdVersionGet(self, tool_id, tool_version):
return self.trs_client.get_tool_version(tool_id, tool_version)
def toolsIdVersionsGet(self, tool_id):
return self.trs_client.get_tool_versions(tool_id)
def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):
return self.trs_client.get_tool_descriptor(tool_id, tool_version, descriptor_type)
def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):
return self.trs_client.get_relative_tool_descriptor(tool_id, tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):
return self.trs_client.get_tool_tests(tool_id, tool_version, descriptor_type, rel_path)
def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):
return self.trs_client.get_tools_with_relative_path(tool_id, tool_version, descriptor_type)
def toolsIdVersionsContainerGet(self, tool_id, tool_version):
return self.trs_client.get_tool_container_specs(tool_id, tool_version)
def load_trs_client(service_id, http_client=None):
"""Return an API client for the selected workflow execution service."""
trs_client = TRSClient(service=_get_trs_opts(service_id))
return TRSAdapter(trs_client)
|
normal
|
{
"blob_id": "d122267e1da2d9cf68d245148bb496dfba3e7d19",
"index": 4467,
"step-1": "<mask token>\n\n\nclass TRSInterface:\n\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n raise NotImplementedError\n <mask token>\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version,\n descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id,\n tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version,\n descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id,\n tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TRSInterface:\n\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n <mask token>\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n def toolsIdVersionsGet(self, tool_id):\n raise NotImplementedError\n <mask token>\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n raise NotImplementedError\n <mask token>\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version,\n descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id,\n tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version,\n descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id,\n tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TRSInterface:\n\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n\n def toolsIdGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n def toolsIdVersionsGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version,\n descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id,\n tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version,\n descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id,\n tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef _get_trs_opts(service_id):\n \"\"\"\n Look up stored parameters for tool registry services.\n \"\"\"\n return trs_config()[service_id]\n\n\ndef _init_http_client(service_id=None, opts=None):\n \"\"\"\n Initialize and configure HTTP requests client for selected service.\n \"\"\"\n if service_id:\n opts = _get_trs_opts(service_id)\n http_client = RequestsClient()\n http_client.set_api_key(host=opts['host'], api_key=opts['auth'],\n param_in='header')\n return http_client\n\n\nclass TRSInterface:\n\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n\n def toolsIdGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n def toolsIdVersionsGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id,\n tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version,\n descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id,\n tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id,\n tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version,\n descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version,\n descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version,\n descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id,\n tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\ndef load_trs_client(service_id, http_client=None):\n \"\"\"Return an API client for the selected workflow execution service.\"\"\"\n trs_client = TRSClient(service=_get_trs_opts(service_id))\n return TRSAdapter(trs_client)\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nLoad API client for a Tool Registry Service (TRS) endpoint based\neither on the GA4GH specification or an existing client library.\n\"\"\"\nimport logging\n\nfrom bravado.requests_client import RequestsClient\n\nfrom ga4ghtest.core.config import trs_config\nfrom .client import TRSClient\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_trs_opts(service_id):\n \"\"\"\n Look up stored parameters for tool registry services.\n \"\"\"\n return trs_config()[service_id]\n\n\ndef _init_http_client(service_id=None, opts=None):\n \"\"\"\n Initialize and configure HTTP requests client for selected service.\n \"\"\"\n if service_id:\n opts = _get_trs_opts(service_id)\n\n http_client = RequestsClient()\n\n http_client.set_api_key(host=opts['host'],\n api_key=opts['auth'],\n param_in='header')\n return http_client\n\n\nclass TRSInterface:\n def toolsGet(self):\n raise NotImplementedError\n\n def metadataGet(self):\n raise NotImplementedError\n\n def toolsIdGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n def toolsIdVersionsGet(self, tool_id):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):\n raise NotImplementedError\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):\n raise NotImplementedError\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n raise NotImplementedError\n\n\nclass TRSAdapter(TRSInterface):\n \"\"\"\n Adapter class for TRS client functionality.\n\n Args:\n trs_client: ...\n \"\"\"\n def __init__(self, trs_client):\n self.trs_client = trs_client\n\n def toolsGet(self):\n return self.trs_client.get_tools()\n\n def metadataGet(self):\n raise self.trs_client.get_tool_types()\n\n def toolsIdGet(self, tool_id):\n return self.trs_client.get_tool(tool_id)\n\n def toolsIdVersionGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_version(tool_id, tool_version)\n\n def toolsIdVersionsGet(self, tool_id):\n return self.trs_client.get_tool_versions(tool_id)\n\n def toolsIdVersionsVersionIdTypeDescriptorGet(self, tool_id, tool_version, descriptor_type):\n return self.trs_client.get_tool_descriptor(tool_id, tool_version, descriptor_type)\n\n def toolsIdVersionsVersionIdTypeDescriptorRelativePathGet(self, tool_id, tool_version, descriptor_type, rel_path):\n return self.trs_client.get_relative_tool_descriptor(tool_id, tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeTestsGet(self, tool_id, tool_version, descriptor_type, rel_path):\n return self.trs_client.get_tool_tests(tool_id, tool_version, descriptor_type, rel_path)\n\n def toolsIdVersionsVersionIdTypeFilesGet(self, tool_id, tool_version, descriptor_type):\n return self.trs_client.get_tools_with_relative_path(tool_id, tool_version, descriptor_type)\n\n def toolsIdVersionsContainerGet(self, tool_id, tool_version):\n return self.trs_client.get_tool_container_specs(tool_id, tool_version)\n\n\ndef load_trs_client(service_id, http_client=None):\n \"\"\"Return an API client for the selected workflow execution service.\"\"\"\n trs_client = 
TRSClient(service=_get_trs_opts(service_id))\n return TRSAdapter(trs_client)\n",
"step-ids": [
18,
21,
24,
27,
30
]
}
|
[
18,
21,
24,
27,
30
] |
from setuptools import setup, find_packages
setup(
name='spt_compute',
version='2.0.1',
description='Computational framework for the Streamflow Prediciton Tool',
long_description='Computational framework to ingest ECMWF ensemble runoff forcasts '
' or otherLand Surface Model forecasts;'
' generate input for and run the RAPID (rapid-hub.org) program'
' using HTCondor or Python\'s Multiprocessing; and upload to '
' CKAN in order to be used by the Streamflow Prediction Tool (SPT).'
' There is also an experimental option to use the AutoRoute program'
' for flood inundation mapping.',
keywords='ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',
author='Alan Dee Snow',
author_email='alan.d.snow@usace.army.mil',
url='https://github.com/erdc/spt_compute',
license='BSD 3-Clause',
packages=find_packages(),
install_requires=[
'numpy',
'netCDF4',
'pandas',
'RAPIDpy',
'tethys_dataset_services',
'xarray',
],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
extras_require={
'tests': [
'coveralls',
'pytest',
'pytest-cov',
],
},
)
|
normal
|
{
"blob_id": "53b6d30bf52c43daaebe8158002db1072e34f127",
"index": 7956,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='spt_compute', version='2.0.1', description=\n 'Computational framework for the Streamflow Prediciton Tool',\n long_description=\n \"Computational framework to ingest ECMWF ensemble runoff forcasts or otherLand Surface Model forecasts; generate input for and run the RAPID (rapid-hub.org) program using HTCondor or Python's Multiprocessing; and upload to CKAN in order to be used by the Streamflow Prediction Tool (SPT). There is also an experimental option to use the AutoRoute program for flood inundation mapping.\"\n , keywords=\n 'ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',\n author='Alan Dee Snow', author_email='alan.d.snow@usace.army.mil', url=\n 'https://github.com/erdc/spt_compute', license='BSD 3-Clause', packages\n =find_packages(), install_requires=['numpy', 'netCDF4', 'pandas',\n 'RAPIDpy', 'tethys_dataset_services', 'xarray'], classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3'], extras_require={'tests': [\n 'coveralls', 'pytest', 'pytest-cov']})\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='spt_compute', version='2.0.1', description=\n 'Computational framework for the Streamflow Prediciton Tool',\n long_description=\n \"Computational framework to ingest ECMWF ensemble runoff forcasts or otherLand Surface Model forecasts; generate input for and run the RAPID (rapid-hub.org) program using HTCondor or Python's Multiprocessing; and upload to CKAN in order to be used by the Streamflow Prediction Tool (SPT). There is also an experimental option to use the AutoRoute program for flood inundation mapping.\"\n , keywords=\n 'ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',\n author='Alan Dee Snow', author_email='alan.d.snow@usace.army.mil', url=\n 'https://github.com/erdc/spt_compute', license='BSD 3-Clause', packages\n =find_packages(), install_requires=['numpy', 'netCDF4', 'pandas',\n 'RAPIDpy', 'tethys_dataset_services', 'xarray'], classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3'], extras_require={'tests': [\n 'coveralls', 'pytest', 'pytest-cov']})\n",
"step-4": "from setuptools import setup, find_packages\n\nsetup(\n name='spt_compute',\n version='2.0.1',\n description='Computational framework for the Streamflow Prediciton Tool',\n long_description='Computational framework to ingest ECMWF ensemble runoff forcasts '\n ' or otherLand Surface Model forecasts;'\n ' generate input for and run the RAPID (rapid-hub.org) program'\n ' using HTCondor or Python\\'s Multiprocessing; and upload to '\n ' CKAN in order to be used by the Streamflow Prediction Tool (SPT).'\n ' There is also an experimental option to use the AutoRoute program'\n ' for flood inundation mapping.',\n keywords='ECMWF, WRF, RAPID, Flood Prediction, Streamflow Prediction Tool',\n author='Alan Dee Snow',\n author_email='alan.d.snow@usace.army.mil',\n url='https://github.com/erdc/spt_compute',\n license='BSD 3-Clause',\n packages=find_packages(),\n install_requires=[\n 'numpy',\n 'netCDF4',\n 'pandas',\n 'RAPIDpy',\n 'tethys_dataset_services',\n 'xarray',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n ],\n extras_require={\n 'tests': [\n 'coveralls',\n 'pytest',\n 'pytest-cov',\n ],\n },\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
from elftools.elf.elffile import ELFFile
from capstone import *
def process_file(filename):
with open(filename, 'rb') as f:
elffile = ELFFile(f)
code = elffile.get_section_by_name('.text')
rodata = elffile.get_section_by_name('.rodata')
plt = elffile.get_section_by_name('.plt')
data = elffile.get_section_by_name('.data')
bss = elffile.get_section_by_name('.bss')
opcodes = code.data()
addr = code['sh_addr']
#print "Entry point: {0}".format(hex(elffile.header['e_entry']))
md = Cs(CS_ARCH_X86, CS_MODE_64)
for i in md.disasm(opcodes, addr):
print "0x%x:\t%s\t%s\t" %(i.address, i.mnemonic, i.op_str)
print "\n\nrodata:\n"
print rodata.data()
print "\n\nplt\n"
print plt.data()
print "\n\ndata\n"
print data.data()
print "\n\nbss\n"
print bss.data()
if __name__ == '__main__':
if len(sys.argv) == 2:
process_file(sys.argv[1])
|
normal
|
{
"blob_id": "5bfaadcd54aaf239d0d89158bfb723c0174c56b1",
"index": 9176,
"step-1": "import sys\nfrom elftools.elf.elffile import ELFFile\nfrom capstone import *\n\ndef process_file(filename):\n with open(filename, 'rb') as f:\n elffile = ELFFile(f)\n code = elffile.get_section_by_name('.text')\n rodata = elffile.get_section_by_name('.rodata')\n plt = elffile.get_section_by_name('.plt')\n data = elffile.get_section_by_name('.data')\n bss = elffile.get_section_by_name('.bss')\n\n opcodes = code.data()\n addr = code['sh_addr']\n #print \"Entry point: {0}\".format(hex(elffile.header['e_entry']))\n md = Cs(CS_ARCH_X86, CS_MODE_64)\n for i in md.disasm(opcodes, addr):\n print \"0x%x:\\t%s\\t%s\\t\" %(i.address, i.mnemonic, i.op_str)\n print \"\\n\\nrodata:\\n\"\n print rodata.data()\n print \"\\n\\nplt\\n\"\n print plt.data()\n print \"\\n\\ndata\\n\"\n print data.data()\n print \"\\n\\nbss\\n\"\n print bss.data()\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n process_file(sys.argv[1])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Procedures for automatic COBD calculation.
# The useful ones are:
# - get_heuristic4_OBD() as a heuristic one [the only heuristic one here that does not miss out solutions]
# - getOBD2plus4() as the fastest exhaustive one [uses two filtering techniques for early detection of graphs without an OBD]
import itertools
import time
import pickle
import numpy
import networkx as nx
import matplotlib.pyplot as plt
def insertOBDlabels(P, obd):
allOK = True
for n in P.nodes():
label = None
for i in range(len(obd)): # obd is a list of elements (lists), if n is in i-th element, then i is its label
if n in obd[i]:
label = i
if label == None:
allOK = False
print "Warning: not all nodes are in the provided OBD."
break
P.node[n]['OBDlabel'] = label
return allOK
def OBDnodeCondition(n, P):
"""assumes that nodes have ['OBDlabel'] set already (this is why insertOBDlabels() must be called beforehand) """
condition = True
higherNeighborLabel = None
for neigh in P.neighbors(n):
if P.node[neigh]['OBDlabel'] == P.node[n]['OBDlabel']:
condition = False
break
elif P.node[neigh]['OBDlabel'] > P.node[n]['OBDlabel']:
if higherNeighborLabel == None:
higherNeighborLabel = P.node[neigh]['OBDlabel']
else:
if P.node[neigh]['OBDlabel'] != higherNeighborLabel:
condition = False
break
return condition
def OBDcorrect(P, obd):
correct = True
ans = insertOBDlabels(P, obd) # adds 'OBDlabel' to each node in P, according to decomposition obd
if ans == False:
correct = False
else:
for n in P.nodes():
if not OBDnodeCondition(n, P): #do all the neighbors have different labels, and all with higher label have the same one?
correct = False
break
return correct
def connectedOBD(P, obd):
    '''test whether the obd is such that each node with a higher level is connected to some node with a lower level (needed by our depth-first style of algorithm)'''
connected = True
seen = []
if len(obd[0]) > 1:
connected = False
##print "Warning: more than one root element in obd."
else:
seen.append(obd[0][0])
for i in range(len(obd)):
if i == 0:
pass
else:
for el in obd[i]:
test = False
neighbors = P.neighbors(el)
for neigh in neighbors:
if neigh in seen:
test = True
if test == False:
connected = False
else:
seen.append(el)
return connected
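# For illustration (hypothetical 3-node path pattern P with edges 1-2 and 2-3):
#   connectedOBD(P, [[2], [1], [3]]) -> True   (nodes 1 and 3 each touch the already-seen node 2)
#   connectedOBD(P, [[1], [3], [2]]) -> False  (node 3 has no neighbor among the nodes seen before it)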
# Create all possible permutations of the element IDs, and on each permutation try all possible splits - first with len(P) parts (optimal) and then with fewer.
def split_list(data, n):
#""" splits a list into n parts in all possible ways
#>>> list(split_list([1, 2, 3, 4], 2))
#[[[1], [2, 3, 4]], [[1, 2], [3, 4]], [[1, 2, 3], [4]]]
#>>> list(split_list([1, 2, 3, 4], 3))
#[[[1], [2], [3, 4]], [[1], [2, 3], [4]], [[1, 2], [3], [4]]]"""
from itertools import combinations, chain
for splits in combinations(range(1, len(data)), n-1):
result = []
prev = None
for split in chain(splits, [None]):
result.append(data[prev:split])
prev = split
yield result
def getOBD(P):
result = None
found = False
IDs = []
for n in P.nodes():
IDs.append(P.node[n]['id'])
# we will try with largest possible decomposition size and then go lower, if nothing is found
decomp_size = len(IDs)
while decomp_size > 0:
# now we go over all possible permutations of IDs
permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again
for perm in permutations:
splits = split_list(list(perm), decomp_size)
for s in splits:
# now this is our candidate OBD
if ( OBDcorrect(P, s) and connectedOBD(P, s) ): # connectedOBD is additional condition because of our depth-first approach
result = s
found = True
if found == True: break;
if found == True: break;
if found == True: break;
decomp_size = decomp_size -1
if found == False:
##print "OBD was not found for this pattern."
result = None
return result
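# Minimal usage sketch (assumes the networkx 1.x attribute style, P.node[n][...], used throughout this file):
# build a small path pattern whose node names double as their 'id' attribute and ask for an OBD.
def _example_getOBD_usage():
    P = nx.Graph()
    P.add_edges_from([(1, 2), (2, 3)])
    for n in P.nodes():
        P.node[n]['id'] = n
    obd = getOBD(P)
    print obd  # e.g. [[1], [2], [3]]
    print OBDcorrect(P, obd), connectedOBD(P, obd)  # True True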
#------------------------------HEURISTIC 1--------------------------------
def heuristic1_label_OBD(n, P, current_label):
P.node[n]['OBDlabel'] = current_label
current_label = current_label + 1
neighbors = P.neighbors(n)
for neigh in neighbors:
if 'OBDlabel' in P.node[neigh].keys():
if P.node[neigh]['OBDlabel'] > current_label:
current_label = P.node[neigh]['OBDlabel']
# we got maximum of current label or any node that neighbors have - now we label them all with that
for neigh in neighbors:
if 'OBDlabel' in P.node[neigh].keys():
if P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']:
heuristic1_label_OBD(neigh, P, current_label)
else: # if set and smaller than mine, leave them alone
pass
else: # if not set, then not lower and not labelled
heuristic1_label_OBD(neigh, P, current_label)
def produceOBDlist(P):
"""expects pattern P which has OBDlabel set for all the nodes. OBDlist is created accoring to labels (some might be skipped! so this is taken into account)"""
# first we'll get all OBD labels, so that we can see how many different ones are there...
output = []
OBDlabels = set() # set, so that we do not collect duplicate labels
for n in P.nodes():
OBDlabels.add(P.node[n]['OBDlabel'])
OBDlabels = list(OBDlabels) # now we have a list of labels without duplicates
OBDlabels.sort() # in-place sorting (OBDlabels is changed)
for el in OBDlabels:
innerlist = []
for n in P.nodes():
if P.node[n]['OBDlabel'] == el:
innerlist.append(n)
output.append(innerlist)
return output
def get_heuristic1_OBD(P):
heuristic1_label_OBD(P.nodes()[0], P, 1)
obd = produceOBDlist(P)
if ( OBDcorrect(P, obd) and connectedOBD(P, obd) ):
return obd
else:
return None
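# Usage sketch: a plausible way to combine the pieces above (not prescribed by this module) -
# try the cheap heuristic first and fall back to the exhaustive search only when it finds nothing.
def _example_heuristic_then_exact(P):
    obd = get_heuristic1_OBD(P)
    if obd is None:
        obd = getOBD(P)  # exhaustive fallback, can be slow for larger patterns
    return obd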
# result will be put into ['OBDlabel'] of the nodes in P, so you then have to build the proper list format from those labels...
#------------------------------HEURISTIC 2--------------------------------
def heuristic2_label_OBD(n, P, label, critical=None):
"""heuristic approach with backtracking"""
print "trying to label " + str(n) + " with " + str(label)
nodes_labeled = []
if ('critical' in P.node[n].keys()) and (P.node[n]['critical']==True) and (P.node[n]['OBDlabel'] != label) :
print "FAIL on critical and not the same label."
return (False, []) # being critical, we could avoid failure only if the label to set would be the same (it happens)
else:
P.node[n]['OBDlabel'] = label
nodes_labeled.append(n) # this is a list that gets passed through recursions
if critical == True:
P.node[n]['critical'] = True
# labeling part done
flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)
new_label = label + 1
neighbors = P.neighbors(n)
for neigh in neighbors:
if 'OBDlabel' in P.node[neigh].keys():
if P.node[neigh]['OBDlabel'] > new_label:
new_label = P.node[neigh]['OBDlabel']
# we got maximum of current label or any node that neighbors have - now we label them all with that
neighbors_to_label = []
for neigh in neighbors:
if 'OBDlabel' in P.node[neigh].keys():
            if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because labels are removed after failures)
neighbors_to_label.append(neigh)
else: # if set and smaller than mine, leave them alone
pass
else: # if not set, then not lower and not labelled
neighbors_to_label.append(neigh)
# now we have all the neighbors that need to be labeled
if len(neighbors_to_label) > 1:
flag_critical = True
# and now the recursive step - labeling all these nodes
permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements
for perm in permutations:
print "trying perm: " + str(perm)
this_run_success = True
this_run_labeled = []
for el in perm:
(s, nl) = heuristic2_label_OBD(el, P, new_label, flag_critical)
this_run_labeled = this_run_labeled + nl
if s == False:
this_run_success = False
break
if this_run_success == False:
# then unlabel all that were labelled up to now
for nn in this_run_labeled:
print "removing label of " + str(nn)
P.node[nn]['OBDlabel'] = None
P.node[nn]['critical'] = False
else: # obviously success is True, we managed to label all others...
nodes_labeled = nodes_labeled + this_run_labeled
print "Win in labeling neighbors of " + str(n)
return (True, nodes_labeled)
    # if no permutation is successful, we report failure
    print "FAIL of all permutations from " + str(n)
    return (False, nodes_labeled)
def get_heuristic2_OBD(P):
heuristic2_label_OBD(P.nodes()[0], P, 1)
#------------------------------HEURISTIC 2B--------------------------------
def heuristic2B_label_OBD(n, P, label, critical=None):
"""heuristic approach with backtracking"""
nodes_labeled = []
flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)
new_label = label + 1
neighbors = P.neighbors(n)
for neigh in neighbors:
if 'OBDlabel' in P.node[neigh].keys(): # if it has a label
if P.node[neigh]['OBDlabel'] > new_label: # and it is higher than what I would use for labeling
new_label = P.node[neigh]['OBDlabel']
# we got maximum of current label or any node that neighbors have - now we label them all with that
neighbors_to_label = []
for neigh in neighbors:
if 'OBDlabel' in P.node[neigh].keys():
            if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because labels are removed after failures)
neighbors_to_label.append(neigh)
else: # if set and smaller than mine, leave them alone
pass
else: # if not set, then not lower and not labelled
neighbors_to_label.append(neigh)
# now we have all the neighbors that need to be labeled
if len(neighbors_to_label) > 1:
flag_critical = True
# and now labeling all these nodes
for neigh in neighbors_to_label:
if ('critical' in P.node[neigh].keys()) and (P.node[neigh]['critical']==True) and (P.node[neigh]['OBDlabel'] != new_label) :
return (False, nodes_labeled) # being critical, we could avoid failure only if the label to set would be the same (it happens)
else:
P.node[neigh]['OBDlabel'] = new_label
nodes_labeled.append(neigh) # this is a list that gets passed through recursions
if flag_critical == True:
P.node[neigh]['critical'] = True
# labeling part done
# and now recursive step - going into each neighbor to continue, in any order if necessary
permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements
for perm in permutations:
this_run_success = True
this_run_labeled = []
for el in perm:
(s, nl) = heuristic2B_label_OBD(el, P, new_label, flag_critical)
this_run_labeled = this_run_labeled + nl
if s == False:
this_run_success = False
if this_run_success == False:
# then unlabel all that were labelled up to now
for nn in this_run_labeled:
P.node[nn]['OBDlabel'] = None
P.node[nn]['critical'] = False
else: # obviously success is True, we managed to label all others...
nodes_labeled = nodes_labeled + this_run_labeled
return (True, nodes_labeled)
    # if no permutation is successful, we report failure
    return (False, nodes_labeled)
def get_heuristic2B_OBD(P):
# in this version we label the root before recursion
for n in P.nodes():
root = n
P.node[root]['OBDlabel'] = 1
(success, result) = heuristic2B_label_OBD(root, P, 1)
if success:
obd = produceOBDlist(P)
if ( OBDcorrect(P, obd) and connectedOBD(P, obd) ):
return obd
else:
for no in P.nodes():
P.node[no]['OBDlabel'] = None
P.node[no]['critical'] = False
else: # in case of failure of all attempts with this node as a root - we have to clean up all flags and labels before the new root is tried
for nn in P.nodes():
P.node[nn]['OBDlabel'] = None
P.node[nn]['critical'] = False
# if we did not return any solution before, then None was found
return None
#----------------------------------------------------------------------------------
#------------------------------exhaustive 2--------------------------------
def any_neighbors(nodelist, G):
"""If any two nodes in the nodelist are neighbors in graph G, it outputs TRUE, otherwise FALSE."""
outcome = False
#neighbors = P.neighbors(n)
for i in range(len(nodelist)):
for j in range(i+1, len(nodelist)):
if G.has_edge(nodelist[i], nodelist[j]) or G.has_edge(nodelist[j], nodelist[i]):
##if nodelist[j] in G.neighbors(nodelist[i]):
outcome = True
return outcome
return outcome
def getOBD2(P):
result = None
found = False
IDs = []
for n in P.nodes():
IDs.append(P.node[n]['id'])
# we will try with largest possible decomposition size and then go lower, if nothing is found
decomp_size = len(IDs)
while decomp_size > 0:
# now we go over all possible permutations of IDs
permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again
for perm in permutations:
splits = split_list(list(perm), decomp_size)
for s in splits:
# now this is our candidate OBD
# -------speedup A: checking for neighbors in elements of split
noneighbors = True
for nodelist in s:
if len(nodelist)>1:
if any_neighbors(nodelist, P):
noneighbors = False
# -------
if noneighbors and OBDcorrect(P, s) and connectedOBD(P, s): # connectedOBD is additional condition because of our depth-first approach
result = s
found = True
if found == True: break;
if found == True: break;
if found == True: break;
decomp_size = decomp_size -1
if found == False:
result = None
return result
#----------------------------------------------------------------------------------
#------------------------------exhaustive 3--------------------------------
def size_degree_check(obd, P):
"""for every node in OBD calculates its [degree(n) - linksToNodesAlreadyInOBD]
and verifies whether in the remaining part of OBD there is an element of at least that size (all bigger must have equal label)"""
outcome = True
flatOBD = [item for sublist in obd for item in sublist] # we get a flat list from a list of lists
seen = []
for i in range(len(flatOBD)):
n = flatOBD[i]
linksback = 0
for el in seen:
if P.has_edge(el, n) or P.has_edge(n, el):
linksback = linksback + 1
out_degree = P.degree(n) - linksback
# now verify whether we have such strength in the rest of obd
targetElement = None
for elobd in obd:
if n in elobd:
targetElement = elobd
        # we now know which element contains n - check from here on
remaining_obd = obd[obd.index(targetElement)+1:]
sizes = [len(x) for x in remaining_obd]
if (len(sizes)>0) and (max(sizes) < out_degree):
outcome = False
return outcome
seen.append(n)
return outcome
def getOBD3(P):
result = None
found = False
max_degree = max(list(P.degree().values()))
IDs = []
for n in P.nodes():
IDs.append(P.node[n]['id'])
# we will try with largest possible decomposition size and then go lower, if nothing is found
decomp_size = len(IDs)
while decomp_size > 0:
# now we go over all possible permutations of IDs
permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again
for perm in permutations:
splits = split_list(list(perm), decomp_size)
for s in splits:
# now this is our candidate OBD
# -------speedup B: checking sizes of decomposition elements against out-degrees
sizeCheck = size_degree_check(s, P)
# -------
if sizeCheck and OBDcorrect(P, s) and connectedOBD(P, s): # connectedOBD is additional condition because of our depth-first approach
result = s
found = True
if found == True: break;
if found == True: break;
if found == True: break;
decomp_size = decomp_size -1
if found == False:
result = None
return result
#----------------------------------------------------------------------------------
#------------------------------exhaustive 4--------------------------------
def any_triangles(G):
"""checks and outputs (True, False) whether there are any triangles in graph G"""
for x in G.nodes():
for y in G.nodes():
for z in G.nodes():
if (x != y) and (x !=z) and (y!=z):
if (G.has_edge(x, y) or G.has_edge(y, x)) and (G.has_edge(x, z) or G.has_edge(z, x)) and (G.has_edge(z, y) or G.has_edge(y, z)):
return True
# if all triplets were checked and we did not find a triangle, then we can only return False
return False
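
# Optional alternative (a sketch only, not used by the functions in this file): for an
# undirected nx.Graph the triangle test could be delegated to networkx itself; the
# brute-force version above is kept because the pattern may be loaded as a directed graph.
def _has_triangles_undirected(G):
    return sum(nx.triangles(G).values()) > 0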
def getOBD4(P):
if any_triangles(P):
return None
result = None
found = False
max_degree = max(list(P.degree().values()))
IDs = []
for n in P.nodes():
IDs.append(P.node[n]['id'])
# we will try with largest possible decomposition size and then go lower, if nothing is found
decomp_size = len(IDs)
while decomp_size > 0:
# now we go over all possible permutations of IDs
permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again
for perm in permutations:
splits = split_list(list(perm), decomp_size)
for s in splits:
# now this is our candidate OBD
if OBDcorrect(P, s) and connectedOBD(P, s): # connectedOBD is additional condition because of our depth-first approach
result = s
found = True
if found == True: break;
if found == True: break;
if found == True: break;
decomp_size = decomp_size -1
if found == False:
result = None
return result
#----------------------------------------------------------------------------------
#------------------------------exhaustive 2plus4--------------------------
def getOBD2plus4(P):
if any_triangles(P):
return None
result = None
found = False
IDs = []
for n in P.nodes():
IDs.append(P.node[n]['id'])
# we will try with largest possible decomposition size and then go lower, if nothing is found
decomp_size = len(IDs)
while decomp_size > 0:
# now we go over all possible permutations of IDs
permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again
for perm in permutations:
splits = split_list(list(perm), decomp_size)
for s in splits:
# now this is our candidate OBD
# -------speedup A: checking for neighbors in elements of split
noneighbors = True
for nodelist in s:
if len(nodelist)>1:
if any_neighbors(nodelist, P):
noneighbors = False
# -------
if noneighbors and OBDcorrect(P, s) and connectedOBD(P, s): # connectedOBD is additional condition because of our depth-first approach
result = s
found = True
if found == True: break;
if found == True: break;
if found == True: break;
decomp_size = decomp_size -1
if found == False:
result = None
return result
#----------------------------------------------------------------------------------
#------------------------------HEURISTIC 3--------------------------------
def to_graph(l):
""" l is a list of lists"""
G = nx.Graph()
for part in l:
# each sublist is a bunch of nodes
G.add_nodes_from(part)
        # it also implies a number of edges:
G.add_edges_from(to_edges(part))
return G
def to_edges(l):
"""
    treats `l` as a path of nodes and returns its edges
to_edges(['a','b','c','d']) -> [(a,b), (b,c),(c,d)]
"""
it = iter(l)
last = next(it)
for current in it:
yield last, current
last = current
#G = to_graph(l)
#print connected_components(G)
def partitions(set_):
if not set_:
yield []
return
for i in xrange(2**len(set_)/2):
parts = [set(), set()]
for item in set_:
parts[i&1].add(item)
i >>= 1
for b in partitions(parts[1]):
yield [parts[0]]+b
#for p in partitions(["a", "b", "c", "d"]):
#print p
def h3_step(d, P, label):
## print "started with decomp element %s" % str(d)
    # current decomposition element d of P; we want the complete decomposition from here onward
# d is a list like [2, 3]
# first we check if d has any neighbors:
if any_neighbors(d, P):
## print "Fail because neighbors detected in %s" % str(d)
return (False, [])
else:
#---now lets get the situation
labeledOnes = []
for n in d:
if (('OBDlabel' in P.node[n].keys()) and (P.node[n]['OBDlabel'] != None)):
labeledOnes.append(n)
if len(labeledOnes) == len(d):
return (True, []) # was done already from some other decomp. element
elif ((len(labeledOnes) < len(d)) and (len(labeledOnes) > 0)): # so, if some are labeled, but not all
return (False, [])
else: # none are labeled
for n in d:
P.node[n]['OBDlabel'] = label
new_label = label + 1
all_labeled = d
output = [d]
neighbors_to_d = [] # this will be a list of lists, for each element e in d it will hold e's neighbors that are not labeled yet
for el in d:
neighbors_to_d.append([x for x in P.neighbors(el) if (('OBDlabel' not in P.node[x].keys()) or (P.node[x]['OBDlabel']==None) or (P.node[x]['OBDlabel']>=P.node[el]['OBDlabel'])) ])
if neighbors_to_d == []:
## print "Success, because no more unlabeled neighbors for %s" % str(d)
return (True, [d])
#now we'll merge them according to connected components
tempG = to_graph(neighbors_to_d)
components = nx.connected_components(tempG)
# components contains all groups of nodes that can have different decomposition labels, at least according to local information
# we try with the most defragmented components, and then merge them (PARTITIONING) if it fails in later steps
# when all partitions are exhausted, we report failure back
indices = set(range(len(components))) # set of indices will be partitioned
## print "components: %s" % str(components)
## print "indices: %s" % str(indices)
for partits in partitions(indices):
for par in itertools.permutations(partits):
                    # par is one partition of indices, like: [ set([0]) , set([1]) , set([2]) ] or [ [0], [1,2] ], corresponding to e.g. [ [1], [2,3,4] ]
## print "trying par: %s" % str(par)
this_try = True # all decomposition elements in partition have to succeed
all_decomps = []
this_try_labeled = []
for d_next_inds in par:
d_next_inds = list(d_next_inds) # we make a list back from a set
# now we have to merge the components with these indices into a decomposition element candidate
d_next = []
for i in d_next_inds:
d_next = d_next + components[i]
# d_next is now the new candidate partition class
## print "and trying the next decomp candidate in next recursive step: %s" % str(d_next)
(success, partial_decomp) = h3_step(d_next, P, new_label)
if success == True:
all_decomps = all_decomps + partial_decomp
this_try_labeled = this_try_labeled + partial_decomp
# XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX problem: several possible solutions and not all elements are duplicates!!!
else:
this_try = False
if this_try == True: # obviously this partition was OK in recursions
output = output + all_decomps
## print "Success in recursion below. Outputting %s" % str(output)
return (True, output)
else:
for alist in this_try_labeled:
for nodeid in alist:
P.node[nodeid]['OBDlabel'] = None
# if we came to here it means all partitions of indices of components were exhausted without solution
## print "Fail because all options exhausted"
return (False, output)
def get_heuristic3_OBD(P):
#
for n in P.nodes():
root = n
(success, result) = h3_step([root], P, 1)
if success:
#----might have duplicates, so we'll remove them
nice_result = []
for el in result:
if el not in nice_result:
nice_result.append(el)
## print "as success we get OBD: %s" % str(nice_result)
if ( OBDcorrect(P, nice_result) and connectedOBD(P, nice_result) ):
return nice_result
else:
pass
## print "The produced OBD was either not correct or not connected"
## print "----------------------------------"
#----cleaning after this root node was not successful
for nn in P.nodes():
if ('OBDlabel' in P.node[nn].keys()):
P.node[nn]['OBDlabel'] = None
#-----------------
# if we did not return any solution before, then None was found
return None
#----------------------------------------------------------------------------------
#------------HEURISTIC 4 ---------------------------------------------
def get_components(partOBD, P):
flat_partialOBD = [item for sublist in partOBD for item in sublist] # we get a flat list from a list of lists
#
meta_neighbors = [] # this will contain all contents of neighbors_to_d for all d-s
for d in partOBD:
neighbors_to_d = [] # this will be a list of lists, for each element e in d it will hold e's neighbors that are not labeled yet
for el in d:
neighbors_to_d.append([x for x in P.neighbors(el) if (x not in flat_partialOBD)])
meta_neighbors = meta_neighbors + neighbors_to_d
#now we'll merge them according to connected components
tempG = to_graph(meta_neighbors)
components = nx.connected_components(tempG)
return components
def labelon(partialOBD, P):
## print "came into labelon() with partialOBD: %s" % str(partialOBD)
flat_partialOBD = [item for sublist in partialOBD for item in sublist] # we get a flat list from a list of lists
if len(flat_partialOBD) == len(P.nodes()): # check for the end of recursion
## print "and YES, we are at recursion end"
if ( OBDcorrect(P, partialOBD) and connectedOBD(P, partialOBD) ):
## print "and even correct and connected - FINISH."
return partialOBD
else:
## print "but not correct OBD or not connected"
return None
    else: # get all candidates to continue (next connected components) and try all of them
components = list(get_components(partialOBD, P))
# now to partialOBD we add each component separately, but also each possible merging of these components, including full merge
candidates = [] # this will hold all such candidates, each candidate is a list of vertices
for L in range(1, len(components)+1):
for subset in itertools.combinations(components, L):
cand = subset # but this is a list of lists - we have to flatten it
candFlat = [x for sub in cand for x in sub]
candidates.append(candFlat)
for c in candidates:
new_partial_OBD = partialOBD + [c]
## print "starting recursive call with new_partialOBD: %s" % str(new_partial_OBD)
result = labelon(new_partial_OBD, P)
## print "back from recursion call for new_partialOBD: %s" % str(new_partial_OBD)
## print "and result is: %s" % str(result)
if result != None:
return result
# if I came here without returning something , then nothing was found below me
return None
def get_heuristic4_OBD(P, startNode = None):
#
if startNode == None:
for n in P.nodes():
## print "starting with node %s" % str(n)
result = labelon([[n]], P)
if result != None:
return result
return None
else:
result = labelon([[startNode]], P)
if result != None:
return result
return None
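
# End-to-end sketch comparing the two recommended entry points (illustrative only;
# assumes networkx 1.x semantics: list-returning P.nodes() and the P.node attribute dict).
def _demo_obd_search():
    P = nx.cycle_graph(6)              # C6 contains no triangles and admits an OBD
    for n in P.nodes():
        P.node[n]['id'] = n            # the exhaustive search reads the 'id' attribute
    print getOBD2plus4(P)              # fastest exhaustive variant (None if no OBD exists)
    print get_heuristic4_OBD(P)        # heuristic that does not miss existing solutions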
####pattern_file_name = "pattern1.gml"
##pattern_file_name = "graph6c_15.gml"
## ./problemAnalysis/graph8c_random_663.gml
####P = nx.read_gml(pattern_file_name)
####print "reading done."
#pattern_file_name = "./graphs/7c/graph7c_104.gml"; P = nx.read_gml(pattern_file_name); get_heuristic3_OBD(P)
# OBdecomp = [ [0], [1] , [2, 3], [4], [5] ]
##start = time.time()
##res = get_heuristic1_OBD(P)
##stop = time.time()
##
##print res
##print "Calculation took %.2f seconds." % (stop-start)
# call with: > python OBDsearch.py patternX.gml [resultfile.obd] [computer_name]
##if __name__=="__main__":
## import sys
## pattern_file_name = sys.argv[1]
## result_file_name = None
## computer_name = None
## if len(sys.argv)>2:
## result_file_name = sys.argv[2]
## if len(sys.argv)>3:
## computer_name = sys.argv[3]
## P = nx.read_gml(pattern_file_name)
## start = time.time()
## obd = getOBD(P)
## stop = time.time()
## if obd != None:
## print obd
## else:
## print "None, OBD not found."
## if result_file_name != None:
## resultfile = open(result_file_name, 'w')
## resultfile.write(str(obd)); resultfile.write('\n')
## if computer_name !=None:
## resultfile.write("Finding OBD took %.2f seconds on %s." % (stop-start, computer_name))
## else:
## resultfile.write("Finding OBD took %.2f seconds." % (stop-start))
|
normal
|
{
"blob_id": "51711c9293f8b5d9dc4d299569da04e2d1bc0064",
"index": 1982,
"step-1": "\n\n# Procedures for automatic COBD calculation.\n# The useful ones are:\n# - get_heuristic4_OBD() as a heuristic one [the only heuristic one here that does not miss-out solutions]\n# - getOBD2plus4() as the fastest exhaustive one [uses two filtering techniques for early detection of graphs without an OBD]\n\nimport itertools\nimport time\nimport pickle\nimport numpy\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\ndef insertOBDlabels(P, obd):\n allOK = True\n for n in P.nodes():\n label = None\n for i in range(len(obd)): # obd is a list of elements (lists), if n is in i-th element, then i is its label\n if n in obd[i]:\n label = i\n if label == None:\n allOK = False\n print \"Warning: not all nodes are in the provided OBD.\"\n break\n P.node[n]['OBDlabel'] = label\n return allOK\n\n\ndef OBDnodeCondition(n, P):\n \"\"\"assumes that nodes have ['OBDlabel'] set already (this is why insertOBDlabels() must be called beforehand) \"\"\"\n condition = True\n higherNeighborLabel = None\n for neigh in P.neighbors(n):\n if P.node[neigh]['OBDlabel'] == P.node[n]['OBDlabel']:\n condition = False\n break\n elif P.node[neigh]['OBDlabel'] > P.node[n]['OBDlabel']:\n if higherNeighborLabel == None:\n higherNeighborLabel = P.node[neigh]['OBDlabel']\n else:\n if P.node[neigh]['OBDlabel'] != higherNeighborLabel:\n condition = False\n break\n return condition\n \n\ndef OBDcorrect(P, obd):\n correct = True\n ans = insertOBDlabels(P, obd) # adds 'OBDlabel' to each node in P, according to decomposition obd\n if ans == False:\n correct = False\n else:\n for n in P.nodes():\n if not OBDnodeCondition(n, P): #do all the neighbors have different labels, and all with higher label have the same one?\n correct = False\n break\n return correct\n\ndef connectedOBD(P, obd):\n '''test whether the obd is such, that each node with higher level is connected to some node with lower level (needed in our depth-first kind of algorithm)'''\n connected = True\n seen = []\n if len(obd[0]) > 1:\n connected = False\n ##print \"Warning: more than one root element in obd.\"\n else:\n seen.append(obd[0][0])\n for i in range(len(obd)):\n if i == 0:\n pass\n else:\n for el in obd[i]:\n test = False\n neighbors = P.neighbors(el)\n for neigh in neighbors:\n if neigh in seen:\n test = True\n if test == False:\n connected = False\n else:\n seen.append(el)\n return connected\n\n\n\n# create all possible permutations of elements (IDs) - and on each permutation then try all possible splits....first with len(P) parts (optimal) and then lower.\ndef split_list(data, n):\n#\"\"\" splits a list into n parts in all possible ways\n#>>> list(split_list([1, 2, 3, 4], 2))\n#[[[1], [2, 3, 4]], [[1, 2], [3, 4]], [[1, 2, 3], [4]]]\n#>>> list(split_list([1, 2, 3, 4], 3))\n#[[[1], [2], [3, 4]], [[1], [2, 3], [4]], [[1, 2], [3], [4]]]\"\"\"\n from itertools import combinations, chain\n for splits in combinations(range(1, len(data)), n-1):\n result = []\n prev = None\n for split in chain(splits, [None]):\n result.append(data[prev:split])\n prev = split\n yield result\n\n\ndef getOBD(P):\n result = None\n found = False\n IDs = []\n for n in P.nodes():\n IDs.append(P.node[n]['id'])\n # we will try with largest possible decomposition size and then go lower, if nothing is found\n decomp_size = len(IDs)\n while decomp_size > 0:\n # now we go over all possible permutations of IDs\n permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again\n for perm in permutations:\n splits = split_list(list(perm), 
decomp_size)\n for s in splits:\n # now this is our candidate OBD\n if ( OBDcorrect(P, s) and connectedOBD(P, s) ): # connectedOBD is additional condition because of our depth-first approach\n result = s\n found = True\n if found == True: break;\n if found == True: break;\n if found == True: break;\n decomp_size = decomp_size -1\n if found == False:\n ##print \"OBD was not found for this pattern.\"\n result = None\n return result\n\n#------------------------------HEURISTIC 1--------------------------------\n\ndef heuristic1_label_OBD(n, P, current_label):\n P.node[n]['OBDlabel'] = current_label\n current_label = current_label + 1\n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if P.node[neigh]['OBDlabel'] > current_label:\n current_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']:\n heuristic1_label_OBD(neigh, P, current_label)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n heuristic1_label_OBD(neigh, P, current_label)\n\ndef produceOBDlist(P):\n \"\"\"expects pattern P which has OBDlabel set for all the nodes. OBDlist is created accoring to labels (some might be skipped! so this is taken into account)\"\"\"\n # first we'll get all OBD labels, so that we can see how many different ones are there...\n output = []\n OBDlabels = set() # set, so that we do not collect duplicate labels\n for n in P.nodes():\n OBDlabels.add(P.node[n]['OBDlabel'])\n OBDlabels = list(OBDlabels) # now we have a list of labels without duplicates\n OBDlabels.sort() # in-place sorting (OBDlabels is changed)\n for el in OBDlabels:\n innerlist = []\n for n in P.nodes():\n if P.node[n]['OBDlabel'] == el:\n innerlist.append(n)\n output.append(innerlist)\n return output\n\n\ndef get_heuristic1_OBD(P):\n heuristic1_label_OBD(P.nodes()[0], P, 1)\n obd = produceOBDlist(P)\n if ( OBDcorrect(P, obd) and connectedOBD(P, obd) ): \n return obd\n else:\n return None\n # result will be put into ['OBDlabel'] of nodes in P, so you have to create then the proper format...\n\n\n#------------------------------HEURISTIC 2--------------------------------\n\ndef heuristic2_label_OBD(n, P, label, critical=None):\n \"\"\"heuristic approach with backtracking\"\"\"\n print \"trying to label \" + str(n) + \" with \" + str(label)\n nodes_labeled = []\n if ('critical' in P.node[n].keys()) and (P.node[n]['critical']==True) and (P.node[n]['OBDlabel'] != label) :\n print \"FAIL on critical and not the same label.\"\n return (False, []) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[n]['OBDlabel'] = label\n nodes_labeled.append(n) # this is a list that gets passed through recursions\n if critical == True:\n P.node[n]['critical'] = True\n # labeling part done\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if P.node[neigh]['OBDlabel'] > new_label:\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n neighbors_to_label = []\n for neigh in neighbors:\n if 
'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now the recursive step - labeling all these nodes\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n print \"trying perm: \" + str(perm)\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n break\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n print \"removing label of \" + str(nn)\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n print \"Win in labeling neighbors of \" + str(n)\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)\n print \"FAIL of all permutations from \" + str(n)\n\n\n\n\ndef get_heuristic2_OBD(P):\n heuristic2_label_OBD(P.nodes()[0], P, 1)\n\n\n#------------------------------HEURISTIC 2B--------------------------------\n\ndef heuristic2B_label_OBD(n, P, label, critical=None):\n \"\"\"heuristic approach with backtracking\"\"\"\n nodes_labeled = []\n\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n \n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys(): # if it has a label\n if P.node[neigh]['OBDlabel'] > new_label: # and it is higher than what I would use for labeling\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n \n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n \n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now labeling all these nodes\n \n for neigh in neighbors_to_label:\n if ('critical' in P.node[neigh].keys()) and (P.node[neigh]['critical']==True) and (P.node[neigh]['OBDlabel'] != new_label) :\n return (False, nodes_labeled) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[neigh]['OBDlabel'] = new_label\n nodes_labeled.append(neigh) # this is a list that gets passed through recursions\n if flag_critical == True:\n P.node[neigh]['critical'] = True\n # labeling part done\n \n # and now recursive step - going into each neighbor to 
continue, in any order if necessary\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2B_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)\n\n\ndef get_heuristic2B_OBD(P):\n # in this version we label the root before recursion\n for n in P.nodes():\n root = n\n P.node[root]['OBDlabel'] = 1\n (success, result) = heuristic2B_label_OBD(root, P, 1)\n if success:\n obd = produceOBDlist(P)\n if ( OBDcorrect(P, obd) and connectedOBD(P, obd) ): \n return obd\n else:\n for no in P.nodes():\n P.node[no]['OBDlabel'] = None\n P.node[no]['critical'] = False\n else: # in case of failure of all attempts with this node as a root - we have to clean up all flags and labels before the new root is tried\n for nn in P.nodes():\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n # if we did not return any solution before, then None was found\n return None\n\n\n\n#----------------------------------------------------------------------------------\n#------------------------------exhaustive 2--------------------------------\n\ndef any_neighbors(nodelist, G):\n \"\"\"If any two nodes in the nodelist are neighbors in graph G, it outputs TRUE, otherwise FALSE.\"\"\"\n outcome = False\n #neighbors = P.neighbors(n)\n for i in range(len(nodelist)):\n for j in range(i+1, len(nodelist)):\n if G.has_edge(nodelist[i], nodelist[j]) or G.has_edge(nodelist[j], nodelist[i]):\n ##if nodelist[j] in G.neighbors(nodelist[i]):\n outcome = True\n return outcome\n return outcome\n\n\n\ndef getOBD2(P):\n result = None\n found = False\n IDs = []\n for n in P.nodes():\n IDs.append(P.node[n]['id'])\n # we will try with largest possible decomposition size and then go lower, if nothing is found\n decomp_size = len(IDs)\n while decomp_size > 0:\n # now we go over all possible permutations of IDs\n permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again\n for perm in permutations:\n splits = split_list(list(perm), decomp_size)\n for s in splits:\n # now this is our candidate OBD\n # -------speedup A: checking for neighbors in elements of split\n noneighbors = True\n for nodelist in s:\n if len(nodelist)>1:\n if any_neighbors(nodelist, P):\n noneighbors = False\n # -------\n if noneighbors and OBDcorrect(P, s) and connectedOBD(P, s): # connectedOBD is additional condition because of our depth-first approach\n result = s\n found = True\n if found == True: break;\n if found == True: break;\n if found == True: break;\n decomp_size = decomp_size -1\n if found == False:\n result = None\n return result\n#----------------------------------------------------------------------------------\n\n#------------------------------exhaustive 3--------------------------------\n\ndef size_degree_check(obd, P):\n \"\"\"for every node in OBD calculates its [degree(n) - linksToNodesAlreadyInOBD]\n and verifies whether in the 
remaining part of OBD there is an element of at least that size (all bigger must have equal label)\"\"\"\n outcome = True\n flatOBD = [item for sublist in obd for item in sublist] # we get a flat list from a list of lists\n seen = []\n for i in range(len(flatOBD)):\n n = flatOBD[i]\n linksback = 0\n for el in seen:\n if P.has_edge(el, n) or P.has_edge(n, el):\n linksback = linksback + 1\n out_degree = P.degree(n) - linksback\n # now verify whether we have such strength in the rest of obd\n targetElement = None\n for elobd in obd:\n if n in elobd:\n targetElement = elobd\n # we now in which element is n - now check from here on\n remaining_obd = obd[obd.index(targetElement)+1:]\n sizes = [len(x) for x in remaining_obd]\n if (len(sizes)>0) and (max(sizes) < out_degree):\n outcome = False\n return outcome\n seen.append(n)\n return outcome\n\n\ndef getOBD3(P):\n result = None\n found = False\n max_degree = max(list(P.degree().values()))\n IDs = []\n for n in P.nodes():\n IDs.append(P.node[n]['id'])\n # we will try with largest possible decomposition size and then go lower, if nothing is found\n decomp_size = len(IDs)\n while decomp_size > 0:\n # now we go over all possible permutations of IDs\n permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again\n for perm in permutations:\n splits = split_list(list(perm), decomp_size)\n for s in splits:\n # now this is our candidate OBD\n # -------speedup B: checking sizes of decomposition elements against out-degrees\n sizeCheck = size_degree_check(s, P)\n # -------\n if sizeCheck and OBDcorrect(P, s) and connectedOBD(P, s): # connectedOBD is additional condition because of our depth-first approach\n result = s\n found = True\n if found == True: break;\n if found == True: break;\n if found == True: break;\n decomp_size = decomp_size -1\n if found == False:\n result = None\n return result\n#----------------------------------------------------------------------------------\n#------------------------------exhaustive 4--------------------------------\n\ndef any_triangles(G):\n \"\"\"checks and outputs (True, False) whether there are any triangles in graph G\"\"\"\n for x in G.nodes():\n for y in G.nodes():\n for z in G.nodes():\n if (x != y) and (x !=z) and (y!=z):\n if (G.has_edge(x, y) or G.has_edge(y, x)) and (G.has_edge(x, z) or G.has_edge(z, x)) and (G.has_edge(z, y) or G.has_edge(y, z)):\n return True\n # if all triplets were checked and we did not find a triangle, then we can only return False\n return False\n\n\ndef getOBD4(P):\n if any_triangles(P):\n return None\n result = None\n found = False\n max_degree = max(list(P.degree().values()))\n IDs = []\n for n in P.nodes():\n IDs.append(P.node[n]['id'])\n # we will try with largest possible decomposition size and then go lower, if nothing is found\n decomp_size = len(IDs)\n while decomp_size > 0:\n # now we go over all possible permutations of IDs\n permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again\n for perm in permutations:\n splits = split_list(list(perm), decomp_size)\n for s in splits:\n # now this is our candidate OBD\n if OBDcorrect(P, s) and connectedOBD(P, s): # connectedOBD is additional condition because of our depth-first approach\n result = s\n found = True\n if found == True: break;\n if found == True: break;\n if found == True: break;\n decomp_size = decomp_size -1\n if found == False:\n result = None\n return 
result\n#----------------------------------------------------------------------------------\n\n#------------------------------exhaustive 2plus4--------------------------\n\ndef getOBD2plus4(P):\n if any_triangles(P):\n return None \n result = None\n found = False\n IDs = []\n for n in P.nodes():\n IDs.append(P.node[n]['id'])\n # we will try with largest possible decomposition size and then go lower, if nothing is found\n decomp_size = len(IDs)\n while decomp_size > 0:\n # now we go over all possible permutations of IDs\n permutations = itertools.permutations(IDs) # this has to be recreated each time we go over it again\n for perm in permutations:\n splits = split_list(list(perm), decomp_size)\n for s in splits:\n # now this is our candidate OBD\n # -------speedup A: checking for neighbors in elements of split\n noneighbors = True\n for nodelist in s:\n if len(nodelist)>1:\n if any_neighbors(nodelist, P):\n noneighbors = False\n # -------\n if noneighbors and OBDcorrect(P, s) and connectedOBD(P, s): # connectedOBD is additional condition because of our depth-first approach\n result = s\n found = True\n if found == True: break;\n if found == True: break;\n if found == True: break;\n decomp_size = decomp_size -1\n if found == False:\n result = None\n return result\n#----------------------------------------------------------------------------------\n\n\n\n\n\n#------------------------------HEURISTIC 3--------------------------------\n\n\n\ndef to_graph(l):\n \"\"\" l is a list of lists\"\"\"\n G = nx.Graph()\n for part in l:\n # each sublist is a bunch of nodes\n G.add_nodes_from(part)\n # it also imlies a number of edges:\n G.add_edges_from(to_edges(part))\n return G\n\ndef to_edges(l):\n \"\"\" \n treat `l` as a Graph and returns it's edges \n to_edges(['a','b','c','d']) -> [(a,b), (b,c),(c,d)]\n \"\"\"\n it = iter(l)\n last = next(it)\n\n for current in it:\n yield last, current\n last = current \n\n#G = to_graph(l)\n#print connected_components(G)\n\ndef partitions(set_):\n if not set_:\n yield []\n return\n for i in xrange(2**len(set_)/2):\n parts = [set(), set()]\n for item in set_:\n parts[i&1].add(item)\n i >>= 1\n for b in partitions(parts[1]):\n yield [parts[0]]+b\n\n#for p in partitions([\"a\", \"b\", \"c\", \"d\"]):\n#print p\n\n\ndef h3_step(d, P, label):\n## print \"started with decomp element %s\" % str(d)\n # trenutna dekompozicija d na P, hocem celotno od tu dalje\n # d is a list like [2, 3]\n # first we check if d has any neighbors:\n if any_neighbors(d, P):\n## print \"Fail because neighbors detected in %s\" % str(d)\n return (False, [])\n else:\n #---now lets get the situation\n labeledOnes = []\n for n in d:\n if (('OBDlabel' in P.node[n].keys()) and (P.node[n]['OBDlabel'] != None)):\n labeledOnes.append(n)\n if len(labeledOnes) == len(d):\n return (True, []) # was done already from some other decomp. 
element\n elif ((len(labeledOnes) < len(d)) and (len(labeledOnes) > 0)): # so, if some are labeled, but not all\n return (False, [])\n else: # none are labeled\n for n in d:\n P.node[n]['OBDlabel'] = label\n new_label = label + 1\n all_labeled = d\n output = [d]\n neighbors_to_d = [] # this will be a list of lists, for each element e in d it will hold e's neighbors that are not labeled yet\n for el in d:\n neighbors_to_d.append([x for x in P.neighbors(el) if (('OBDlabel' not in P.node[x].keys()) or (P.node[x]['OBDlabel']==None) or (P.node[x]['OBDlabel']>=P.node[el]['OBDlabel'])) ])\n if neighbors_to_d == []:\n## print \"Success, because no more unlabeled neighbors for %s\" % str(d)\n return (True, [d])\n #now we'll merge them according to connected components\n tempG = to_graph(neighbors_to_d)\n components = nx.connected_components(tempG)\n # components contains all groups of nodes that can have different decomposition labels, at least according to local information\n # we try with the most defragmented components, and then merge them (PARTITIONING) if it fails in later steps\n # when all partitions are exhausted, we report failure back\n indices = set(range(len(components))) # set of indices will be partitioned\n## print \"components: %s\" % str(components)\n## print \"indices: %s\" % str(indices)\n for partits in partitions(indices):\n for par in itertools.permutations(partits):\n # par is one partition of indeces, like: [ set([0]) , set([1]) , set([2]) ] or [ [0], [1,2] ] that correspond to e.g. [ [1], [2,3,4] ]\n## print \"trying par: %s\" % str(par)\n this_try = True # all decomposition elements in partition have to succeed\n all_decomps = []\n this_try_labeled = []\n for d_next_inds in par:\n d_next_inds = list(d_next_inds) # we make a list back from a set\n # now we have to merge the components with these indices into a decomposition element candidate\n d_next = []\n for i in d_next_inds:\n d_next = d_next + components[i]\n # d_next is now the new candidate partition class\n## print \"and trying the next decomp candidate in next recursive step: %s\" % str(d_next)\n (success, partial_decomp) = h3_step(d_next, P, new_label)\n if success == True:\n all_decomps = all_decomps + partial_decomp\n this_try_labeled = this_try_labeled + partial_decomp\n # XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX problem: several possible solutions and not all elements are duplicates!!!\n else:\n this_try = False\n if this_try == True: # obviously this partition was OK in recursions\n output = output + all_decomps\n## print \"Success in recursion below. 
Outputting %s\" % str(output)\n return (True, output)\n else:\n for alist in this_try_labeled:\n for nodeid in alist:\n P.node[nodeid]['OBDlabel'] = None\n # if we came to here it means all partitions of indices of components were exhausted without solution\n## print \"Fail because all options exhausted\"\n return (False, output)\n\ndef get_heuristic3_OBD(P):\n #\n for n in P.nodes():\n root = n\n (success, result) = h3_step([root], P, 1)\n if success:\n #----might have duplicates, so we'll remove them\n nice_result = []\n for el in result:\n if el not in nice_result:\n nice_result.append(el)\n## print \"as success we get OBD: %s\" % str(nice_result)\n if ( OBDcorrect(P, nice_result) and connectedOBD(P, nice_result) ): \n return nice_result\n else:\n pass\n## print \"The produced OBD was either not correct or not connected\"\n## print \"----------------------------------\"\n #----cleaning after this root node was not successful\n for nn in P.nodes():\n if ('OBDlabel' in P.node[nn].keys()):\n P.node[nn]['OBDlabel'] = None\n #-----------------\n # if we did not return any solution before, then None was found\n return None\n\n\n\n\n#----------------------------------------------------------------------------------\n\n#------------HEURISTIC 4 ---------------------------------------------\n\ndef get_components(partOBD, P):\n flat_partialOBD = [item for sublist in partOBD for item in sublist] # we get a flat list from a list of lists\n #\n meta_neighbors = [] # this will contain all contents of neighbors_to_d for all d-s\n for d in partOBD:\n neighbors_to_d = [] # this will be a list of lists, for each element e in d it will hold e's neighbors that are not labeled yet\n for el in d:\n neighbors_to_d.append([x for x in P.neighbors(el) if (x not in flat_partialOBD)])\n meta_neighbors = meta_neighbors + neighbors_to_d\n #now we'll merge them according to connected components\n tempG = to_graph(meta_neighbors)\n components = nx.connected_components(tempG)\n return components\n \n\n\ndef labelon(partialOBD, P):\n## print \"came into labelon() with partialOBD: %s\" % str(partialOBD)\n flat_partialOBD = [item for sublist in partialOBD for item in sublist] # we get a flat list from a list of lists\n if len(flat_partialOBD) == len(P.nodes()): # check for the end of recursion\n## print \"and YES, we are at recursion end\"\n if ( OBDcorrect(P, partialOBD) and connectedOBD(P, partialOBD) ):\n## print \"and even correct and connected - FINISH.\"\n return partialOBD\n else:\n## print \"but not correct OBD or not connected\"\n return None\n else: # else: get all candidates to continue (next connected components) and try on all of them\n components = list(get_components(partialOBD, P))\n # now to partialOBD we add each component separately, but also each possible merging of these components, including full merge\n candidates = [] # this will hold all such candidates, each candidate is a list of vertices\n for L in range(1, len(components)+1):\n for subset in itertools.combinations(components, L):\n cand = subset # but this is a list of lists - we have to flatten it\n candFlat = [x for sub in cand for x in sub]\n candidates.append(candFlat)\n for c in candidates:\n new_partial_OBD = partialOBD + [c]\n## print \"starting recursive call with new_partialOBD: %s\" % str(new_partial_OBD)\n result = labelon(new_partial_OBD, P)\n## print \"back from recursion call for new_partialOBD: %s\" % str(new_partial_OBD)\n## print \"and result is: %s\" % str(result)\n if result != None:\n return result\n # if I came here without 
returning something , then nothing was found below me\n return None\n \n\ndef get_heuristic4_OBD(P, startNode = None):\n #\n if startNode == None:\n for n in P.nodes():\n ## print \"starting with node %s\" % str(n)\n result = labelon([[n]], P)\n if result != None:\n return result\n return None\n else:\n result = labelon([[startNode]], P)\n if result != None:\n return result\n return None\n\n\n\n\n####pattern_file_name = \"pattern1.gml\"\n##pattern_file_name = \"graph6c_15.gml\"\n## ./problemAnalysis/graph8c_random_663.gml\n####P = nx.read_gml(pattern_file_name)\n####print \"reading done.\"\n\n#pattern_file_name = \"./graphs/7c/graph7c_104.gml\"; P = nx.read_gml(pattern_file_name); get_heuristic3_OBD(P)\n\n# OBdecomp = [ [0], [1] , [2, 3], [4], [5] ]\n\n##start = time.time()\n##res = get_heuristic1_OBD(P)\n##stop = time.time()\n##\n##print res\n##print \"Calculation took %.2f seconds.\" % (stop-start)\n\n\n# call with: > python OBDsearch.py patternX.gml [resultfile.obd] [computer_name]\n##if __name__==\"__main__\":\n## import sys\n## pattern_file_name = sys.argv[1]\n## result_file_name = None\n## computer_name = None\n## if len(sys.argv)>2:\n## result_file_name = sys.argv[2]\n## if len(sys.argv)>3:\n## computer_name = sys.argv[3]\n## P = nx.read_gml(pattern_file_name)\n## start = time.time()\n## obd = getOBD(P)\n## stop = time.time()\n## if obd != None:\n## print obd\n## else:\n## print \"None, OBD not found.\"\n## if result_file_name != None:\n## resultfile = open(result_file_name, 'w')\n## resultfile.write(str(obd)); resultfile.write('\\n')\n## if computer_name !=None:\n## resultfile.write(\"Finding OBD took %.2f seconds on %s.\" % (stop-start, computer_name))\n## else:\n## resultfile.write(\"Finding OBD took %.2f seconds.\" % (stop-start))\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding:utf-8 -*-
# author:Kyseng
# file: cRandomString.py
# time: 2018/11/8 11:41 PM
# function:
import random
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
class cRandomString():
@staticmethod
def RandomTitle(name):
# name = name.decode('utf8')
# print name
platform = ["PS4", "XBOX", "PC", "NS", "IOS"]
random.shuffle(platform)
platform = "/".join(platform)
firstWord = ['Cool', 'Hot', 'New', '2018', 'Gift', '*Cool*', '*Hot*', '*New*', '$Cool$', '$Hot$', '$New$']
firstWord = random.choice(firstWord)
title = firstWord + ' 🤑 FREE Fortnite XXXX SKIN ' + platform
title = title.replace('XXXX', name)
return title
@staticmethod
def RandomDescription(name):
platform = ["PS4", "Xbox One", "PC", "Nintendo Switch", "IOS"]
random.shuffle(platform)
platform = ", ".join(platform)
description_temp = "Hey Guys!\n\nIn today's video I will show you how to get the XXXX skin for free in fortnite!\n\nThis is working on xbox, ps4, ios, pc and nintendo switch!\n\nThis method is 100% free and working as of 2018.\n\nThis is the best way to get a fortnite XXXX skin for free key code! \n\nThis is a working and legal method!\n\nHow To Get FREE SKINS In Fortnite: Battle Royale! [{0}]".format(platform)
description_final = description_temp.replace('XXXX', name)
return description_final
@staticmethod
def RandomTag(name):
tag_temp = "XXXX, XXXX fortnite, XXXX free, XXXX skin,fortnite XXXX skin free, how to get the XXXX skin, iPhone XXXX free skins, iPad XXXX free skins"
tag_final = tag_temp.replace('XXXX', name)
return tag_final
if __name__ == "__main__":
cRandomString.RandomDescription("123")
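    # illustrative extras (not in the original script): exercise the remaining helpers
    # with a hypothetical skin name
    print cRandomString.RandomTitle("Raven")
    print cRandomString.RandomTag("Raven")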
|
normal
|
{
"blob_id": "ed02cbf3ebef307d6209004e1e388312bfda0b50",
"index": 2027,
"step-1": "<mask token>\n\n\nclass cRandomString:\n\n @staticmethod\n def RandomTitle(name):\n platform = ['PS4', 'XBOX', 'PC', 'NS', 'IOS']\n random.shuffle(platform)\n platform = '/'.join(platform)\n firstWord = ['Cool', 'Hot', 'New', '2018', 'Gift', '*Cool*',\n '*Hot*', '*New*', '$Cool$', '$Hot$', '$New$']\n firstWord = random.choice(firstWord)\n title = firstWord + ' 🤑 FREE Fortnite XXXX SKIN ' + platform\n title = title.replace('XXXX', name)\n return title\n <mask token>\n\n @staticmethod\n def RandomTag(name):\n tag_temp = (\n 'XXXX, XXXX fortnite, XXXX free, XXXX skin,fortnite XXXX skin free, how to get the XXXX skin, iPhone XXXX free skins, iPad XXXX free skins'\n )\n tag_final = tag_temp.replace('XXXX', name)\n return tag_final\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass cRandomString:\n\n @staticmethod\n def RandomTitle(name):\n platform = ['PS4', 'XBOX', 'PC', 'NS', 'IOS']\n random.shuffle(platform)\n platform = '/'.join(platform)\n firstWord = ['Cool', 'Hot', 'New', '2018', 'Gift', '*Cool*',\n '*Hot*', '*New*', '$Cool$', '$Hot$', '$New$']\n firstWord = random.choice(firstWord)\n title = firstWord + ' 🤑 FREE Fortnite XXXX SKIN ' + platform\n title = title.replace('XXXX', name)\n return title\n\n @staticmethod\n def RandomDescription(name):\n platform = ['PS4', 'Xbox One', 'PC', 'Nintendo Switch', 'IOS']\n random.shuffle(platform)\n platform = ', '.join(platform)\n description_temp = (\n \"\"\"Hey Guys!\n\nIn today's video I will show you how to get the XXXX skin for free in fortnite!\n\nThis is working on xbox, ps4, ios, pc and nintendo switch!\n\nThis method is 100% free and working as of 2018.\n\nThis is the best way to get a fortnite XXXX skin for free key code! \n\nThis is a working and legal method!\n\nHow To Get FREE SKINS In Fortnite: Battle Royale! [{0}]\"\"\"\n .format(platform))\n description_final = description_temp.replace('XXXX', name)\n return description_final\n\n @staticmethod\n def RandomTag(name):\n tag_temp = (\n 'XXXX, XXXX fortnite, XXXX free, XXXX skin,fortnite XXXX skin free, how to get the XXXX skin, iPhone XXXX free skins, iPad XXXX free skins'\n )\n tag_final = tag_temp.replace('XXXX', name)\n return tag_final\n\n\n<mask token>\n",
"step-3": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass cRandomString:\n\n @staticmethod\n def RandomTitle(name):\n platform = ['PS4', 'XBOX', 'PC', 'NS', 'IOS']\n random.shuffle(platform)\n platform = '/'.join(platform)\n firstWord = ['Cool', 'Hot', 'New', '2018', 'Gift', '*Cool*',\n '*Hot*', '*New*', '$Cool$', '$Hot$', '$New$']\n firstWord = random.choice(firstWord)\n title = firstWord + ' 🤑 FREE Fortnite XXXX SKIN ' + platform\n title = title.replace('XXXX', name)\n return title\n\n @staticmethod\n def RandomDescription(name):\n platform = ['PS4', 'Xbox One', 'PC', 'Nintendo Switch', 'IOS']\n random.shuffle(platform)\n platform = ', '.join(platform)\n description_temp = (\n \"\"\"Hey Guys!\n\nIn today's video I will show you how to get the XXXX skin for free in fortnite!\n\nThis is working on xbox, ps4, ios, pc and nintendo switch!\n\nThis method is 100% free and working as of 2018.\n\nThis is the best way to get a fortnite XXXX skin for free key code! \n\nThis is a working and legal method!\n\nHow To Get FREE SKINS In Fortnite: Battle Royale! [{0}]\"\"\"\n .format(platform))\n description_final = description_temp.replace('XXXX', name)\n return description_final\n\n @staticmethod\n def RandomTag(name):\n tag_temp = (\n 'XXXX, XXXX fortnite, XXXX free, XXXX skin,fortnite XXXX skin free, how to get the XXXX skin, iPhone XXXX free skins, iPad XXXX free skins'\n )\n tag_final = tag_temp.replace('XXXX', name)\n return tag_final\n\n\nif __name__ == '__main__':\n cRandomString.RandomDescription('123')\n",
"step-4": "import random\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass cRandomString:\n\n @staticmethod\n def RandomTitle(name):\n platform = ['PS4', 'XBOX', 'PC', 'NS', 'IOS']\n random.shuffle(platform)\n platform = '/'.join(platform)\n firstWord = ['Cool', 'Hot', 'New', '2018', 'Gift', '*Cool*',\n '*Hot*', '*New*', '$Cool$', '$Hot$', '$New$']\n firstWord = random.choice(firstWord)\n title = firstWord + ' 🤑 FREE Fortnite XXXX SKIN ' + platform\n title = title.replace('XXXX', name)\n return title\n\n @staticmethod\n def RandomDescription(name):\n platform = ['PS4', 'Xbox One', 'PC', 'Nintendo Switch', 'IOS']\n random.shuffle(platform)\n platform = ', '.join(platform)\n description_temp = (\n \"\"\"Hey Guys!\n\nIn today's video I will show you how to get the XXXX skin for free in fortnite!\n\nThis is working on xbox, ps4, ios, pc and nintendo switch!\n\nThis method is 100% free and working as of 2018.\n\nThis is the best way to get a fortnite XXXX skin for free key code! \n\nThis is a working and legal method!\n\nHow To Get FREE SKINS In Fortnite: Battle Royale! [{0}]\"\"\"\n .format(platform))\n description_final = description_temp.replace('XXXX', name)\n return description_final\n\n @staticmethod\n def RandomTag(name):\n tag_temp = (\n 'XXXX, XXXX fortnite, XXXX free, XXXX skin,fortnite XXXX skin free, how to get the XXXX skin, iPhone XXXX free skins, iPad XXXX free skins'\n )\n tag_final = tag_temp.replace('XXXX', name)\n return tag_final\n\n\nif __name__ == '__main__':\n cRandomString.RandomDescription('123')\n",
"step-5": "# -*- coding:utf-8 -*-\n# author:Kyseng\n# file: cRandomString.py\n# time: 2018/11/8 11:41 PM\n# functhion:\nimport random\nimport sys\n\nreload(sys)\n\nsys.setdefaultencoding('utf-8')\n\nclass cRandomString():\n @staticmethod\n def RandomTitle(name):\n # name = name.decode('utf8')\n # print name\n platform = [\"PS4\", \"XBOX\", \"PC\", \"NS\", \"IOS\"]\n random.shuffle(platform)\n platform = \"/\".join(platform)\n\n\n firstWord = ['Cool', 'Hot', 'New', '2018', 'Gift', '*Cool*', '*Hot*', '*New*', '$Cool$', '$Hot$', '$New$']\n firstWord = random.choice(firstWord)\n\n title = firstWord + ' 🤑 FREE Fortnite XXXX SKIN ' + platform\n\n title = title.replace('XXXX', name)\n\n return title\n\n @staticmethod\n def RandomDescription(name):\n platform = [\"PS4\", \"Xbox One\", \"PC\", \"Nintendo Switch\", \"IOS\"]\n random.shuffle(platform)\n platform = \", \".join(platform)\n\n description_temp = \"Hey Guys!\\n\\nIn today's video I will show you how to get the XXXX skin for free in fortnite!\\n\\nThis is working on xbox, ps4, ios, pc and nintendo switch!\\n\\nThis method is 100% free and working as of 2018.\\n\\nThis is the best way to get a fortnite XXXX skin for free key code! \\n\\nThis is a working and legal method!\\n\\nHow To Get FREE SKINS In Fortnite: Battle Royale! [{0}]\".format(platform)\n\n description_final = description_temp.replace('XXXX', name)\n\n return description_final\n\n @staticmethod\n def RandomTag(name):\n tag_temp = \"XXXX, XXXX fortnite, XXXX free, XXXX skin,fortnite XXXX skin free, how to get the XXXX skin, iPhone XXXX free skins, iPad XXXX free skins\"\n tag_final = tag_temp.replace('XXXX', name)\n\n return tag_final\n\nif __name__ == \"__main__\":\n cRandomString.RandomDescription(\"123\")",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import unittest
from common.ReqLogin import req
import os
import yaml
from common import util
from TestCase.runnerBase import TestInterfaceCase
import paramunittest
import HTMLTestRunner  # third-party HTML report runner used in the __main__ block below

# The tests below call the request helper as `Login`; alias it to the imported
# `req` class here (assumption: it exposes the same req()/reqData() interface).
Login = req
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
def getYam(homeyaml):
try:
with open(homeyaml, encoding='utf-8') as f:
            x = yaml.safe_load(f)  # yaml.load() without a Loader is rejected by newer PyYAML
return x
except FileNotFoundError:
print(u"找不到文件")
x = getYam(PATH("./case_user_api.yml"))
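# x holds the whole YAML fixture (base response checks plus per-case request data)
# that every test below reads from.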
class UserinfoTest(TestInterfaceCase):
def setUp(self):
login = req.reqData(req)
self.infoma = {}
self.response = ""
self.infoma["id"] = x["testinfo"][0]["id"]
self.infoma["module"] = x["testinfo"][0]["module"]
self.infoma["intr"] = x["testinfo"][0]["intr"]
def base_check(self):
baseCheck = x["basecheck"]
if self.response["c"] == baseCheck["c"] and self.response["m"] == baseCheck["m"]:
return True
else:
util.DATA["fail"] = util.DATA["fail"] + 1
self.infoma["result"] = "失败"
self.infoma["reason"] = "接口未正确返回"
return False
def detailCkeck_list(self,case):
if self.base_check() is True:
if "list" in self.response:
util.DATA["pass"] = util.DATA["pass"] + 1
self.infoma["result"] = "通过"
else:
util.DATA["fail"] = util.DATA["fail"] + 1
self.infoma["result"] = "失败"
self.infoma["reason"] = self.response["c"]
self.infoma["casename"] = case["casename"]
util.DATA["sum"] = util.DATA["sum"] + 1
util.INFO.append(self.infoma)
def detailCheck_id(self,case):
if self.base_check() is True:
if self.response["r"]["id"] == case["data"]["id"]:
util.DATA["pass"] = util.DATA["pass"] + 1
self.infoma["result"] = "通过"
else:
util.DATA["fail"] = util.DATA["fail"] + 1
self.infoma["result"] = "失败"
self.infoma["reason"] = "断言预期与实际不符"
self.infoma["casename"] = case["casename"]
util.DATA["sum"] = util.DATA["sum"] + 1
util.INFO.append(self.infoma)
    '''Positive (normal-path) test'''
def test_user_info_conrrect(self):
case1 = x["userinfo"]["case1"]
self.response = Login.req(Login,case1["api"],case1["data"])
self.detailCheck_id(case1)
#
# '''异常测试--value字段长度不够'''
# def test_user_info_poorvalue(self):
# case2 = x["userinfo"]["case2"]
# self.response = Login.req(Login, case2["api"], case2["data"])
# if self.check1() is True:
# if self.response["r"]["id"] != case2["data"]["id"]:
# util.DATA["pass"] = util.DATA["pass"] + 1
# self.infoma["result"] = "通过"
# else:
# util.DATA["fail"] = util.DATA["fail"] + 1
# self.infoma["result"] = "失败"
# self.infoma["reason"] = "断言预期与实际不符"
# self.infoma["casename"] = case2["casename"]
# util.DATA["sum"] = util.DATA["sum"] + 1
# util.INFO.append(self.infoma)
# '''异常测试--接口所需参数为空'''
# def test_user_info_poorkey(self):
# case3 = x["userinfo"]["case3"]
# self.response = Login.req(Login,case3["api"],case3["data"])
# if self.check1() is False:
# if self.response["massage"] == case3["massage"]:
# util.DATA["pass"] = util.DATA["pass"] + 1
# self.infoma["result"] = "通过"
# else:
# util.DATA["fail"] = util.DATA["fail"] + 1
# self.infoma["result"] = "失败"
# self.infoma["reason"] = "断言预期与实际不符"
# self.infoma["casename"] = case3["casename"]
# util.DATA["sum"] = util.DATA["sum"] + 1
# util.INFO.append(self.infoma)
def test_user_item_conrrect(self):
case1 = x["useritems"]["case1"]
self.response = Login.req(Login, case1["api"], case1["data"])
self.detailCkeck_list(case1)
def test_user_projectboards(self):
case1 = x["userprojectboards"]["case1"]
self.response = Login.req(Login, case1["api"], case1["data"])
self.detailCkeck_list(case1)
def test_me_info(self):
case1 = x["me"]["case1"]
self.response = Login.req(Login, case1["api"], case1["data"])
        self.base_check()
def test_me_orders(self):
case1 = x["me"]["case2"]
self.response = Login.req(Login, case1["api"], case1["data"])
self.detailCkeck_list(case1)
def tearDown(self):
quit = Login.req(Login,'http://192.168.4.15:8001/api/0.2/account/signout',datas='')
if __name__ =='__main__':
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(UserinfoTest)
# tests = ['test_user_info_conrrect','test_user_info_poorvalue','test_user_info_poorkey']
# suite.addTests(map(UserinfoTest,tests))
# suite.addTest(UserItemsTest("test_user_item_conrrect"))
filename = r'C:\Users\xp\Desktop\result.html'
fp = open(filename, 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title=u'自动化测试报告',
description=u'注册- -自动化测试报告')
runner.run(suite)
|
normal
|
{
"blob_id": "aea196566bbbe9d37bf03b9b17a4062659a27bb6",
"index": 1446,
"step-1": "<mask token>\n\n\nclass UserinfoTest(TestInterfaceCase):\n\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = ''\n self.infoma['id'] = x['testinfo'][0]['id']\n self.infoma['module'] = x['testinfo'][0]['module']\n self.infoma['intr'] = x['testinfo'][0]['intr']\n\n def base_check(self):\n baseCheck = x['basecheck']\n if self.response['c'] == baseCheck['c'] and self.response['m'\n ] == baseCheck['m']:\n return True\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '接口未正确返回'\n return False\n\n def detailCkeck_list(self, case):\n if self.base_check() is True:\n if 'list' in self.response:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = self.response['c']\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self, case):\n if self.base_check() is True:\n if self.response['r']['id'] == case['data']['id']:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '断言预期与实际不符'\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n <mask token>\n\n def test_user_info_conrrect(self):\n case1 = x['userinfo']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCheck_id(case1)\n\n def test_user_item_conrrect(self):\n case1 = x['useritems']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x['userprojectboards']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n <mask token>\n\n def test_me_orders(self):\n case1 = x['me']['case2']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,\n 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserinfoTest(TestInterfaceCase):\n\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = ''\n self.infoma['id'] = x['testinfo'][0]['id']\n self.infoma['module'] = x['testinfo'][0]['module']\n self.infoma['intr'] = x['testinfo'][0]['intr']\n\n def base_check(self):\n baseCheck = x['basecheck']\n if self.response['c'] == baseCheck['c'] and self.response['m'\n ] == baseCheck['m']:\n return True\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '接口未正确返回'\n return False\n\n def detailCkeck_list(self, case):\n if self.base_check() is True:\n if 'list' in self.response:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = self.response['c']\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self, case):\n if self.base_check() is True:\n if self.response['r']['id'] == case['data']['id']:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '断言预期与实际不符'\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n <mask token>\n\n def test_user_info_conrrect(self):\n case1 = x['userinfo']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCheck_id(case1)\n\n def test_user_item_conrrect(self):\n case1 = x['useritems']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x['userprojectboards']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_me_info(self):\n case1 = x['me']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.base_check(case1)\n\n def test_me_orders(self):\n case1 = x['me']['case2']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,\n 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getYam(homeyaml):\n try:\n with open(homeyaml, encoding='utf-8') as f:\n x = yaml.load(f)\n return x\n except FileNotFoundError:\n print(u'找不到文件')\n\n\n<mask token>\n\n\nclass UserinfoTest(TestInterfaceCase):\n\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = ''\n self.infoma['id'] = x['testinfo'][0]['id']\n self.infoma['module'] = x['testinfo'][0]['module']\n self.infoma['intr'] = x['testinfo'][0]['intr']\n\n def base_check(self):\n baseCheck = x['basecheck']\n if self.response['c'] == baseCheck['c'] and self.response['m'\n ] == baseCheck['m']:\n return True\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '接口未正确返回'\n return False\n\n def detailCkeck_list(self, case):\n if self.base_check() is True:\n if 'list' in self.response:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = self.response['c']\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self, case):\n if self.base_check() is True:\n if self.response['r']['id'] == case['data']['id']:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '断言预期与实际不符'\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n \"\"\"正常测试\"\"\"\n\n def test_user_info_conrrect(self):\n case1 = x['userinfo']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCheck_id(case1)\n\n def test_user_item_conrrect(self):\n case1 = x['useritems']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x['userprojectboards']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_me_info(self):\n case1 = x['me']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.base_check(case1)\n\n def test_me_orders(self):\n case1 = x['me']['case2']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,\n 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')\n\n\n<mask token>\n",
"step-4": "<mask token>\nPATH = lambda p: os.path.abspath(os.path.join(os.path.dirname(__file__), p))\n\n\ndef getYam(homeyaml):\n try:\n with open(homeyaml, encoding='utf-8') as f:\n x = yaml.load(f)\n return x\n except FileNotFoundError:\n print(u'找不到文件')\n\n\nx = getYam(PATH('./case_user_api.yml'))\n\n\nclass UserinfoTest(TestInterfaceCase):\n\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = ''\n self.infoma['id'] = x['testinfo'][0]['id']\n self.infoma['module'] = x['testinfo'][0]['module']\n self.infoma['intr'] = x['testinfo'][0]['intr']\n\n def base_check(self):\n baseCheck = x['basecheck']\n if self.response['c'] == baseCheck['c'] and self.response['m'\n ] == baseCheck['m']:\n return True\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '接口未正确返回'\n return False\n\n def detailCkeck_list(self, case):\n if self.base_check() is True:\n if 'list' in self.response:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = self.response['c']\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self, case):\n if self.base_check() is True:\n if self.response['r']['id'] == case['data']['id']:\n util.DATA['pass'] = util.DATA['pass'] + 1\n self.infoma['result'] = '通过'\n else:\n util.DATA['fail'] = util.DATA['fail'] + 1\n self.infoma['result'] = '失败'\n self.infoma['reason'] = '断言预期与实际不符'\n self.infoma['casename'] = case['casename']\n util.DATA['sum'] = util.DATA['sum'] + 1\n util.INFO.append(self.infoma)\n \"\"\"正常测试\"\"\"\n\n def test_user_info_conrrect(self):\n case1 = x['userinfo']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCheck_id(case1)\n\n def test_user_item_conrrect(self):\n case1 = x['useritems']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x['userprojectboards']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def test_me_info(self):\n case1 = x['me']['case1']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.base_check(case1)\n\n def test_me_orders(self):\n case1 = x['me']['case2']\n self.response = Login.req(Login, case1['api'], case1['data'])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,\n 'http://192.168.4.15:8001/api/0.2/account/signout', datas='')\n\n\nif __name__ == '__main__':\n suite = unittest.TestSuite()\n filename = 'C:\\\\Users\\\\xp\\\\Desktop\\\\result.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'自动化测试报告',\n description=u'注册- -自动化测试报告')\n runner.run(suite)\n",
"step-5": "import unittest\nfrom common.ReqLogin import req\nimport os\nimport yaml\nfrom common import util\nfrom TestCase.runnerBase import TestInterfaceCase\nimport paramunittest\n\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\ndef getYam(homeyaml):\n try:\n with open(homeyaml, encoding='utf-8') as f:\n x = yaml.load(f)\n return x\n except FileNotFoundError:\n print(u\"找不到文件\")\nx = getYam(PATH(\"./case_user_api.yml\"))\n\nclass UserinfoTest(TestInterfaceCase):\n def setUp(self):\n login = req.reqData(req)\n self.infoma = {}\n self.response = \"\"\n self.infoma[\"id\"] = x[\"testinfo\"][0][\"id\"]\n self.infoma[\"module\"] = x[\"testinfo\"][0][\"module\"]\n self.infoma[\"intr\"] = x[\"testinfo\"][0][\"intr\"]\n\n def base_check(self):\n baseCheck = x[\"basecheck\"]\n if self.response[\"c\"] == baseCheck[\"c\"] and self.response[\"m\"] == baseCheck[\"m\"]:\n return True\n else:\n util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n self.infoma[\"result\"] = \"失败\"\n self.infoma[\"reason\"] = \"接口未正确返回\"\n return False\n\n def detailCkeck_list(self,case):\n if self.base_check() is True:\n if \"list\" in self.response:\n util.DATA[\"pass\"] = util.DATA[\"pass\"] + 1\n self.infoma[\"result\"] = \"通过\"\n else:\n util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n self.infoma[\"result\"] = \"失败\"\n self.infoma[\"reason\"] = self.response[\"c\"]\n self.infoma[\"casename\"] = case[\"casename\"]\n util.DATA[\"sum\"] = util.DATA[\"sum\"] + 1\n util.INFO.append(self.infoma)\n\n def detailCheck_id(self,case):\n if self.base_check() is True:\n if self.response[\"r\"][\"id\"] == case[\"data\"][\"id\"]:\n util.DATA[\"pass\"] = util.DATA[\"pass\"] + 1\n self.infoma[\"result\"] = \"通过\"\n else:\n util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n self.infoma[\"result\"] = \"失败\"\n self.infoma[\"reason\"] = \"断言预期与实际不符\"\n self.infoma[\"casename\"] = case[\"casename\"]\n util.DATA[\"sum\"] = util.DATA[\"sum\"] + 1\n util.INFO.append(self.infoma)\n\n\n '''正常测试'''\n def test_user_info_conrrect(self):\n case1 = x[\"userinfo\"][\"case1\"]\n self.response = Login.req(Login,case1[\"api\"],case1[\"data\"])\n self.detailCheck_id(case1)\n #\n # '''异常测试--value字段长度不够'''\n # def test_user_info_poorvalue(self):\n # case2 = x[\"userinfo\"][\"case2\"]\n # self.response = Login.req(Login, case2[\"api\"], case2[\"data\"])\n # if self.check1() is True:\n # if self.response[\"r\"][\"id\"] != case2[\"data\"][\"id\"]:\n # util.DATA[\"pass\"] = util.DATA[\"pass\"] + 1\n # self.infoma[\"result\"] = \"通过\"\n # else:\n # util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n # self.infoma[\"result\"] = \"失败\"\n # self.infoma[\"reason\"] = \"断言预期与实际不符\"\n # self.infoma[\"casename\"] = case2[\"casename\"]\n # util.DATA[\"sum\"] = util.DATA[\"sum\"] + 1\n # util.INFO.append(self.infoma)\n # '''异常测试--接口所需参数为空'''\n # def test_user_info_poorkey(self):\n # case3 = x[\"userinfo\"][\"case3\"]\n # self.response = Login.req(Login,case3[\"api\"],case3[\"data\"])\n # if self.check1() is False:\n # if self.response[\"massage\"] == case3[\"massage\"]:\n # util.DATA[\"pass\"] = util.DATA[\"pass\"] + 1\n # self.infoma[\"result\"] = \"通过\"\n # else:\n # util.DATA[\"fail\"] = util.DATA[\"fail\"] + 1\n # self.infoma[\"result\"] = \"失败\"\n # self.infoma[\"reason\"] = \"断言预期与实际不符\"\n # self.infoma[\"casename\"] = case3[\"casename\"]\n # util.DATA[\"sum\"] = util.DATA[\"sum\"] + 1\n # util.INFO.append(self.infoma)\n\n def test_user_item_conrrect(self):\n case1 = x[\"useritems\"][\"case1\"]\n self.response = Login.req(Login, 
case1[\"api\"], case1[\"data\"])\n self.detailCkeck_list(case1)\n\n def test_user_projectboards(self):\n case1 = x[\"userprojectboards\"][\"case1\"]\n self.response = Login.req(Login, case1[\"api\"], case1[\"data\"])\n self.detailCkeck_list(case1)\n def test_me_info(self):\n case1 = x[\"me\"][\"case1\"]\n self.response = Login.req(Login, case1[\"api\"], case1[\"data\"])\n self.base_check(case1)\n def test_me_orders(self):\n case1 = x[\"me\"][\"case2\"]\n self.response = Login.req(Login, case1[\"api\"], case1[\"data\"])\n self.detailCkeck_list(case1)\n\n def tearDown(self):\n quit = Login.req(Login,'http://192.168.4.15:8001/api/0.2/account/signout',datas='')\n\nif __name__ =='__main__':\n suite = unittest.TestSuite()\n # tests = ['test_user_info_conrrect','test_user_info_poorvalue','test_user_info_poorkey']\n # suite.addTests(map(UserinfoTest,tests))\n # suite.addTest(UserItemsTest(\"test_user_item_conrrect\"))\n\n filename = r'C:\\Users\\xp\\Desktop\\result.html'\n fp = open(filename, 'wb')\n runner = HTMLTestRunner.HTMLTestRunner(\n stream=fp,\n title=u'自动化测试报告',\n description=u'注册- -自动化测试报告')\n runner.run(suite)\n",
"step-ids": [
10,
11,
13,
15,
17
]
}
|
[
10,
11,
13,
15,
17
] |
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
from PIL import Image
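
# plots() writes accuracy, loss and error-rate curves (train vs. test) to PNG files;
# write_csv() appends the latest per-epoch metrics to a CSV log.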
def plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):
plt.style.use('bmh')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_acc', 'test_acc'], loc='upper left')
fig.savefig(filename + '_accuracy.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'test_loss'], loc='upper left')
fig.savefig(filename + '_loss.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')
plt.title('model error rate')
plt.ylabel('error rate')
plt.xlabel('epoch')
plt.legend(['train_error', 'test_error'], loc='upper left')
fig.savefig(filename + '_error.png')
plt.close('all')
def write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):
if epoch==0:
with open(filename, 'w') as f:
f.write('train_acc,test_acc,train_loss, test_loss, train_error, test_error\n')
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
|
normal
|
{
"blob_id": "93150eb1c6746e2b1967eb5305fa526ae36968fd",
"index": 2003,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-3": "<mask token>\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error,\n test_error, filename):\n plt.style.use('bmh')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_acc, 'r', epochs, test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_loss, 'r', epochs, test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_error, 'r', epochs, test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n plt.close('all')\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-4": "import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imsave, imread\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error,\n test_error, filename):\n plt.style.use('bmh')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_acc, 'r', epochs, test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_loss, 'r', epochs, test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n fig = plt.figure(figsize=(8, 6))\n plt.plot(epochs, train_error, 'r', epochs, test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n plt.close('all')\n\n\ndef write_csv(filename, train_acc, test_acc, train_loss, test_loss,\n train_error, test_error, epoch):\n if epoch == 0:\n with open(filename, 'w') as f:\n f.write(\n 'train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n'\n )\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\n test_acc[-1], train_loss[-1], test_loss[-1], train_error[-1\n ], test_error[-1]))\n",
"step-5": "import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom PIL import Image\nfrom scipy.misc import imsave, imread\n\n\ndef plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):\n plt.style.use('bmh')\n\n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train_acc', 'test_acc'], loc='upper left')\n fig.savefig(filename + '_accuracy.png')\n\n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train_loss', 'test_loss'], loc='upper left')\n fig.savefig(filename + '_loss.png')\n \n fig=plt.figure(figsize=(8,6))\n plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')\n plt.title('model error rate')\n plt.ylabel('error rate')\n plt.xlabel('epoch')\n plt.legend(['train_error', 'test_error'], loc='upper left')\n fig.savefig(filename + '_error.png')\n\n plt.close('all')\n\n\n\ndef write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):\n if epoch==0:\n \n with open(filename, 'w') as f:\n f.write('train_acc,test_acc,train_loss, test_loss, train_error, test_error\\n') \n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\\\n test_acc[-1],\\\n train_loss[-1],\\\n test_loss[-1],\\\n train_error[-1],\\\n test_error[-1]))\n \n else:\n with open(filename, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5}\\n'.format(train_acc[-1],\\\n test_acc[-1],\\\n train_loss[-1],\\\n test_loss[-1],\\\n train_error[-1],\\\n test_error[-1]))\n \n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import random
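# Fractional knapsack: items are (value, weight) pairs, and the comparators below
# compare value/weight ratios via cross-multiplication to avoid floating point.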
def less(i1, i2):
return i1[0] * i2[1] < i2[0] * i1[1]
def equal(i1, i2):
return i1[0] * i2[1] == i2[0] * i1[1]
def more(i1, i2):
return i1[0] * i2[1] > i2[0] * i1[1]
def partition(x, l, r, pivot):
il = l
ir = l
for i in range(l, r):
        if less(x[i], pivot) and ir < r:  # compare by value/weight ratio, not lexicographic tuple order
x[il], x[i] = x[i], x[il]
if il != ir:
x[ir], x[i] = x[i], x[ir]
il += 1
ir += 1
        elif equal(x[i], pivot) and ir < r:
x[ir], x[i] = x[i], x[ir]
ir += 1
return il, ir
def qsort(x, l=0, r=None):
if r is None:
r = len(x)
if (r - l) > 1:
pivot = x[random.randint(l, r - 1)]
il, ir = partition(x, l, r, pivot)
qsort(x, l, il)
qsort(x, ir, r)
N, w = list(map(int, input().split()))
x = []
for i in range(N):
x.append(tuple(map(int, input().split())))
qsort(x)
x = x[::-1]
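# Greedy phase: take whole items in decreasing value/weight order, then a
# proportional share of the first item that no longer fits.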
s = 0
i = 0
while (i < N) and (w >= x[i][1]):
s += x[i][0]
w -= x[i][1]
i += 1
if i < N:
s += (x[i][0] * w // x[i][1])
print(s)
|
normal
|
{
"blob_id": "a5e693a79211570f2d27575657496992f8fee164",
"index": 9075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef less(i1, i2):\n return i1[0] * i2[1] < i2[0] * i1[1]\n\n\ndef equal(i1, i2):\n return i1[0] * i2[1] == i2[0] * i1[1]\n\n\ndef more(i1, i2):\n return i1[0] * i2[1] > i2[0] * i1[1]\n\n\ndef partition(x, l, r, pivot):\n il = l\n ir = l\n for i in range(l, r):\n if x[i] < pivot and ir < r:\n x[il], x[i] = x[i], x[il]\n if il != ir:\n x[ir], x[i] = x[i], x[ir]\n il += 1\n ir += 1\n elif x[i] == pivot and ir < r:\n x[ir], x[i] = x[i], x[ir]\n ir += 1\n return il, ir\n\n\ndef qsort(x, l=0, r=None):\n if r is None:\n r = len(x)\n if r - l > 1:\n pivot = x[random.randint(l, r - 1)]\n il, ir = partition(x, l, r, pivot)\n qsort(x, l, il)\n qsort(x, ir, r)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef less(i1, i2):\n return i1[0] * i2[1] < i2[0] * i1[1]\n\n\ndef equal(i1, i2):\n return i1[0] * i2[1] == i2[0] * i1[1]\n\n\ndef more(i1, i2):\n return i1[0] * i2[1] > i2[0] * i1[1]\n\n\ndef partition(x, l, r, pivot):\n il = l\n ir = l\n for i in range(l, r):\n if x[i] < pivot and ir < r:\n x[il], x[i] = x[i], x[il]\n if il != ir:\n x[ir], x[i] = x[i], x[ir]\n il += 1\n ir += 1\n elif x[i] == pivot and ir < r:\n x[ir], x[i] = x[i], x[ir]\n ir += 1\n return il, ir\n\n\ndef qsort(x, l=0, r=None):\n if r is None:\n r = len(x)\n if r - l > 1:\n pivot = x[random.randint(l, r - 1)]\n il, ir = partition(x, l, r, pivot)\n qsort(x, l, il)\n qsort(x, ir, r)\n\n\nN, w = list(map(int, input().split()))\nx = []\nfor i in range(N):\n x.append(tuple(map(int, input().split())))\nqsort(x)\nx = x[::-1]\ns = 0\ni = 0\nwhile i < N and w >= x[i][1]:\n s += x[i][0]\n w -= x[i][1]\n i += 1\nif i < N:\n s += x[i][0] * w // x[i][1]\nprint(s)\n",
"step-4": "import random\n\n\ndef less(i1, i2):\n return i1[0] * i2[1] < i2[0] * i1[1]\n\n\ndef equal(i1, i2):\n return i1[0] * i2[1] == i2[0] * i1[1]\n\n\ndef more(i1, i2):\n return i1[0] * i2[1] > i2[0] * i1[1]\n\n\ndef partition(x, l, r, pivot):\n il = l\n ir = l\n for i in range(l, r):\n if x[i] < pivot and ir < r:\n x[il], x[i] = x[i], x[il]\n if il != ir:\n x[ir], x[i] = x[i], x[ir]\n il += 1\n ir += 1\n elif x[i] == pivot and ir < r:\n x[ir], x[i] = x[i], x[ir]\n ir += 1\n return il, ir\n\n\ndef qsort(x, l=0, r=None):\n if r is None:\n r = len(x)\n if r - l > 1:\n pivot = x[random.randint(l, r - 1)]\n il, ir = partition(x, l, r, pivot)\n qsort(x, l, il)\n qsort(x, ir, r)\n\n\nN, w = list(map(int, input().split()))\nx = []\nfor i in range(N):\n x.append(tuple(map(int, input().split())))\nqsort(x)\nx = x[::-1]\ns = 0\ni = 0\nwhile i < N and w >= x[i][1]:\n s += x[i][0]\n w -= x[i][1]\n i += 1\nif i < N:\n s += x[i][0] * w // x[i][1]\nprint(s)\n",
"step-5": "import random\n\n\ndef less(i1, i2):\n return i1[0] * i2[1] < i2[0] * i1[1]\n\n\ndef equal(i1, i2):\n return i1[0] * i2[1] == i2[0] * i1[1]\n\n\ndef more(i1, i2):\n return i1[0] * i2[1] > i2[0] * i1[1]\n\n\ndef partition(x, l, r, pivot):\n il = l\n ir = l\n for i in range(l, r):\n if x[i] < pivot and ir < r:\n x[il], x[i] = x[i], x[il]\n if il != ir:\n x[ir], x[i] = x[i], x[ir]\n il += 1\n ir += 1\n elif x[i] == pivot and ir < r:\n x[ir], x[i] = x[i], x[ir]\n ir += 1\n return il, ir\n\n\ndef qsort(x, l=0, r=None):\n if r is None:\n r = len(x)\n if (r - l) > 1:\n pivot = x[random.randint(l, r - 1)]\n il, ir = partition(x, l, r, pivot)\n qsort(x, l, il)\n qsort(x, ir, r)\n\n\nN, w = list(map(int, input().split()))\nx = []\nfor i in range(N):\n x.append(tuple(map(int, input().split())))\nqsort(x)\nx = x[::-1]\n\ns = 0\ni = 0\nwhile (i < N) and (w >= x[i][1]):\n s += x[i][0]\n w -= x[i][1]\n i += 1\nif i < N:\n s += (x[i][0] * w // x[i][1])\n\nprint(s)\n",
"step-ids": [
0,
5,
7,
8,
9
]
}
|
[
0,
5,
7,
8,
9
] |
import tkinter as tk
import random
import numpy as np
import copy
import time
#################################################################################
#
# Game data
NbSimulation = 20000
Data = [ [1,1,1,1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,0,0,0,0,0,0,0,0,0,0,1],
[1,1,1,1,1,1,1,1,1,1,1,1,1] ]
GInit = np.array(Data,dtype=np.int8)
GInit = np.flip(GInit,0).transpose()
LARGEUR = 13
HAUTEUR = 17
# container used to pass the whole game state around efficiently
class Game:
def __init__(self, Grille, PlayerX, PlayerY, Score=0):
self.PlayerX = PlayerX
self.PlayerY = PlayerY
self.Score = Score
self.Grille = Grille
def copy(self):
return copy.deepcopy(self)
GameInit = Game(GInit,3,5)
##############################################################
#
# creation of the main window - DO NOT TOUCH
L = 20 # width of one game cell, in pixels
largeurPix = LARGEUR * L
hauteurPix = HAUTEUR * L
Window = tk.Tk()
Window.geometry(str(largeurPix)+"x"+str(hauteurPix)) # window size
Window.title("TRON")
# main frame that stores all the pages
F = tk.Frame(Window)
F.pack(side="top", fill="both", expand=True)
F.grid_rowconfigure(0, weight=1)
F.grid_columnconfigure(0, weight=1)
# management of the different pages
ListePages = {}
PageActive = 0
def CreerUnePage(id):
Frame = tk.Frame(F)
ListePages[id] = Frame
Frame.grid(row=0, column=0, sticky="nsew")
return Frame
def AfficherPage(id):
global PageActive
PageActive = id
ListePages[id].tkraise()
Frame0 = CreerUnePage(0)
canvas = tk.Canvas(Frame0,width = largeurPix, height = hauteurPix, bg ="black" )
canvas.place(x=0,y=0)
# Draws the game grid - do not touch
def Affiche(Game):
canvas.delete("all")
H = canvas.winfo_height()
def DrawCase(x,y,coul):
x *= L
y *= L
canvas.create_rectangle(x,H-y,x+L,H-y-L,fill=coul)
    # draw the walls
for x in range (LARGEUR):
for y in range (HAUTEUR):
if Game.Grille[x,y] == 1 : DrawCase(x,y,"gray" )
if Game.Grille[x,y] == 2 : DrawCase(x,y,"cyan" )
    # draw the light cycle
DrawCase(Game.PlayerX,Game.PlayerY,"red" )
def AfficheScore(Game):
info = "SCORE : " + str(Game.Score)
canvas.create_text(80, 13, font='Helvetica 12 bold', fill="yellow", text=info)
###########################################################
#
# AI player logic
# YOUR CODE HERE
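# x/y offsets for the five moves: stay, left, up, right, down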
dx = np.array([0, -1, 0, 1, 0],dtype=np.int8)
dy = np.array([0, 0, 1, 0, -1],dtype=np.int8)
# score gained for each of the five moves
ds = np.array([0, 1, 1, 1, 1],dtype=np.int8)
def GetAllExectuableMove(Game):
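    # Return the list of free neighbouring cells (grid value 0) the player can move to.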
possibleMove = [(0,+1),(0,-1),(+1,0),(-1,0)]
executableMove = []
for tup in possibleMove :
x,y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]
v = Game.Grille[x,y]
if v == 0 :
executableMove.append((x,y))
return executableMove
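
# Vectorised Monte Carlo rollout: plays NbSimulation random games in parallel
# with numpy arrays and returns the mean final score.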
def Simulate(Game):
nb = NbSimulation
    # copy the starting data to create several simultaneous games
    G = np.tile(Game.Grille,(nb,1,1)) # grid (x,y) for each game
    X = np.tile(Game.PlayerX,nb) # playerX (x) for each game
    Y = np.tile(Game.PlayerY,nb) # playerY (y) for each game
    S = np.tile(Game.Score,nb) # score (s) for each game
I = np.arange(nb) # 0,1,2,3,...,nb-1
    # YOUR CODE HERE
continuer = True
while(continuer) :
        # in each game, mark the light cycle's trail with a 2
        G[I, X, Y] = 2
        ### for each game, handle all the possible move indices
        # for each game, build a list of size 4 initialised to 0
        LPossibles = np.zeros((nb, 4),dtype=np.int8)
        # for each game, set entry i to i+1 if the player can move in that direction, 0 otherwise
        for i in range(4):
            LPossibles[I,i] = np.where(G[I, X+dx[i+1], Y+dy[i+1]] == 0,i+1,0)
        # for each game, sort the possible directions in decreasing order
        LPossibles.sort(axis=1)
        LPossibles = np.fliplr(LPossibles)
        ### for each game, count the number of possible moves
        # for each game, count the non-zero entries of LPossibles
        Indices = np.count_nonzero(LPossibles, axis=1)
        # for each game, replace counts of 0 by 1 so the modulo below never divides by zero
        Indices[Indices == 0] = 1
        # for each game, draw a random index (12 is a multiple of 1..4, so R % count stays unbiased)
        R = np.random.randint(12,size=nb,dtype=np.int8)
        # for each game, pick the chosen move
        Position = LPossibles[I, R % Indices[I]]
        ### apply the moves and the scoring
        # stop once every game is stuck (all chosen moves are 0, i.e. "stay")
        if(nb == np.count_nonzero(Position == 0)): continuer = False
        # for each game, update the score
        S[I] += ds[Position]
        # for each game, move the player
        X += dx[Position]
        Y += dy[Position]
    # return the mean score over all simulated games
return np.mean(S)
def MonteCarlo(Game):
return Simulate(Game)
def MovePlayerWithIA(Game):
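    # Evaluate every legal move with a Monte Carlo rollout from the resulting
    # position and return the move with the highest mean score.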
executableMove = GetAllExectuableMove(Game)
result = (None, None)
maxi = 0
if(len(executableMove)==0):
return None, None
for x,y in executableMove:
Game.PlayerX = x
Game.PlayerY = y
total = MonteCarlo(Game)
if(total>maxi):
result = (x,y)
maxi = total
return result
def Play(Game):
x,y = Game.PlayerX, Game.PlayerY
    Game.Grille[x,y] = 2 # leave the light cycle's trail
x,y = MovePlayerWithIA(Game)
    if x is None or y is None :
        # collision detected
        return True # game over
    else :
        Game.PlayerX = x # commit the move
        Game.PlayerY = y # commit the move
        Game.Score += 1
        return False # the game continues
################################################################################
CurrentGame = GameInit.copy()
def Partie():
Tstart = time.time()
PartieTermine = Play(CurrentGame)
print(time.time() - Tstart)
if not PartieTermine :
Affiche(CurrentGame)
        # schedule Partie() again after 1000 ms;
        # in the meantime the OS gets a chance to redraw the interface
Window.after(1000,Partie)
else :
AfficheScore(CurrentGame)
#####################################################################################
#
# Interface setup - do not touch
AfficherPage(0)
Window.after(100,Partie)
Window.mainloop()
|
normal
|
{
"blob_id": "86177dfa9b8bed5916703edcc16ea4d01cbabf84",
"index": 3278,
"step-1": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\n<mask token>\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\n<mask token>\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\n<mask token>\nWindow.geometry(str(largeurPix) + 'x' + str(hauteurPix))\nWindow.title('TRON')\n<mask token>\nF.pack(side='top', fill='both', expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\n<mask token>\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\n<mask token>\ncanvas.place(x=0, y=0)\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\n<mask token>\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\ndef Play(Game):\n x, y = Game.PlayerX, Game.PlayerY\n Game.Grille[x, y] = 2\n x, y = MovePlayerWithIA(Game)\n if x == None or y == None:\n return True\n else:\n Game.PlayerX = x\n Game.PlayerY = y\n Game.Score += 1\n return False\n\n\n<mask token>\n\n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine:\n Affiche(CurrentGame)\n Window.after(1000, Partie)\n else:\n AfficheScore(CurrentGame)\n\n\nAfficherPage(0)\nWindow.after(100, Partie)\nWindow.mainloop()\n",
"step-4": "<mask token>\nNbSimulation = 20000\nData = [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\nGInit = np.array(Data, dtype=np.int8)\nGInit = np.flip(GInit, 0).transpose()\nLARGEUR = 13\nHAUTEUR = 17\n\n\nclass Game:\n\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n\n def copy(self):\n return copy.deepcopy(self)\n\n\nGameInit = Game(GInit, 3, 5)\nL = 20\nlargeurPix = LARGEUR * L\nhauteurPix = HAUTEUR * L\nWindow = tk.Tk()\nWindow.geometry(str(largeurPix) + 'x' + str(hauteurPix))\nWindow.title('TRON')\nF = tk.Frame(Window)\nF.pack(side='top', fill='both', expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\nListePages = {}\nPageActive = 0\n\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky='nsew')\n return Frame\n\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n\n\nFrame0 = CreerUnePage(0)\ncanvas = tk.Canvas(Frame0, width=largeurPix, height=hauteurPix, bg='black')\ncanvas.place(x=0, y=0)\n\n\ndef Affiche(Game):\n canvas.delete('all')\n H = canvas.winfo_height()\n\n def DrawCase(x, y, coul):\n x *= L\n y *= L\n canvas.create_rectangle(x, H - y, x + L, H - y - L, fill=coul)\n for x in range(LARGEUR):\n for y in range(HAUTEUR):\n if Game.Grille[x, y] == 1:\n DrawCase(x, y, 'gray')\n if Game.Grille[x, y] == 2:\n DrawCase(x, y, 'cyan')\n DrawCase(Game.PlayerX, Game.PlayerY, 'red')\n\n\ndef AfficheScore(Game):\n info = 'SCORE : ' + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill='yellow',\n text=info)\n\n\ndx = np.array([0, -1, 0, 1, 0], dtype=np.int8)\ndy = np.array([0, 0, 1, 0, -1], dtype=np.int8)\nds = np.array([0, 1, 1, 1, 1], dtype=np.int8)\n\n\ndef GetAllExectuableMove(Game):\n possibleMove = [(0, +1), (0, -1), (+1, 0), (-1, 0)]\n executableMove = []\n for tup in possibleMove:\n x, y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x, y]\n if v == 0:\n executableMove.append((x, y))\n return executableMove\n\n\ndef Simulate(Game):\n nb = NbSimulation\n G = np.tile(Game.Grille, (nb, 1, 1))\n X = np.tile(Game.PlayerX, nb)\n Y = np.tile(Game.PlayerY, nb)\n S = np.tile(Game.Score, nb)\n I = np.arange(nb)\n continuer = True\n while continuer:\n G[I, X, Y] = 2\n LPossibles = np.zeros((nb, 4), dtype=np.int8)\n for i in range(4):\n LPossibles[I, i] = np.where(G[I, X + dx[i + 1], Y + dy[i + 1]] ==\n 0, i + 1, 0)\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n Indices = np.count_nonzero(LPossibles, axis=1)\n Indices[Indices == 0] = 1\n R = np.random.randint(12, size=nb, dtype=np.int8)\n Position = LPossibles[I, R % Indices[I]]\n if nb == np.count_nonzero(Position == 0):\n continuer = False\n S[I] += ds[Position]\n X += dx[Position]\n Y += 
dy[Position]\n return np.mean(S)\n\n\ndef MonteCarlo(Game):\n return Simulate(Game)\n\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = None, None\n maxi = 0\n if len(executableMove) == 0:\n return None, None\n for x, y in executableMove:\n Game.PlayerX = x\n Game.PlayerY = y\n total = MonteCarlo(Game)\n if total > maxi:\n result = x, y\n maxi = total\n return result\n\n\ndef Play(Game):\n x, y = Game.PlayerX, Game.PlayerY\n Game.Grille[x, y] = 2\n x, y = MovePlayerWithIA(Game)\n if x == None or y == None:\n return True\n else:\n Game.PlayerX = x\n Game.PlayerY = y\n Game.Score += 1\n return False\n\n\nCurrentGame = GameInit.copy()\n\n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine:\n Affiche(CurrentGame)\n Window.after(1000, Partie)\n else:\n AfficheScore(CurrentGame)\n\n\nAfficherPage(0)\nWindow.after(100, Partie)\nWindow.mainloop()\n",
"step-5": "import tkinter as tk\nimport random\nimport numpy as np\nimport copy \nimport time\n\n#################################################################################\n#\n# Données de partie\nNbSimulation = 20000\nData = [ [1,1,1,1,1,1,1,1,1,1,1,1,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0,0,0,1],\n [1,1,0,0,0,0,0,0,0,0,0,0,1],\n [1,1,1,1,1,1,1,1,1,1,1,1,1] ]\n\nGInit = np.array(Data,dtype=np.int8)\nGInit = np.flip(GInit,0).transpose()\n\nLARGEUR = 13\nHAUTEUR = 17\n\n# container pour passer efficacement toutes les données de la partie\n\nclass Game:\n def __init__(self, Grille, PlayerX, PlayerY, Score=0):\n self.PlayerX = PlayerX\n self.PlayerY = PlayerY\n self.Score = Score\n self.Grille = Grille\n \n def copy(self): \n return copy.deepcopy(self)\n\nGameInit = Game(GInit,3,5)\n\n##############################################################\n#\n# création de la fenetre principale - NE PAS TOUCHER\n\nL = 20 # largeur d'une case du jeu en pixel \nlargeurPix = LARGEUR * L\nhauteurPix = HAUTEUR * L\n\n\nWindow = tk.Tk()\nWindow.geometry(str(largeurPix)+\"x\"+str(hauteurPix)) # taille de la fenetre\nWindow.title(\"TRON\")\n\n\n# création de la frame principale stockant toutes les pages\n\nF = tk.Frame(Window)\nF.pack(side=\"top\", fill=\"both\", expand=True)\nF.grid_rowconfigure(0, weight=1)\nF.grid_columnconfigure(0, weight=1)\n\n# gestion des différentes pages\n\nListePages = {}\nPageActive = 0\n\ndef CreerUnePage(id):\n Frame = tk.Frame(F)\n ListePages[id] = Frame\n Frame.grid(row=0, column=0, sticky=\"nsew\")\n return Frame\n\ndef AfficherPage(id):\n global PageActive\n PageActive = id\n ListePages[id].tkraise()\n \nFrame0 = CreerUnePage(0)\n\ncanvas = tk.Canvas(Frame0,width = largeurPix, height = hauteurPix, bg =\"black\" )\ncanvas.place(x=0,y=0)\n\n# Dessine la grille de jeu - ne pas toucher\n\n\ndef Affiche(Game):\n canvas.delete(\"all\")\n H = canvas.winfo_height()\n \n def DrawCase(x,y,coul):\n x *= L\n y *= L\n canvas.create_rectangle(x,H-y,x+L,H-y-L,fill=coul)\n \n # dessin des murs \n \n for x in range (LARGEUR):\n for y in range (HAUTEUR):\n if Game.Grille[x,y] == 1 : DrawCase(x,y,\"gray\" )\n if Game.Grille[x,y] == 2 : DrawCase(x,y,\"cyan\" )\n \n \n # dessin de la moto\n DrawCase(Game.PlayerX,Game.PlayerY,\"red\" )\n\ndef AfficheScore(Game):\n info = \"SCORE : \" + str(Game.Score)\n canvas.create_text(80, 13, font='Helvetica 12 bold', fill=\"yellow\", text=info)\n\n\n###########################################################\n#\n# gestion du joueur IA\n\n# VOTRE CODE ICI \ndx = np.array([0, -1, 0, 1, 0],dtype=np.int8)\ndy = np.array([0, 0, 1, 0, -1],dtype=np.int8)\n\n# scores associés à chaque déplacement\nds = np.array([0, 1, 1, 1, 1],dtype=np.int8)\ndef GetAllExectuableMove(Game):\n possibleMove = [(0,+1),(0,-1),(+1,0),(-1,0)]\n executableMove = []\n for tup in possibleMove :\n x,y = Game.PlayerX + tup[0], Game.PlayerY + tup[1]\n v = Game.Grille[x,y]\n if v == 0 :\n executableMove.append((x,y))\n \n return executableMove\n\ndef Simulate(Game):\n\n nb = NbSimulation\n # on copie les datas de départ pour créer plusieurs parties\n G = np.tile(Game.Grille,(nb,1,1)) # grille (x,y) pour 
chaque partie\n X = np.tile(Game.PlayerX,nb) # playerX (x) pour chaque partie\n Y = np.tile(Game.PlayerY,nb) # playerY (y) pour chaque partie\n S = np.tile(Game.Score,nb) # score (s) pour chaque partie\n I = np.arange(nb) # 0,1,2,3,...,nb-1\n\n # VOTRE CODE ICI\n continuer = True\n\n while(continuer) :\n\n # pour chaque partie, on fait une affectation à 2 le passage de la moto\n G[I, X, Y] = 2\n\n\n ### pour chaque partie, on gère tous les index de déplacements possibles\n # pour chaque partie, on associe une liste de taille 4 initialisée à 0 \n LPossibles = np.zeros((nb, 4),dtype=np.int8)\n\n # pour chaque partie, on associe la liste de taille 4 à i si le joueur peut bouger dans cette direction, 0 sinon\n for i in range(4): \n LPossibles[I,i] = np.where(G[I, X+dx[i+1], Y+dy[i+1]] == 0,i+1,0)\n\n # pour chaque partie, on trie la liste des directions de manière décroissante\n LPossibles.sort(axis=1)\n LPossibles = np.fliplr(LPossibles)\n\n\n ### pour chaque partie, on compte le nombre de déplacements possibles\n # pour chaque partie, on compte le nombre d'éléments de LPossibles non nuls\n Indices = np.count_nonzero(LPossibles, axis=1)\n \n # pour chaque partie, on remplace les index de 0 par 1 pour pas planter sur le modulo\n Indices[Indices == 0] = 1\n\n # pour chaque partie, on génère un index de direction aléatoire\n R = np.random.randint(12,size=nb,dtype=np.int8)\n\n # pour chaque partie, on réucupère un vecteur position\n Position = LPossibles[I, R % Indices[I]]\n \n\n ### on gère les déplacement et le code\n\n # on arrete le traitement si, on est statique sur l'ensemble des parties\n if(nb == np.count_nonzero(Position == 0)): continuer = False\n\n # pour chaque partie, on incrémente le score\n S[I] += ds[Position]\n\n # pour chaque partie, on déplace le joueur\n X += dx[Position]\n Y += dy[Position]\n\n # on retourne la moyenne des scores\n return np.mean(S)\n\n\n \ndef MonteCarlo(Game):\n return Simulate(Game)\n\ndef MovePlayerWithIA(Game):\n executableMove = GetAllExectuableMove(Game)\n result = (None, None)\n maxi = 0\n if(len(executableMove)==0):\n return None, None\n\n for x,y in executableMove:\n Game.PlayerX = x \n Game.PlayerY = y\n total = MonteCarlo(Game)\n if(total>maxi):\n result = (x,y)\n maxi = total\n return result\n\ndef Play(Game): \n \n x,y = Game.PlayerX, Game.PlayerY\n\n Game.Grille[x,y] = 2 # laisse la trace de la moto\n\n x,y = MovePlayerWithIA(Game)\n if x == None or y == None :\n # collision détectée\n return True # partie terminée\n else :\n Game.PlayerX = x # valide le déplacement\n Game.PlayerY = y # valide le déplacement\n Game.Score += 1\n return False # la partie continue\n \n\n################################################################################\n \nCurrentGame = GameInit.copy()\n \n\ndef Partie():\n Tstart = time.time()\n PartieTermine = Play(CurrentGame)\n print(time.time() - Tstart)\n if not PartieTermine :\n Affiche(CurrentGame)\n # rappelle la fonction Partie() dans 30ms\n # entre temps laisse l'OS réafficher l'interface\n Window.after(1000,Partie) \n else :\n AfficheScore(CurrentGame)\n\n\n#####################################################################################\n#\n# Mise en place de l'interface - ne pas toucher\n\nAfficherPage(0)\nWindow.after(100,Partie)\nWindow.mainloop()\n \n\n \n \n\n \n \n\n",
"step-ids": [
8,
11,
14,
15,
17
]
}
|
[
8,
11,
14,
15,
17
] |
import hashlib
import sys
def getHashcode(string):
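    # Brute-force search: hash consecutive integers with MD5 until the hex
    # digest starts with the given prefix, then print that integer and exit.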
for i in range(10000000000):
hash_md5 = hashlib.md5(str(i).encode('utf-8'))
res = hash_md5.hexdigest()
if res[0:len(string)] == string:
print(i)
exit()
if __name__ == '__main__':
getHashcode(sys.argv[1])
|
normal
|
{
"blob_id": "4c8e3c21dd478606cf09f2e97dc9deed6597dae5",
"index": 4375,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getHashcode(string):\n for i in range(10000000000):\n hash_md5 = hashlib.md5(str(i).encode('utf-8'))\n res = hash_md5.hexdigest()\n if res[0:len(string)] == string:\n print(i)\n exit()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getHashcode(string):\n for i in range(10000000000):\n hash_md5 = hashlib.md5(str(i).encode('utf-8'))\n res = hash_md5.hexdigest()\n if res[0:len(string)] == string:\n print(i)\n exit()\n\n\nif __name__ == '__main__':\n getHashcode(sys.argv[1])\n",
"step-4": "import hashlib\nimport sys\n\n\ndef getHashcode(string):\n for i in range(10000000000):\n hash_md5 = hashlib.md5(str(i).encode('utf-8'))\n res = hash_md5.hexdigest()\n if res[0:len(string)] == string:\n print(i)\n exit()\n\n\nif __name__ == '__main__':\n getHashcode(sys.argv[1])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
l = int(input("Enter lower range: "))
u = int(input("Enter upper range: "))
if l <= 0:
    print("invalid")
if u <= 0:
    print("invalid")
# An Armstrong number equals the sum of its digits, each raised to the power
# of the digit count; check every number in the requested range.
for num in range(l, u + 1):
    n = len(str(num))
    sum = 0
    temp = num
    while temp > 0:
        digit = temp % 10
        sum += digit ** n
        temp //= 10
    if num == sum:
        print(num)
|
normal
|
{
"blob_id": "42fa0aa98e2d3336bdb56cba97596d8532d46cb4",
"index": 2896,
"step-1": "l = int(input(\"Enter lower range: \"))\nu = int(input(\"Enter upper range: \"))\nif(l<=0):\n print \"invalid\"\nif (u<=0):\n print \"invalid\"\n for num in range(l,u+1):\n n = len(str(num))\n sum = 0\n temp = num\n while temp > 0:\n digit = temp % 10\n sum += digit ** n\n temp //= 10\n if num == sum:\n print(num)\n \n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/local/bin/python
''' side_on.py
Open a 3d trajectory file (x y z) and produce a side-on plot of the
y-z plane, with straight line between start and end and a virtual
wall superimposed at 10 yards.
arg1 = infile
arg2 = optional outfile
'''
import sys
import matplotlib.pyplot as plt
infilename = sys.argv[1]
outfilename = None
try:
outfilename = sys.argv[2]
except IndexError:
pass
with open(infilename) as datafile:
data = datafile.read()
datafile.close()
data = data.split('\n')
# get rid of any empty line at the end of file
if data[-1] in ['\n', '\r\n', '']:
data.pop(-1)
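# Each line holds whitespace-separated x, y, z values; split them into columns.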
x = [row.split()[0] for row in data]
y = [row.split()[1] for row in data]
z = [row.split()[2] for row in data]
# Get goalpost corner points
bly = y.pop(0)
blz = z.pop(0)
tly = y.pop(0)
tlz = z.pop(0)
try_ = y.pop(0)
trz = z.pop(0)
bry = y.pop(0)
brz = z.pop(0)
max_height = max(y)
# Work out average depth of goalposts
avgz = (float(blz) + float(tlz) + float(trz) + float(brz)) / 4
fig = plt.figure('Side On Projection with Virtual Wall')
ax = fig.add_subplot(111, aspect='equal')
string = "Maximum height: " + str(max_height) + "m"
# annotate with maximum height
trans = ax.get_xaxis_transform()
ann = ax.annotate(string, xy=(8, -1), xycoords=trans)
ax.set_xlabel("Distance Travelled to Goal / m")
ax.set_ylabel("Height / m")
ax.plot(z, y, 'k.')
# Draw a red 2.14m wall at 9.14m
ax.plot([9.14, 9.14], [0, 1.82], c='r', linewidth=2)
ax.plot([avgz, avgz], [0, 2.44], c='k', linewidth=2)
plt.show()
# Save it if necessary
if outfilename is not None:
print "Save:", outfilename
fig.savefig(outfilename, bbox_inches='tight')
|
normal
|
{
"blob_id": "146aca6c7da17ddccb815638292cbcdda66f28e6",
"index": 7035,
"step-1": "#!/usr/local/bin/python\n\n''' side_on.py\n\n Open a 3d trajectory file (x y z) and produce a side-on plot of the\n y-z plane, with straight line between start and end and a virtual\n wall superimposed at 10 yards.\n\n arg1 = infile\n arg2 = optional outfile\n'''\n\nimport sys\nimport matplotlib.pyplot as plt\n\ninfilename = sys.argv[1]\noutfilename = None\ntry:\n outfilename = sys.argv[2]\nexcept IndexError:\n pass\n\nwith open(infilename) as datafile:\n data = datafile.read()\n datafile.close()\n\ndata = data.split('\\n')\n\n# get rid of any empty line at the end of file\nif data[-1] in ['\\n', '\\r\\n', '']:\n data.pop(-1)\n\nx = [row.split()[0] for row in data]\ny = [row.split()[1] for row in data]\nz = [row.split()[2] for row in data]\n\n# Get goalpost corner points\nbly = y.pop(0)\nblz = z.pop(0)\n\ntly = y.pop(0)\ntlz = z.pop(0)\n\ntry_ = y.pop(0)\ntrz = z.pop(0)\n\nbry = y.pop(0)\nbrz = z.pop(0)\n\nmax_height = max(y)\n\n# Work out everage depth of goalposts\navgz = (float(blz) + float(tlz) + float(trz) + float(brz)) / 4\n\nfig = plt.figure('Side On Projection with Virtual Wall')\nax = fig.add_subplot(111, aspect='equal')\n\nstring = \"Maximum height: \" + str(max_height) + \"m\"\n\n# annotate with maximum height\ntrans = ax.get_xaxis_transform()\nann = ax.annotate(string, xy=(8, -1), xycoords=trans)\n\nax.set_xlabel(\"Distance Travelled to Goal / m\")\nax.set_ylabel(\"Height / m\")\nax.plot(z, y, 'k.')\n\n# Draw a red 2.14m wall at 9.14m\nax.plot([9.14, 9.14], [0, 1.82], c='r', linewidth=2)\nax.plot([avgz, avgz], [0, 2.44], c='k', linewidth=2)\n\nplt.show()\n\n# Save it if necessary\nif outfilename is not None:\n print \"Save:\", outfilename\n fig.savefig(outfilename, bbox_inches='tight')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import time
import ephem
import serial
import nmea
import orientation
import sys
import threading
from geomag import geomag
#Constants
initial_az = 180
initial_alt = 90
min_elevation = 10.0
sleep_time = 1.0
unwind_threshold = 180
sleep_on_unwind = 45.0
last_lon = '-88.787'
last_lat = '41.355'
last_heading = 0.0
mount_port = '/dev/ttyUSB0'
arduino_port = '/dev/ttyACM0'
class SerialTester:
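    # Dummy serial port that just prints writes and returns nothing on read,
    # so the script can be exercised without the real hardware attached.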
def write(self,line):
print(line)
def read(self, num):
return
class Antenna:
azimuth = initial_az
altitude = initial_alt
parked = True
def set_position(self, az, alt):
self.azimuth = az
self.altitude = alt
az_int = round(az)
alt_int = round(alt)
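        # Send the target azimuth/altitude to the mount, then command the slew.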
ser.write(":Sz " + str(az_int) + "*00:00#")
ser.write(":Sa +" + str(alt_int) + "*00:00#")
ser.write(":MS#")
ser.read(64)
def park(self):
if (self.parked):
print('Antenna Parked')
else:
print('Parking Antenna')
self.set_position(initial_az, initial_alt)
self.parked = True
def move(self, az, alt):
if (self.parked):
self.parked = False
# Unwrap Cable if Azimuth will cross through True North
# In the above case, Set Azimuth to 180 Degrees, then pick up
# normal tracking
# Then sleep 45 seconds to give the positioner time to
# reposition
if ((self.azimuth - az) > unwind_threshold):
self.set_position(initial_az, self.altitude)
print('Repositioning to unwrap cable')
time.sleep(sleep_on_unwind)
else:
print('Tracking Mode')
self.set_position(az, alt)
def reset():
obs = ephem.Observer()
#Set LAT/LON Coordinates to IMSA's location
obs.date = ephem.now()
obs.lon = last_lon
obs.lat = last_lat
obs.elevation = 0.0
return obs
def update_gps(gprmc, obs):
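    # Return a copy of the observer, refreshed with the GPS date and lat/lon
    # when the sentence has a valid fix and checksum.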
obsc = obs.copy()
try:
if gprmc.is_fixed() and gprmc.checksum():
datetime = gprmc.get_date() + " " + gprmc.get_time()
obsc.date = datetime
obsc.lat = str(gprmc.get_lat())
last_lat = str(gprmc.get_lat())
obsc.lon = str(gprmc.get_lon())
last_lon = str(gprmc.get_lon())
return obsc
except:
return obs
def setup_serial(port, baud):
# Set Serial Port - USB0
ser = serial.Serial(port, baud)
print("Port used:" + ser.name)
return ser
# return SerialTester()
def setup_satellite():
# Read in TLE for target satellite ICO F2
icof2 = ephem.readtle('ICO F2',
'1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997',
'2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058')
return icof2
def to_degrees(radians):
return radians / ephem.degree
def get_sat_position(icof2, home):
icof2.compute(home)
icof2_az = to_degrees(icof2.az)
icof2_alt = to_degrees(icof2.alt)
print('Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' % (icof2_az, icof2_alt))
return icof2_az, icof2_alt
def read_message(port):
while True:
try:
line = port.readline().decode("ascii").replace('\r', '').replace('\n', '')
except:
line = ""
if len(line) > 0 and line[0] == "$":
return line
def nmea_tester(sentence):
mes = nmea.nmea(sentence)
print("Checksum: ")
print(mes.checksum())
print("Reformatted Date & Time: ")
print(mes.get_date())
print(mes.get_time())
print("Lat, Lon: ")
print(str(mes.get_lat()) + ", " + str(mes.get_lon()))
print("Heading, MagVar")
print(str(mes.get_magnetic_heading()) + ", " + str(mes.get_magnetic_var()))
def arduino_tester():
ard = setup_serial(arduino_port, 115200)
icof2 = setup_satellite()
while True:
try:
            # Use the helpers defined above; take the heading from the parsed NMEA sentence.
            line = read_message(ard)
            home = reset()
            msg = nmea.nmea(line)
            home = update_gps(msg, home)
            heading = msg.get_magnetic_heading()
print(home.lat)
print(home.lon)
print(home.date)
print(heading)
except:
break
def display_stats(orient, position, obs):
try:
print("\n"*65)
magvar = get_magnetic_var(float(last_lat), float(last_lon))
print(''' _.:::::._
.:::'_|_':::.
/::' --|-- '::\\
|:" .---"---. ':|
|: ( O R E O ) :|
|:: `-------' ::|
\:::.......:::/
':::::::::::'
`'"""'`\n\n''')
print("Time: {}\n".format(ephem.now()))
print('GPS\n===\nFix: {fix}, Lat: {lat}, Lon: {lon}'
.format(fix = position.is_fixed(), lat = obs.lat, lon = obs.lon))
print(position.unparsed)
print("Sensor\n===")
print('Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, '\
'Roll: {roll:7.2f}\n---'.format(heading = orient.get_heading(),
pitch = orient.get_pitch(),
roll = orient.get_roll()))
print('CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]},'\
' Acc: {cal[2]}, Mag: {cal[3]}\n'
.format(cal=orient.get_calibration()))
print("\nMagnetic Declination: {magvar:7.2f}, "
"Adjusted Heading: {true_heading:7.2f}"
.format(magvar = magvar,
true_heading= (orient.get_heading() +
magvar+720)%360))
print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'
.format(bearing = position.get_bearing(),
speed = position.get_speed()))
except:
pass
def get_magnetic_var(lat, lon):
gm = geomag.GeoMag()
magobj = gm.GeoMag(lat, lon)
return magobj.dec
home = reset()
ard = setup_serial(arduino_port, 115200)
counter = time.time()
f = open("logs/log_"+str(float(ephem.now()))+".csv", 'w')
f.write("Epoch Time,Speed,Sensor,GPS,Waypoint\n")
orient = orientation.orientation("$IMU,0,0,0,0,0,0,0,0,0")
position = nmea.nmea("$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")
magvar = get_magnetic_var(float(last_lat), float(last_lon))
class myThread(threading.Thread):
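    # Console-input thread: each entry advances the waypoint letter written to
    # the log; entering 'q' stops logging.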
def __init__(self):
threading.Thread.__init__(self)
def run(self):
global val
global ii
val = '@'
ii = ''
while True:
ii = input()
if ii == "q":
break
val = chr(ord(val) + 1)
pass
thread1 = myThread()
thread1.start()
while True:
mes = (read_message(ard))
if mes[:2] == "$G":
try:
position = nmea.nmea(mes)
except:
pass
elif mes[:2] == "$I":
try:
orient = orientation.orientation(mes)
except:
pass
# home.date = "2016-06-28 12:00:00"
# Operate the antenna if the satellite's elevation is greater than 10
# degrees
# If the elevation IS above 10 degrees and the antenna is parked, then
# unlatch the park_latch variable
home = update_gps(position, home)
home.date = ephem.now()
magvar = get_magnetic_var(float(last_lat), float(last_lon))
display_stats(orient, position, home)
print(val)
if time.time() - counter >= 1.0:
counter = time.time()
try:
f.write(str(ephem.now())+",")
f.write(str(position.get_speed())+",")
f.write(str(orient.get_heading())+",")
f.write(str(position.get_bearing())+",")
f.write(val+"\n")
except:
f.write("x\n")
if ii == "q":
f.close()
break
''' icof2_az, icof2_alt = get_sat_position(icof2, home)
if (icof2_alt >= min_elevation):
antenna.set_position(icof2_az - heading, icof2_alt)
else:
antenna.park()'''
|
normal
|
{
"blob_id": "468b5bd8d7b045ca8dd46c76a1829fc499e16950",
"index": 5756,
"step-1": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\n<mask token>\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\n<mask token>\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. ':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\n<mask token>\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\n<mask token>\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. ':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\n<mask token>\n\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + ' ' + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\ndef get_sat_position(icof2, home):\n icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print(\n 'Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' %\n (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print('Checksum: ')\n print(mes.checksum())\n print('Reformatted Date & Time: ')\n print(mes.get_date())\n print(mes.get_time())\n print('Lat, Lon: ')\n print(str(mes.get_lat()) + ', ' + str(mes.get_lon()))\n print('Heading, MagVar')\n print(str(mes.get_magnetic_heading()) + ', ' + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. 
':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n<mask token>\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\n<mask token>\n",
"step-4": "import time\nimport ephem\nimport serial\nimport nmea\nimport orientation\nimport sys\nimport threading\nfrom geomag import geomag\ninitial_az = 180\ninitial_alt = 90\nmin_elevation = 10.0\nsleep_time = 1.0\nunwind_threshold = 180\nsleep_on_unwind = 45.0\nlast_lon = '-88.787'\nlast_lat = '41.355'\nlast_heading = 0.0\nmount_port = '/dev/ttyUSB0'\narduino_port = '/dev/ttyACM0'\n\n\nclass SerialTester:\n\n def write(self, line):\n print(line)\n\n def read(self, num):\n return\n\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(':Sz ' + str(az_int) + '*00:00#')\n ser.write(':Sa +' + str(alt_int) + '*00:00#')\n ser.write(':MS#')\n ser.read(64)\n\n def park(self):\n if self.parked:\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if self.parked:\n self.parked = False\n if self.azimuth - az > unwind_threshold:\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\n\ndef reset():\n obs = ephem.Observer()\n obs.date = ephem.now()\n obs.lon = last_lon\n obs.lat = last_lat\n obs.elevation = 0.0\n return obs\n\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + ' ' + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n ser = serial.Serial(port, baud)\n print('Port used:' + ser.name)\n return ser\n\n\ndef setup_satellite():\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997'\n ,\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058'\n )\n return icof2\n\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\n\ndef get_sat_position(icof2, home):\n icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print(\n 'Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' %\n (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode('ascii').replace('\\r', '').replace(\n '\\n', '')\n except:\n line = ''\n if len(line) > 0 and line[0] == '$':\n return line\n\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print('Checksum: ')\n print(mes.checksum())\n print('Reformatted Date & Time: ')\n print(mes.get_date())\n print(mes.get_time())\n print('Lat, Lon: ')\n print(str(mes.get_lat()) + ', ' + str(mes.get_lon()))\n print('Heading, MagVar')\n print(str(mes.get_magnetic_heading()) + ', ' + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\n\ndef display_stats(orient, position, obs):\n try:\n print('\\n' * 65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(\n \"\"\" _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. 
':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\\\:::.......:::/\n ':::::::::::'\n `'\"\"\\\"'`\n\n\"\"\"\n )\n print('Time: {}\\n'.format(ephem.now()))\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'.format(fix=\n position.is_fixed(), lat=obs.lat, lon=obs.lon))\n print(position.unparsed)\n print('Sensor\\n===')\n print(\n 'Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, Roll: {roll:7.2f}\\n---'\n .format(heading=orient.get_heading(), pitch=orient.get_pitch(),\n roll=orient.get_roll()))\n print(\n 'CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]}, Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\n '\\nMagnetic Declination: {magvar:7.2f}, Adjusted Heading: {true_heading:7.2f}'\n .format(magvar=magvar, true_heading=(orient.get_heading() +\n magvar + 720) % 360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'.format(bearing\n =position.get_bearing(), speed=position.get_speed()))\n except:\n pass\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\nhome = reset()\nard = setup_serial(arduino_port, 115200)\ncounter = time.time()\nf = open('logs/log_' + str(float(ephem.now())) + '.csv', 'w')\nf.write('Epoch Time,Speed,Sensor,GPS,Waypoint\\n')\norient = orientation.orientation('$IMU,0,0,0,0,0,0,0,0,0')\nposition = nmea.nmea('$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')\nmagvar = get_magnetic_var(float(last_lat), float(last_lon))\n\n\nclass myThread(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == 'q':\n break\n val = chr(ord(val) + 1)\n pass\n\n\nthread1 = myThread()\nthread1.start()\nwhile True:\n mes = read_message(ard)\n if mes[:2] == '$G':\n try:\n position = nmea.nmea(mes)\n except:\n pass\n elif mes[:2] == '$I':\n try:\n orient = orientation.orientation(mes)\n except:\n pass\n home = update_gps(position, home)\n home.date = ephem.now()\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n display_stats(orient, position, home)\n print(val)\n if time.time() - counter >= 1.0:\n counter = time.time()\n try:\n f.write(str(ephem.now()) + ',')\n f.write(str(position.get_speed()) + ',')\n f.write(str(orient.get_heading()) + ',')\n f.write(str(position.get_bearing()) + ',')\n f.write(val + '\\n')\n except:\n f.write('x\\n')\n if ii == 'q':\n f.close()\n break\n<mask token>\n",
"step-5": "import time\nimport ephem\nimport serial\nimport nmea\nimport orientation\nimport sys\nimport threading\nfrom geomag import geomag\n\n#Constants\ninitial_az = 180\ninitial_alt = 90\nmin_elevation = 10.0\nsleep_time = 1.0\nunwind_threshold = 180\nsleep_on_unwind = 45.0\n\nlast_lon = '-88.787'\nlast_lat = '41.355'\nlast_heading = 0.0\n\nmount_port = '/dev/ttyUSB0'\narduino_port = '/dev/ttyACM0'\n\nclass SerialTester:\n def write(self,line):\n print(line)\n\n def read(self, num):\n return\n\nclass Antenna:\n azimuth = initial_az\n altitude = initial_alt\n parked = True\n\n def set_position(self, az, alt):\n self.azimuth = az\n self.altitude = alt\n az_int = round(az)\n alt_int = round(alt)\n ser.write(\":Sz \" + str(az_int) + \"*00:00#\")\n ser.write(\":Sa +\" + str(alt_int) + \"*00:00#\")\n ser.write(\":MS#\")\n ser.read(64)\n\n def park(self):\n if (self.parked):\n print('Antenna Parked')\n else:\n print('Parking Antenna')\n self.set_position(initial_az, initial_alt)\n self.parked = True\n\n def move(self, az, alt):\n if (self.parked):\n self.parked = False\n # Unwrap Cable if Azimuth will cross through True North\n # In the above case, Set Azimuth to 180 Degrees, then pick up\n # normal tracking\n # Then sleep 45 seconds to give the positioner time to\n # reposition\n if ((self.azimuth - az) > unwind_threshold):\n self.set_position(initial_az, self.altitude)\n print('Repositioning to unwrap cable')\n time.sleep(sleep_on_unwind)\n else:\n print('Tracking Mode')\n self.set_position(az, alt)\n\ndef reset():\n obs = ephem.Observer()\n #Set LAT/LON Coordinates to IMSA's location\n obs.date = ephem.now()\n obs.lon = last_lon\n obs.lat = last_lat\n obs.elevation = 0.0\n return obs\n\ndef update_gps(gprmc, obs):\n obsc = obs.copy()\n try:\n if gprmc.is_fixed() and gprmc.checksum():\n datetime = gprmc.get_date() + \" \" + gprmc.get_time()\n obsc.date = datetime\n obsc.lat = str(gprmc.get_lat())\n last_lat = str(gprmc.get_lat())\n obsc.lon = str(gprmc.get_lon())\n last_lon = str(gprmc.get_lon())\n return obsc\n except:\n return obs\n\n\ndef setup_serial(port, baud):\n # Set Serial Port - USB0\n ser = serial.Serial(port, baud)\n print(\"Port used:\" + ser.name)\n return ser\n# return SerialTester()\n\ndef setup_satellite():\n # Read in TLE for target satellite ICO F2\n icof2 = ephem.readtle('ICO F2',\n '1 26857U 01026A 16172.60175106 -.00000043 00000-0 00000+0 0 9997',\n '2 26857 044.9783 5.1953 0013193 227.2968 127.4685 03.92441898218058')\n return icof2\n\ndef to_degrees(radians):\n return radians / ephem.degree\n\ndef get_sat_position(icof2, home):\n icof2.compute(home)\n icof2_az = to_degrees(icof2.az)\n icof2_alt = to_degrees(icof2.alt)\n print('Current Satellite Location: Azimuth %3.2f deg, Altitude %3.2f deg' % (icof2_az, icof2_alt))\n return icof2_az, icof2_alt\n\ndef read_message(port):\n while True:\n try:\n line = port.readline().decode(\"ascii\").replace('\\r', '').replace('\\n', '')\n except:\n line = \"\"\n if len(line) > 0 and line[0] == \"$\":\n return line\n\ndef nmea_tester(sentence):\n mes = nmea.nmea(sentence)\n print(\"Checksum: \")\n print(mes.checksum())\n print(\"Reformatted Date & Time: \")\n print(mes.get_date())\n print(mes.get_time())\n print(\"Lat, Lon: \")\n print(str(mes.get_lat()) + \", \" + str(mes.get_lon()))\n print(\"Heading, MagVar\")\n print(str(mes.get_magnetic_heading()) + \", \" + str(mes.get_magnetic_var()))\n\n\ndef arduino_tester():\n ard = setup_serial(arduino_port, 115200)\n icof2 = setup_satellite()\n while True:\n try:\n line = 
read_nmea(ard)\n home = reset()\n home, heading = update(nmea.nmea(line))\n print(home.lat)\n print(home.lon)\n print(home.date)\n print(heading)\n except:\n break\n\ndef display_stats(orient, position, obs):\n try:\n print(\"\\n\"*65)\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n print(''' _.:::::._\n .:::'_|_':::.\n /::' --|-- '::\\\\\n |:\" .---\"---. ':|\n |: ( O R E O ) :|\n |:: `-------' ::|\n \\:::.......:::/\n ':::::::::::'\n `'\"\"\"'`\\n\\n''')\n print(\"Time: {}\\n\".format(ephem.now()))\n\n print('GPS\\n===\\nFix: {fix}, Lat: {lat}, Lon: {lon}'\n .format(fix = position.is_fixed(), lat = obs.lat, lon = obs.lon))\n print(position.unparsed)\n\n print(\"Sensor\\n===\")\n print('Heading: {heading:7.2f}, Pitch: {pitch:7.2f}, '\\\n 'Roll: {roll:7.2f}\\n---'.format(heading = orient.get_heading(),\n pitch = orient.get_pitch(),\n roll = orient.get_roll()))\n print('CALIBRATION Sys: {cal[0]}, Gyr: {cal[1]},'\\\n ' Acc: {cal[2]}, Mag: {cal[3]}\\n'\n .format(cal=orient.get_calibration()))\n print(\"\\nMagnetic Declination: {magvar:7.2f}, \"\n \"Adjusted Heading: {true_heading:7.2f}\"\n .format(magvar = magvar,\n true_heading= (orient.get_heading() +\n magvar+720)%360))\n print('Bearing: {bearing:7.2f}, Speed: {speed:4.2f}'\n .format(bearing = position.get_bearing(),\n speed = position.get_speed()))\n except:\n pass\n \n\n\n\ndef get_magnetic_var(lat, lon):\n gm = geomag.GeoMag()\n magobj = gm.GeoMag(lat, lon)\n return magobj.dec\n\n\n\nhome = reset()\nard = setup_serial(arduino_port, 115200)\ncounter = time.time()\nf = open(\"logs/log_\"+str(float(ephem.now()))+\".csv\", 'w')\nf.write(\"Epoch Time,Speed,Sensor,GPS,Waypoint\\n\")\norient = orientation.orientation(\"$IMU,0,0,0,0,0,0,0,0,0\")\nposition = nmea.nmea(\"$GPRMC,0,V,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\")\nmagvar = get_magnetic_var(float(last_lat), float(last_lon))\n\nclass myThread(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n\n\n def run(self):\n global val\n global ii\n val = '@'\n ii = ''\n while True:\n ii = input()\n if ii == \"q\":\n break\n val = chr(ord(val) + 1)\n pass\n\nthread1 = myThread()\n\nthread1.start()\n\nwhile True:\n mes = (read_message(ard))\n if mes[:2] == \"$G\":\n try:\n position = nmea.nmea(mes)\n except:\n pass\n elif mes[:2] == \"$I\":\n try:\n orient = orientation.orientation(mes)\n except:\n pass\n # home.date = \"2016-06-28 12:00:00\"\n\n # Operate the antenna if the satellite's elevation is greater than 10\n # degrees\n # If the elevation IS above 10 degrees and the antenna is parked, then\n # unlatch the park_latch variable\n home = update_gps(position, home)\n home.date = ephem.now()\n\n magvar = get_magnetic_var(float(last_lat), float(last_lon))\n\n display_stats(orient, position, home)\n print(val)\n if time.time() - counter >= 1.0:\n counter = time.time()\n try:\n f.write(str(ephem.now())+\",\")\n f.write(str(position.get_speed())+\",\")\n f.write(str(orient.get_heading())+\",\")\n f.write(str(position.get_bearing())+\",\")\n f.write(val+\"\\n\")\n except:\n f.write(\"x\\n\")\n if ii == \"q\":\n f.close()\n break\n\n''' icof2_az, icof2_alt = get_sat_position(icof2, home)\n if (icof2_alt >= min_elevation):\n antenna.set_position(icof2_az - heading, icof2_alt)\n\n else:\n antenna.park()'''\n",
"step-ids": [
15,
18,
21,
25,
26
]
}
|
[
15,
18,
21,
25,
26
] |
import numpy as np
import matplotlib.pyplot as plt
import sympy as sp
from sympy import sympify, Symbol

curr_pos = 0
def bisection(st,maxnum,maxer,xlf,xuf):
file2 = open("test.txt","w")
file2.write("Hello World")
file2.close()
fi = open("test.txt", "w")
x=sp.Symbol('x')
y=sp.Symbol('y')
H = sympify(st)
print(H)
table = []
x1=[]
y1=[]
xu=[]
xl=[]
xks=[]
ys=[]
errors=[]
plots=[]
print(float(H.subs(x,0)))
ys.append(float(H.subs(x,xuf)))
ys.append(float(H.subs(x,xlf)))
i=0.0
err=1
maxsize=maxnum
print(maxnum)
for i in range(0, maxsize, 1):
xl.append(xlf)
xu.append(xuf)
print('xl ='+ str(xlf))
print('xu ='+ str(xuf))
if(err<=maxer):
break
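        # Bisect: take the midpoint of the current bracket [xl, xu].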
xk=xlf+xuf
xk=xk/2
print('xk ='+ str(xk))
x2=[xk,xk]
y2=[-100,100]
plots.append((x2,y2))
xks.append(xk)
if i==0:
errors.append(1.0)
print(i)
else:
err=abs((xks[i]-xks[i-1]))
print(str((xks[i]-xks[i-1])))
errors.append(err)
f=float(H.subs(x,xk))
print("fk ="+str(f))
f2=float(H.subs(x,xlf))
print("fl ="+str(f2))
f3=f*f2
ys.append(f)
print (xl[0],xu[0])
print(f)
table.append([xuf,xlf,xk])
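        # Keep the half of the bracket where f changes sign, i.e. where the root lies.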
if f3<0:
xuf=xk
else:
xlf=xk
i=min([xl[0],xu[0]])
add=(abs((xu[0])-(xl[0]))/100)
    print ("min = "+str(i)+" add = "+str(add)+ " max = "+str(max([xl[0],xu[0]])))
while i <= max([xl[0],xu[0]]):
x1.append(i)
print("x="+str(i)+ " y = "+str(float(H.subs(x,i))))
y1.append(float(H.subs(x,i)))
i=i+add
teams_list = ["Xu", "Xl", "Xr"]
row_format ="{:>15}" * (len(teams_list) + 1)
fi.write(row_format.format("", *teams_list))
print (row_format.format("", *teams_list))
for row in table:
print (row_format.format("", *row))
fi.write(row_format.format("", *row))
fi.close()
def key_event(e):
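        # Left/right arrow keys step backward/forward through the stored iterations.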
global curr_pos
if e.key == "right":
curr_pos = curr_pos + 1
elif e.key == "left":
curr_pos = curr_pos - 1
else:
return
curr_pos = curr_pos % len(plots)
axes = plt.gca()
ax.cla()
axes.set_xlim([xl[0],xu[0]])
axes.set_ylim([min(ys),max(ys)])
ax.plot([xl[curr_pos],xl[curr_pos]], [-200,200],'r',plots2[0][0], plots2[0][1],'g',[xu[curr_pos],xu[curr_pos]],[-200,200],'b',[-200,200],[0,0],'y')
plt.title("Iteration "+str(curr_pos+1)+" xr= "+str(xks[curr_pos])+" errors= "+str(errors[curr_pos]*100)+"%")
fig.canvas.draw()
plots2 = [(x1,y1)]
curr_pos = 0
print(xl)
fig = plt.figure()
axes = plt.gca()
axes.set_xlim([xl[0],xu[0]])
axes.set_ylim([min(ys),max(ys)])
fig.canvas.mpl_connect('key_press_event', key_event)
ax = fig.add_subplot(111)
plt.title("Iteration "+str(curr_pos+1)+" xr= "+str(xks[curr_pos])+" errors= "+str(errors[curr_pos]*100)+"%")
ax.plot([xl[curr_pos],xl[curr_pos]], [-200,200],'r',plots2[0][0], plots2[0][1],'g',[xu[curr_pos],xu[curr_pos]],[-200,200],'b',[-200,200],[0,0],'y')
plt.show()
bisection('(3/2)*(x)-6-(1/2)*sin(2*x)',50,1*10**-3,4,5)
|
normal
|
{
"blob_id": "a1c1f18e7b95f36a214a1a16f2434be2825829c3",
"index": 3110,
"step-1": "<mask token>\n\n\ndef bisection(st, maxnum, maxer, xlf, xuf):\n file2 = open('test.txt', 'w')\n file2.write('Hello World')\n file2.close()\n fi = open('test.txt', 'w')\n x = sp.Symbol('x')\n y = sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1 = []\n y1 = []\n xu = []\n xl = []\n xks = []\n ys = []\n errors = []\n plots = []\n print(float(H.subs(x, 0)))\n ys.append(float(H.subs(x, xuf)))\n ys.append(float(H.subs(x, xlf)))\n i = 0.0\n err = 1\n maxsize = maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl =' + str(xlf))\n print('xu =' + str(xuf))\n if err <= maxer:\n break\n xk = xlf + xuf\n xk = xk / 2\n print('xk =' + str(xk))\n x2 = [xk, xk]\n y2 = [-100, 100]\n plots.append((x2, y2))\n xks.append(xk)\n if i == 0:\n errors.append(1.0)\n print(i)\n else:\n err = abs(xks[i] - xks[i - 1])\n print(str(xks[i] - xks[i - 1]))\n errors.append(err)\n f = float(H.subs(x, xk))\n print('fk =' + str(f))\n f2 = float(H.subs(x, xlf))\n print('fl =' + str(f2))\n f3 = f * f2\n ys.append(f)\n print(xl[0], xu[0])\n print(f)\n table.append([xuf, xlf, xk])\n if f3 < 0:\n xuf = xk\n else:\n xlf = xk\n i = min([xl[0], xu[0]])\n add = abs(xu[0] - xl[0]) / 100\n print('min = ' + str(i) + ' add = ' + str(add) + 'max = ' + str(max([xl\n [0], xu[0]])))\n while i <= max([xl[0], xu[0]]):\n x1.append(i)\n print('x=' + str(i) + ' y = ' + str(float(H.subs(x, i))))\n y1.append(float(H.subs(x, i)))\n i = i + add\n teams_list = ['Xu', 'Xl', 'Xr']\n row_format = '{:>15}' * (len(teams_list) + 1)\n fi.write(row_format.format('', *teams_list))\n print(row_format.format('', *teams_list))\n for row in table:\n print(row_format.format('', *row))\n fi.write(row_format.format('', *row))\n fi.close()\n\n def key_event(e):\n global curr_pos\n if e.key == 'right':\n curr_pos = curr_pos + 1\n elif e.key == 'left':\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0\n ], plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200],\n 'b', [-200, 200], [0, 0], 'y')\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[\n curr_pos]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n fig.canvas.draw()\n plots2 = [(x1, y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[curr_pos\n ]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0],\n plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200], 'b',\n [-200, 200], [0, 0], 'y')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef bisection(st, maxnum, maxer, xlf, xuf):\n file2 = open('test.txt', 'w')\n file2.write('Hello World')\n file2.close()\n fi = open('test.txt', 'w')\n x = sp.Symbol('x')\n y = sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1 = []\n y1 = []\n xu = []\n xl = []\n xks = []\n ys = []\n errors = []\n plots = []\n print(float(H.subs(x, 0)))\n ys.append(float(H.subs(x, xuf)))\n ys.append(float(H.subs(x, xlf)))\n i = 0.0\n err = 1\n maxsize = maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl =' + str(xlf))\n print('xu =' + str(xuf))\n if err <= maxer:\n break\n xk = xlf + xuf\n xk = xk / 2\n print('xk =' + str(xk))\n x2 = [xk, xk]\n y2 = [-100, 100]\n plots.append((x2, y2))\n xks.append(xk)\n if i == 0:\n errors.append(1.0)\n print(i)\n else:\n err = abs(xks[i] - xks[i - 1])\n print(str(xks[i] - xks[i - 1]))\n errors.append(err)\n f = float(H.subs(x, xk))\n print('fk =' + str(f))\n f2 = float(H.subs(x, xlf))\n print('fl =' + str(f2))\n f3 = f * f2\n ys.append(f)\n print(xl[0], xu[0])\n print(f)\n table.append([xuf, xlf, xk])\n if f3 < 0:\n xuf = xk\n else:\n xlf = xk\n i = min([xl[0], xu[0]])\n add = abs(xu[0] - xl[0]) / 100\n print('min = ' + str(i) + ' add = ' + str(add) + 'max = ' + str(max([xl\n [0], xu[0]])))\n while i <= max([xl[0], xu[0]]):\n x1.append(i)\n print('x=' + str(i) + ' y = ' + str(float(H.subs(x, i))))\n y1.append(float(H.subs(x, i)))\n i = i + add\n teams_list = ['Xu', 'Xl', 'Xr']\n row_format = '{:>15}' * (len(teams_list) + 1)\n fi.write(row_format.format('', *teams_list))\n print(row_format.format('', *teams_list))\n for row in table:\n print(row_format.format('', *row))\n fi.write(row_format.format('', *row))\n fi.close()\n\n def key_event(e):\n global curr_pos\n if e.key == 'right':\n curr_pos = curr_pos + 1\n elif e.key == 'left':\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0\n ], plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200],\n 'b', [-200, 200], [0, 0], 'y')\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[\n curr_pos]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n fig.canvas.draw()\n plots2 = [(x1, y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[curr_pos\n ]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0],\n plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200], 'b',\n [-200, 200], [0, 0], 'y')\n plt.show()\n\n\nbisection('(3/2)*(x)-6-(1/2)*sin(2*x)', 50, 1 * 10 ** -3, 4, 5)\n",
"step-3": "<mask token>\ncurr_pos = 0\n<mask token>\n\n\ndef bisection(st, maxnum, maxer, xlf, xuf):\n file2 = open('test.txt', 'w')\n file2.write('Hello World')\n file2.close()\n fi = open('test.txt', 'w')\n x = sp.Symbol('x')\n y = sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1 = []\n y1 = []\n xu = []\n xl = []\n xks = []\n ys = []\n errors = []\n plots = []\n print(float(H.subs(x, 0)))\n ys.append(float(H.subs(x, xuf)))\n ys.append(float(H.subs(x, xlf)))\n i = 0.0\n err = 1\n maxsize = maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl =' + str(xlf))\n print('xu =' + str(xuf))\n if err <= maxer:\n break\n xk = xlf + xuf\n xk = xk / 2\n print('xk =' + str(xk))\n x2 = [xk, xk]\n y2 = [-100, 100]\n plots.append((x2, y2))\n xks.append(xk)\n if i == 0:\n errors.append(1.0)\n print(i)\n else:\n err = abs(xks[i] - xks[i - 1])\n print(str(xks[i] - xks[i - 1]))\n errors.append(err)\n f = float(H.subs(x, xk))\n print('fk =' + str(f))\n f2 = float(H.subs(x, xlf))\n print('fl =' + str(f2))\n f3 = f * f2\n ys.append(f)\n print(xl[0], xu[0])\n print(f)\n table.append([xuf, xlf, xk])\n if f3 < 0:\n xuf = xk\n else:\n xlf = xk\n i = min([xl[0], xu[0]])\n add = abs(xu[0] - xl[0]) / 100\n print('min = ' + str(i) + ' add = ' + str(add) + 'max = ' + str(max([xl\n [0], xu[0]])))\n while i <= max([xl[0], xu[0]]):\n x1.append(i)\n print('x=' + str(i) + ' y = ' + str(float(H.subs(x, i))))\n y1.append(float(H.subs(x, i)))\n i = i + add\n teams_list = ['Xu', 'Xl', 'Xr']\n row_format = '{:>15}' * (len(teams_list) + 1)\n fi.write(row_format.format('', *teams_list))\n print(row_format.format('', *teams_list))\n for row in table:\n print(row_format.format('', *row))\n fi.write(row_format.format('', *row))\n fi.close()\n\n def key_event(e):\n global curr_pos\n if e.key == 'right':\n curr_pos = curr_pos + 1\n elif e.key == 'left':\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0\n ], plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200],\n 'b', [-200, 200], [0, 0], 'y')\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[\n curr_pos]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n fig.canvas.draw()\n plots2 = [(x1, y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[curr_pos\n ]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0],\n plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200], 'b',\n [-200, 200], [0, 0], 'y')\n plt.show()\n\n\nbisection('(3/2)*(x)-6-(1/2)*sin(2*x)', 50, 1 * 10 ** -3, 4, 5)\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sympy as sp\nimport matplotlib.pyplot as plt\nfrom sympy import sympify, Symbol\ncurr_pos = 0\nimport numpy as np\n\n\ndef bisection(st, maxnum, maxer, xlf, xuf):\n file2 = open('test.txt', 'w')\n file2.write('Hello World')\n file2.close()\n fi = open('test.txt', 'w')\n x = sp.Symbol('x')\n y = sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1 = []\n y1 = []\n xu = []\n xl = []\n xks = []\n ys = []\n errors = []\n plots = []\n print(float(H.subs(x, 0)))\n ys.append(float(H.subs(x, xuf)))\n ys.append(float(H.subs(x, xlf)))\n i = 0.0\n err = 1\n maxsize = maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl =' + str(xlf))\n print('xu =' + str(xuf))\n if err <= maxer:\n break\n xk = xlf + xuf\n xk = xk / 2\n print('xk =' + str(xk))\n x2 = [xk, xk]\n y2 = [-100, 100]\n plots.append((x2, y2))\n xks.append(xk)\n if i == 0:\n errors.append(1.0)\n print(i)\n else:\n err = abs(xks[i] - xks[i - 1])\n print(str(xks[i] - xks[i - 1]))\n errors.append(err)\n f = float(H.subs(x, xk))\n print('fk =' + str(f))\n f2 = float(H.subs(x, xlf))\n print('fl =' + str(f2))\n f3 = f * f2\n ys.append(f)\n print(xl[0], xu[0])\n print(f)\n table.append([xuf, xlf, xk])\n if f3 < 0:\n xuf = xk\n else:\n xlf = xk\n i = min([xl[0], xu[0]])\n add = abs(xu[0] - xl[0]) / 100\n print('min = ' + str(i) + ' add = ' + str(add) + 'max = ' + str(max([xl\n [0], xu[0]])))\n while i <= max([xl[0], xu[0]]):\n x1.append(i)\n print('x=' + str(i) + ' y = ' + str(float(H.subs(x, i))))\n y1.append(float(H.subs(x, i)))\n i = i + add\n teams_list = ['Xu', 'Xl', 'Xr']\n row_format = '{:>15}' * (len(teams_list) + 1)\n fi.write(row_format.format('', *teams_list))\n print(row_format.format('', *teams_list))\n for row in table:\n print(row_format.format('', *row))\n fi.write(row_format.format('', *row))\n fi.close()\n\n def key_event(e):\n global curr_pos\n if e.key == 'right':\n curr_pos = curr_pos + 1\n elif e.key == 'left':\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0\n ], plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200],\n 'b', [-200, 200], [0, 0], 'y')\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[\n curr_pos]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n fig.canvas.draw()\n plots2 = [(x1, y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0], xu[0]])\n axes.set_ylim([min(ys), max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title('Iteration ' + str(curr_pos + 1) + ' xr= ' + str(xks[curr_pos\n ]) + ' errors= ' + str(errors[curr_pos] * 100) + '%')\n ax.plot([xl[curr_pos], xl[curr_pos]], [-200, 200], 'r', plots2[0][0],\n plots2[0][1], 'g', [xu[curr_pos], xu[curr_pos]], [-200, 200], 'b',\n [-200, 200], [0, 0], 'y')\n plt.show()\n\n\nbisection('(3/2)*(x)-6-(1/2)*sin(2*x)', 50, 1 * 10 ** -3, 4, 5)\n",
"step-5": "import numpy as np\nimport matplotlib.pyplot as plt\nimport sympy as sp\nimport matplotlib.pyplot as plt\nfrom sympy import sympify, Symbol\ncurr_pos = 0\nimport numpy as np\n\ndef bisection(st,maxnum,maxer,xlf,xuf):\n file2 = open(\"test.txt\",\"w\") \n file2.write(\"Hello World\") \n file2.close() \n fi = open(\"test.txt\", \"w\")\n x=sp.Symbol('x')\n y=sp.Symbol('y')\n H = sympify(st)\n print(H)\n table = []\n x1=[]\n y1=[]\n xu=[]\n xl=[]\n xks=[]\n ys=[]\n errors=[]\n plots=[]\n print(float(H.subs(x,0)))\n ys.append(float(H.subs(x,xuf)))\n ys.append(float(H.subs(x,xlf)))\n i=0.0\n err=1\n maxsize=maxnum\n print(maxnum)\n for i in range(0, maxsize, 1):\n xl.append(xlf)\n xu.append(xuf)\n print('xl ='+ str(xlf))\n print('xu ='+ str(xuf))\n if(err<=maxer):\n break\n xk=xlf+xuf\n xk=xk/2\n print('xk ='+ str(xk))\n x2=[xk,xk]\n y2=[-100,100]\n plots.append((x2,y2))\n xks.append(xk)\n if i==0:\n errors.append(1.0)\n print(i)\n else:\n err=abs((xks[i]-xks[i-1]))\n print(str((xks[i]-xks[i-1])))\n errors.append(err)\n f=float(H.subs(x,xk))\n print(\"fk =\"+str(f))\n f2=float(H.subs(x,xlf))\n print(\"fl =\"+str(f2))\n f3=f*f2\n ys.append(f)\n print (xl[0],xu[0])\n print(f)\n table.append([xuf,xlf,xk])\n if f3<0:\n xuf=xk\n else:\n xlf=xk \n i=min([xl[0],xu[0]])\n add=(abs((xu[0])-(xl[0]))/100)\n print (\"min = \"+str(i)+\" add = \"+str(add)+ \"max = \"+str(max([xl[0],xu[0]])))\n while i <= max([xl[0],xu[0]]):\n x1.append(i)\n print(\"x=\"+str(i)+ \" y = \"+str(float(H.subs(x,i))))\n y1.append(float(H.subs(x,i)))\n i=i+add\n teams_list = [\"Xu\", \"Xl\", \"Xr\"]\n row_format =\"{:>15}\" * (len(teams_list) + 1)\n fi.write(row_format.format(\"\", *teams_list))\n print (row_format.format(\"\", *teams_list))\n for row in table:\n print (row_format.format(\"\", *row))\n fi.write(row_format.format(\"\", *row))\n fi.close()\n def key_event(e):\n global curr_pos\n\n if e.key == \"right\":\n curr_pos = curr_pos + 1\n elif e.key == \"left\":\n curr_pos = curr_pos - 1\n else:\n return\n curr_pos = curr_pos % len(plots)\n axes = plt.gca()\n ax.cla()\n axes.set_xlim([xl[0],xu[0]])\n axes.set_ylim([min(ys),max(ys)])\n ax.plot([xl[curr_pos],xl[curr_pos]], [-200,200],'r',plots2[0][0], plots2[0][1],'g',[xu[curr_pos],xu[curr_pos]],[-200,200],'b',[-200,200],[0,0],'y')\n plt.title(\"Iteration \"+str(curr_pos+1)+\" xr= \"+str(xks[curr_pos])+\" errors= \"+str(errors[curr_pos]*100)+\"%\")\n fig.canvas.draw() \n plots2 = [(x1,y1)]\n curr_pos = 0\n print(xl)\n fig = plt.figure()\n axes = plt.gca()\n axes.set_xlim([xl[0],xu[0]])\n axes.set_ylim([min(ys),max(ys)])\n fig.canvas.mpl_connect('key_press_event', key_event)\n ax = fig.add_subplot(111)\n plt.title(\"Iteration \"+str(curr_pos+1)+\" xr= \"+str(xks[curr_pos])+\" errors= \"+str(errors[curr_pos]*100)+\"%\")\n ax.plot([xl[curr_pos],xl[curr_pos]], [-200,200],'r',plots2[0][0], plots2[0][1],'g',[xu[curr_pos],xu[curr_pos]],[-200,200],'b',[-200,200],[0,0],'y')\n plt.show()\nbisection('(3/2)*(x)-6-(1/2)*sin(2*x)',50,1*10**-3,4,5)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Set up path references and dependencies.
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
sys.path.append(os.path.join(parentdir, "utils"))
# Import important helper libraries.
from flask import Flask, render_template
import numpy as np
import plotly
import plotly.graph_objs as pgo
import json
# Import modules created to serve the project.
#from utils import DB_interface as DBI
#from utils import path_config as pc
from utils import model
app = Flask(__name__)
# Global variable
#DAYS = 500
@app.route('/')
def index():
result_plot = compute_model_output()
return render_template("index.html", graphJSON=result_plot)
def compute_model_output():
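    # Run a single SEIR simulation and return the four compartment curves
    # as Plotly scatter traces serialized to JSON for the template.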
num_steps = 500
init_inf = 5
t_inc = 5
t_inf = 9
r_t = 2.5 #np.random.normal(2.5, 1.0)
rho = 1.0
kappa_0 = 0.0
kappa = 0.0
n_pop = 2000
seir = model.SEIRModel(num_steps,n_pop, init_inf, t_inc, t_inf, r_t, rho, kappa_0, kappa)
s, e, i, r = seir.run()
days = np.linspace(0, num_steps, num_steps)
trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(color='rgba(128, 223, 255, 1)'))
trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(color='rgba(200, 100, 0, 1)'))
trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(color='rgba(180, 0, 0, 1)'))
trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(color='rgba(0, 100, 50, 1)'))
data = [trace_0, trace_1, trace_2, trace_3]
graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
return (graphJSON)
"""
@app.callback(
Output('test','children')
[Input('val_num_steps', 'num_steps')]
)
@app.route('/start_bckgrnd_update')
def start_bckgrnd_update():
p = Process(target=bckgrnd_update, name="background_update")
p.start()
#p.join()
now = datetime.now()
user = {'username': 'MSE!'}
posts = [
{
'author': {'username': 'Paul'},
'body': 'Henrik has the update just been started?'
},
{
'author': {'username': 'Henrik'},
'body': 'You bet your sweet ass it has!'
},
{
'author': {'username': 'Paul'},
'body': 'So what time was is when it started?'
},
{
'author': {'username': 'Henrik'},
'body': 'It was exactly %s !' % now
}
]
return render_template("start_bckgrnd_update.html", title="home", user = user, posts=posts)
def bckgrnd_update():
global updating
updating = True
while updating:
print(datetime.now())
print("updating RKI DBs now")
DB = DBI.DB_interface()
DB.update_RKI_csv()
DB.update_RKI_landkreise_csv()
day = 24 * 3600
time.sleep(day)
"""
if __name__ == "__main__":
app.run(debug=True)
|
normal
|
{
"blob_id": "7d099012584b84e9767bf0ce9d9df1596ca3bbab",
"index": 542,
"step-1": "<mask token>\n\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\n<mask token>\n\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.\n currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\n<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "import os, sys, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.\n currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\nfrom flask import Flask, render_template\nimport numpy as np\nimport plotly\nimport plotly.graph_objs as pgo\nimport json\nfrom utils import model\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<mask token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "# Set up path references and dependencies.\nimport os, sys, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, \"utils\"))\n\n# Import important helper libraries.\nfrom flask import Flask, render_template\nimport numpy as np\n\nimport plotly\nimport plotly.graph_objs as pgo\nimport json\n\n# Import modules created to serve the project.\n#from utils import DB_interface as DBI\n#from utils import path_config as pc\nfrom utils import model\n\napp = Flask(__name__)\n\n# Global variable\n#DAYS = 500\n\n@app.route('/')\ndef index():\n result_plot = compute_model_output()\n return render_template(\"index.html\", graphJSON=result_plot)\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5 #np.random.normal(2.5, 1.0)\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n\n n_pop = 2000\n\n seir = model.SEIRModel(num_steps,n_pop, init_inf, t_inc, t_inf, r_t, rho, kappa_0, kappa)\n\n s, e, i, r = seir.run()\n\n days = np.linspace(0, num_steps, num_steps)\n\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(color='rgba(0, 100, 50, 1)'))\n\n data = [trace_0, trace_1, trace_2, trace_3]\n\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n\n return (graphJSON)\n\n\"\"\"\n@app.callback(\n Output('test','children')\n [Input('val_num_steps', 'num_steps')]\n)\n\n\n@app.route('/start_bckgrnd_update')\ndef start_bckgrnd_update():\n p = Process(target=bckgrnd_update, name=\"background_update\")\n p.start()\n #p.join()\n now = datetime.now()\n user = {'username': 'MSE!'}\n posts = [\n {\n 'author': {'username': 'Paul'},\n 'body': 'Henrik has the update just been started?'\n },\n {\n 'author': {'username': 'Henrik'},\n 'body': 'You bet your sweet ass it has!'\n },\n {\n 'author': {'username': 'Paul'},\n 'body': 'So what time was is when it started?'\n },\n {\n 'author': {'username': 'Henrik'},\n 'body': 'It was exactly %s !' % now\n }\n\n ]\n return render_template(\"start_bckgrnd_update.html\", title=\"home\", user = user, posts=posts)\n\ndef bckgrnd_update():\n global updating\n updating = True\n while updating:\n print(datetime.now())\n print(\"updating RKI DBs now\")\n DB = DBI.DB_interface()\n DB.update_RKI_csv()\n DB.update_RKI_landkreise_csv()\n day = 24 * 3600\n time.sleep(day)\n\"\"\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
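The Flask app in the record above imports `SEIRModel` from a project-specific `utils.model` module that is not part of the record, so the exact dynamics are unknown. The sketch below is one plausible, self-contained implementation of that interface, assuming a plain discrete-time SEIR with `beta = rho * r_t / t_inf`; the `kappa_0` and `kappa` arguments are accepted only to match the call signature and are left unused here.

import numpy as np


class SEIRModel:
    """Minimal discrete-time SEIR compatible with the call in compute_model_output()."""

    def __init__(self, num_steps, n_pop, init_inf, t_inc, t_inf, r_t,
                 rho=1.0, kappa_0=0.0, kappa=0.0):
        self.num_steps = num_steps
        self.n_pop = n_pop              # total population
        self.init_inf = init_inf        # initially infectious individuals
        self.t_inc = t_inc              # mean incubation time (days)
        self.t_inf = t_inf              # mean infectious time (days)
        self.beta = rho * r_t / t_inf   # effective transmission rate
        self.kappa_0 = kappa_0          # unused in this sketch
        self.kappa = kappa              # unused in this sketch

    def run(self):
        n = self.num_steps
        s, e, i, r = (np.zeros(n) for _ in range(4))
        s[0], i[0] = self.n_pop - self.init_inf, self.init_inf
        for t in range(1, n):
            newly_exposed = self.beta * s[t - 1] * i[t - 1] / self.n_pop
            newly_infectious = e[t - 1] / self.t_inc
            newly_recovered = i[t - 1] / self.t_inf
            s[t] = s[t - 1] - newly_exposed
            e[t] = e[t - 1] + newly_exposed - newly_infectious
            i[t] = i[t - 1] + newly_infectious - newly_recovered
            r[t] = r[t - 1] + newly_recovered
        return s, e, i, r


if __name__ == '__main__':
    # same parameters the route handler above hard-codes
    s, e, i, r = SEIRModel(500, 2000, 5, 5, 9, 2.5).run()
    print(f'peak infectious: {i.max():.1f} on day {int(i.argmax())}')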
# Generated by Django 2.2.2 on 2019-07-09 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0015_auto_20190709_1543'),
]
operations = [
migrations.CreateModel(
name='ExampleModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('model_pic', models.ImageField(null=True, upload_to='image/')),
],
),
migrations.RemoveField(
model_name='post',
name='photo',
),
]
|
normal
|
{
"blob_id": "d6e06a78c9a5d8184e5adf9b99cc6030c3434558",
"index": 8464,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0015_auto_20190709_1543')]\n operations = [migrations.CreateModel(name='ExampleModel', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('model_pic', models.ImageField(null=\n True, upload_to='image/'))]), migrations.RemoveField(model_name=\n 'post', name='photo')]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0015_auto_20190709_1543')]\n operations = [migrations.CreateModel(name='ExampleModel', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('model_pic', models.ImageField(null=\n True, upload_to='image/'))]), migrations.RemoveField(model_name=\n 'post', name='photo')]\n",
"step-5": "# Generated by Django 2.2.2 on 2019-07-09 20:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0015_auto_20190709_1543'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ExampleModel',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('model_pic', models.ImageField(null=True, upload_to='image/')),\n ],\n ),\n migrations.RemoveField(\n model_name='post',\n name='photo',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
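For readers less familiar with Django migrations, the operations above imply roughly the following state of `blog/models.py` after migration 0016. Only the fields named in the migration are known; the remaining `Post` fields are illustrative placeholders.

from django.db import models


class ExampleModel(models.Model):
    # matches migrations.CreateModel above: an optional image stored under MEDIA_ROOT/image/
    model_pic = models.ImageField(upload_to='image/', null=True)


class Post(models.Model):
    # illustrative fields only -- the migration says nothing about them
    title = models.CharField(max_length=200)
    body = models.TextField(blank=True)
    # the former 'photo' field was dropped by migrations.RemoveField above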
from emulator import Emulator
from device import Device
from devices.compactflash import CompactFlash
from devices.mc68681 import MC68681
from musashi import m68k
def add_arguments(parser):
parser.add_argument('--rom',
type=str,
help='ROM image')
parser.add_argument('--dram-size',
type=int,
default=16,
help='DRAM size; boards may have 16, 64 or 128M')
parser.add_argument('--cf-width',
type=int,
default=8,
help='CompactFlash interface width, 8 or 16')
CompactFlash.add_arguments(parser)
MC68681.add_arguments(parser)
class CB030Remap(Device):
def __init__(self, args, **options):
super().__init__(args=args,
name='CB030Remap',
required_options=['address'],
**options)
# no registers, just a 4k aperture
self.size = 0x1000
self._did_remap = False
self._dram_size = args.dram_size
def access(self, operation, offset, size, value):
if not self._did_remap:
# remove the low alias of the EEPROM
self.emu.remove_memory(base=0)
# and add the previously-masked DRAM
self.emu.add_memory(base=0x0000000, size=self._dram_size * 1024 * 1024)
return 0
class CB030Ticker(Device):
def __init__(self, args, **options):
super().__init__(args=args,
name='CB030Ticker',
required_options=['address'],
**options)
# no registers, just a 4k aperture
self.size = 0x1000
# core clock @ 24MHz, 100Hz tick rate
self._tick_cycles = int(self.emu.cycle_rate / 100)
self.reset()
def reset(self):
self._stop()
self._tick_fired = False
def access(self, operation, offset, size, value):
if offset < 0x800:
self._stop()
else:
self._start()
def _stop(self):
self.callback_cancel('tick')
self._ticker_on = False
def _start(self):
if not self._ticker_on:
self.callback_every(self._tick_cycles, 'tick', self._tick)
self._ticker_on = True
def _tick(self):
if self._ticker_on:
self._tick_fired = True
self.assert_ipl()
def get_vector(self):
if self._tick_fired:
self._tick_fired = False
return M68K_IRQ_AUTOVECTOR
return M68K_IRQ_SPURIOUS
def configure(args):
"""create and configure an emulator"""
emu = Emulator(args,
cpu='68030',
frequency=24 * 1000 * 1000)
# initially only the EEPROM exists; aliased at 0 all the way up to 0xfe000000
# we only map the low and high aliases, as the intermediates aren't interesting
emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)
emu.add_memory(base=0xfe000000, size=512 * 1024, writable=False, from_file=args.rom)
emu.add_device(args,
MC68681,
address=0xfffff000,
interrupt=m68k.IRQ_2,
register_arrangement='16-bit-doubled')
emu.add_device(args,
CompactFlash,
address=0xffffe000,
register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')
emu.add_device(args,
CB030Remap,
address=0xffff8000)
emu.add_device(args,
CB030Ticker,
address=0xffff9000,
interrupt=m68k.IRQ_6)
return emu
|
normal
|
{
"blob_id": "9eef202a42bfc10b2f52d1b9153d664c5046c13f",
"index": 1965,
"step-1": "<mask token>\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n <mask token>\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom', type=str, help='ROM image')\n parser.add_argument('--dram-size', type=int, default=16, help=\n 'DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width', type=int, default=8, help=\n 'CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n emu = Emulator(args, cpu='68030', frequency=24 * 1000 * 1000)\n emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=4261412864, size=512 * 1024, writable=False,\n from_file=args.rom)\n emu.add_device(args, MC68681, address=4294963200, interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args, CompactFlash, address=4294959104,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args, CB030Remap, address=4294934528)\n emu.add_device(args, CB030Ticker, address=4294938624, interrupt=m68k.IRQ_6)\n return emu\n",
"step-4": "from emulator import Emulator\nfrom device import Device\nfrom devices.compactflash import CompactFlash\nfrom devices.mc68681 import MC68681\nfrom musashi import m68k\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom', type=str, help='ROM image')\n parser.add_argument('--dram-size', type=int, default=16, help=\n 'DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width', type=int, default=8, help=\n 'CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Remap', required_options=[\n 'address'], **options)\n self.size = 4096\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n self.emu.remove_memory(base=0)\n self.emu.add_memory(base=0, size=self._dram_size * 1024 * 1024)\n return 0\n\n\nclass CB030Ticker(Device):\n\n def __init__(self, args, **options):\n super().__init__(args=args, name='CB030Ticker', required_options=[\n 'address'], **options)\n self.size = 4096\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 2048:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n emu = Emulator(args, cpu='68030', frequency=24 * 1000 * 1000)\n emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=4261412864, size=512 * 1024, writable=False,\n from_file=args.rom)\n emu.add_device(args, MC68681, address=4294963200, interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args, CompactFlash, address=4294959104,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args, CB030Remap, address=4294934528)\n emu.add_device(args, CB030Ticker, address=4294938624, interrupt=m68k.IRQ_6)\n return emu\n",
"step-5": "from emulator import Emulator\nfrom device import Device\nfrom devices.compactflash import CompactFlash\nfrom devices.mc68681 import MC68681\nfrom musashi import m68k\n\n\ndef add_arguments(parser):\n parser.add_argument('--rom',\n type=str,\n help='ROM image')\n parser.add_argument('--dram-size',\n type=int,\n default=16,\n help='DRAM size; boards may have 16, 64 or 128M')\n parser.add_argument('--cf-width',\n type=int,\n default=8,\n help='CompactFlash interface width, 8 or 16')\n CompactFlash.add_arguments(parser)\n MC68681.add_arguments(parser)\n\n\nclass CB030Remap(Device):\n def __init__(self, args, **options):\n super().__init__(args=args,\n name='CB030Remap',\n required_options=['address'],\n **options)\n\n # no registers, just a 4k aperture\n self.size = 0x1000\n self._did_remap = False\n self._dram_size = args.dram_size\n\n def access(self, operation, offset, size, value):\n if not self._did_remap:\n # remove the low alias of the EEPROM\n self.emu.remove_memory(base=0)\n\n # and add the previously-masked DRAM\n self.emu.add_memory(base=0x0000000, size=self._dram_size * 1024 * 1024)\n\n return 0\n\n\nclass CB030Ticker(Device):\n def __init__(self, args, **options):\n super().__init__(args=args,\n name='CB030Ticker',\n required_options=['address'],\n **options)\n\n # no registers, just a 4k aperture\n self.size = 0x1000\n # core clock @ 24MHz, 100Hz tick rate\n self._tick_cycles = int(self.emu.cycle_rate / 100)\n self.reset()\n\n def reset(self):\n self._stop()\n self._tick_fired = False\n\n def access(self, operation, offset, size, value):\n if offset < 0x800:\n self._stop()\n else:\n self._start()\n\n def _stop(self):\n self.callback_cancel('tick')\n self._ticker_on = False\n\n def _start(self):\n if not self._ticker_on:\n self.callback_every(self._tick_cycles, 'tick', self._tick)\n self._ticker_on = True\n\n def _tick(self):\n if self._ticker_on:\n self._tick_fired = True\n self.assert_ipl()\n\n def get_vector(self):\n if self._tick_fired:\n self._tick_fired = False\n return M68K_IRQ_AUTOVECTOR\n return M68K_IRQ_SPURIOUS\n\n\ndef configure(args):\n \"\"\"create and configure an emulator\"\"\"\n\n emu = Emulator(args,\n cpu='68030',\n frequency=24 * 1000 * 1000)\n # initially only the EEPROM exists; aliased at 0 all the way up to 0xfe000000\n # we only map the low and high aliases, as the intermediates aren't interesting\n emu.add_memory(base=0, size=512 * 1024, writable=False, from_file=args.rom)\n emu.add_memory(base=0xfe000000, size=512 * 1024, writable=False, from_file=args.rom)\n\n emu.add_device(args,\n MC68681,\n address=0xfffff000,\n interrupt=m68k.IRQ_2,\n register_arrangement='16-bit-doubled')\n emu.add_device(args,\n CompactFlash,\n address=0xffffe000,\n register_arrangement='8-bit' if args.cf_width == 8 else '16-bit')\n emu.add_device(args,\n CB030Remap,\n address=0xffff8000)\n emu.add_device(args,\n CB030Ticker,\n address=0xffff9000,\n interrupt=m68k.IRQ_6)\n return emu\n",
"step-ids": [
7,
11,
13,
14,
15
]
}
|
[
7,
11,
13,
14,
15
] |
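The `Emulator`/`Device` framework used above is project-specific, so here is a small standalone sketch of the one piece of board behaviour that CB030Remap models: at power-on the EEPROM is aliased at address 0, and the first access to the remap aperture swaps DRAM in at the bottom of the address space. The `MemoryMap` class and its region entries are purely illustrative.

class MemoryMap:
    """Toy model of the CB030 boot-time remap handled by CB030Remap.access()."""

    def __init__(self, dram_size=16 * 1024 * 1024):
        self.dram_size = dram_size
        # power-on state: EEPROM visible at 0 (alias) and at its real high address
        self.regions = {0x00000000: ('eeprom-alias', 512 * 1024),
                        0xfe000000: ('eeprom', 512 * 1024)}
        self._did_remap = False

    def access_remap_aperture(self):
        # mirrors CB030Remap.access(): any access performs the swap exactly once
        if not self._did_remap:
            del self.regions[0x00000000]                       # remove_memory(base=0)
            self.regions[0x00000000] = ('dram', self.dram_size)  # add_memory(base=0, ...)
            self._did_remap = True


mm = MemoryMap()
mm.access_remap_aperture()
print(mm.regions[0x00000000])   # ('dram', 16777216) -- DRAM now backs low memory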
from robot.libraries.BuiltIn import BuiltIn
from RoboGalaxyLibrary.utilitylib import logging as logger
import re
def block_no_keyword_warn():
pass
class Compare_hpMCTP(object):
def __init__(self):
self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')
def do(self, expect, actual, verbose=False):
def smart_compare(exp, act):
# Remove leading whitespaces
exp = (re.sub(r'^\s*', '', exp))
act = (re.sub(r'^\s*', '', act))
if verbose:
logger._log_to_console_and_log_file("expected after removing leading white space: %s" % exp)
logger._log_to_console_and_log_file("actual after removing leading white space: %s" % act)
            missing = [e for e in exp if (e not in act) and (e != '')]  # use !=, not 'is not', for string comparison
extra = [a for a in act if (a not in exp)]
rc = 1 # True (good, until proven otherwise)
if extra:
logger._log_to_console_and_log_file("extra item found: %s" % extra)
rc = 0
else:
logger._log_to_console_and_log_file("No Extra found.")
if missing:
logger._log_to_console_and_log_file("missing item: %s" % missing)
rc = 0
else:
logger._log_to_console_and_log_file("No Missing found.")
return rc
# Need to delete some items.
actual = re.sub(r'\n\r', '\n', actual)
# get rid of the stuff from actual up to the first header. Extra info not compared.
# for example, the first three lines below.
# hpMCTP 2.3.0-4
# Copyright (c) 2015-2016 Hewlett-Packard - All Rights Reserved
# -------------------------------------------------------------
# <ISCSI-Boot-Cats>
headerEnd = actual.index('<ISCSI-Boot-Cats>')
actual = '\n' + actual[headerEnd:]
if verbose:
logger._log_to_console_and_log_file("Actual now: %s" % actual)
logger._log_to_console_and_log_file("Expect now: %s" % expect)
# Start comparing the expected vs the actual
# if as a string they match, then no need to do a smart compare
if expect == actual:
return logger._log_to_console_and_log_file("expect == actual. String equal, no further compare needed.")
else:
logger._log_to_console_and_log_file("expect != actual, will do smart compare")
# split into single lines.
eList = expect.split('\n')
aList = actual.split('\n')
logger._log_to_console_and_log_file("Split on: %s into %s sections" % ('\n', len(eList) - 1))
if len(aList) != len(eList):
errMsg = "aList and eList counts diff. Problem with split. a: %s, e: %s" % (len(aList) - 1, len(eList) - 1)
logger._log_to_console_and_log_file(errMsg)
raise AssertionError(errMsg)
        for i in range(1, len(eList)):  # range works on Python 2 and 3; xrange is Python 2 only
if eList[i] == aList[i]:
logger._log_to_console_and_log_file("Sections %s are equal." % i)
if verbose:
logger._log_to_console_and_log_file("expect: %s" % eList[i])
logger._log_to_console_and_log_file("actual: %s" % aList[i])
else:
logger._log_to_console_and_log_file("Section %s requires a smart compare." % i)
if verbose:
logger._log_to_console_and_log_file("expect: %s" % eList[i])
logger._log_to_console_and_log_file("actual: %s" % aList[i])
if not smart_compare(eList[i], aList[i]):
errMsg = "Expected: '%s' does not match '%s'" % (eList[i], aList[i])
logger._log_to_console_and_log_file(errMsg)
raise AssertionError(errMsg)
|
normal
|
{
"blob_id": "17ba6aaa9009c258136b184ca6a8660cec1cfe40",
"index": 3752,
"step-1": "<mask token>\n\n\nclass Compare_hpMCTP(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Compare_hpMCTP(object):\n <mask token>\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-3": "<mask token>\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-4": "from robot.libraries.BuiltIn import BuiltIn\nfrom RoboGalaxyLibrary.utilitylib import logging as logger\nimport re\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n exp = re.sub('^\\\\s*', '', exp)\n act = re.sub('^\\\\s*', '', act)\n if verbose:\n logger._log_to_console_and_log_file(\n 'expected after removing leading white space: %s' % exp)\n logger._log_to_console_and_log_file(\n 'actual after removing leading white space: %s' % act)\n missing = [e for e in exp if e not in act and e is not '']\n extra = [a for a in act if a not in exp]\n rc = 1\n if extra:\n logger._log_to_console_and_log_file('extra item found: %s' %\n extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Extra found.')\n if missing:\n logger._log_to_console_and_log_file('missing item: %s' %\n missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file('No Missing found.')\n return rc\n actual = re.sub('\\\\n\\\\r', '\\n', actual)\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n if verbose:\n logger._log_to_console_and_log_file('Actual now: %s' % actual)\n logger._log_to_console_and_log_file('Expect now: %s' % expect)\n if expect == actual:\n return logger._log_to_console_and_log_file(\n 'expect == actual. String equal, no further compare needed.')\n else:\n logger._log_to_console_and_log_file(\n 'expect != actual, will do smart compare')\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file('Split on: %s into %s sections' %\n ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = (\n 'aList and eList counts diff. Problem with split. a: %s, e: %s'\n % (len(aList) - 1, len(eList) - 1))\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\n 'Sections %s are equal.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n else:\n logger._log_to_console_and_log_file(\n 'Section %s requires a smart compare.' % i)\n if verbose:\n logger._log_to_console_and_log_file('expect: %s' % eList[i]\n )\n logger._log_to_console_and_log_file('actual: %s' % aList[i]\n )\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[\n i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-5": "from robot.libraries.BuiltIn import BuiltIn\nfrom RoboGalaxyLibrary.utilitylib import logging as logger\nimport re\n\n\ndef block_no_keyword_warn():\n pass\n\n\nclass Compare_hpMCTP(object):\n\n def __init__(self):\n self.fusionlib = BuiltIn().get_library_instance('FusionLibrary')\n\n def do(self, expect, actual, verbose=False):\n\n def smart_compare(exp, act):\n\n # Remove leading whitespaces\n exp = (re.sub(r'^\\s*', '', exp))\n act = (re.sub(r'^\\s*', '', act))\n\n if verbose:\n logger._log_to_console_and_log_file(\"expected after removing leading white space: %s\" % exp)\n logger._log_to_console_and_log_file(\"actual after removing leading white space: %s\" % act)\n\n missing = [e for e in exp if (e not in act) and (e is not '')]\n extra = [a for a in act if (a not in exp)]\n\n rc = 1 # True (good, until proven otherwise)\n if extra:\n logger._log_to_console_and_log_file(\"extra item found: %s\" % extra)\n rc = 0\n else:\n logger._log_to_console_and_log_file(\"No Extra found.\")\n\n if missing:\n logger._log_to_console_and_log_file(\"missing item: %s\" % missing)\n rc = 0\n else:\n logger._log_to_console_and_log_file(\"No Missing found.\")\n\n return rc\n\n# Need to delete some items.\n actual = re.sub(r'\\n\\r', '\\n', actual)\n\n# get rid of the stuff from actual up to the first header. Extra info not compared.\n# for example, the first three lines below.\n# hpMCTP 2.3.0-4\n# Copyright (c) 2015-2016 Hewlett-Packard - All Rights Reserved\n# -------------------------------------------------------------\n# <ISCSI-Boot-Cats>\n headerEnd = actual.index('<ISCSI-Boot-Cats>')\n actual = '\\n' + actual[headerEnd:]\n\n if verbose:\n logger._log_to_console_and_log_file(\"Actual now: %s\" % actual)\n logger._log_to_console_and_log_file(\"Expect now: %s\" % expect)\n\n# Start comparing the expected vs the actual\n # if as a string they match, then no need to do a smart compare\n if expect == actual:\n return logger._log_to_console_and_log_file(\"expect == actual. String equal, no further compare needed.\")\n\n else:\n logger._log_to_console_and_log_file(\"expect != actual, will do smart compare\")\n\n # split into single lines.\n eList = expect.split('\\n')\n aList = actual.split('\\n')\n logger._log_to_console_and_log_file(\"Split on: %s into %s sections\" % ('\\n', len(eList) - 1))\n if len(aList) != len(eList):\n errMsg = \"aList and eList counts diff. Problem with split. a: %s, e: %s\" % (len(aList) - 1, len(eList) - 1)\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n\n for i in xrange(1, len(eList)):\n if eList[i] == aList[i]:\n logger._log_to_console_and_log_file(\"Sections %s are equal.\" % i)\n if verbose:\n logger._log_to_console_and_log_file(\"expect: %s\" % eList[i])\n logger._log_to_console_and_log_file(\"actual: %s\" % aList[i])\n else:\n logger._log_to_console_and_log_file(\"Section %s requires a smart compare.\" % i)\n if verbose:\n logger._log_to_console_and_log_file(\"expect: %s\" % eList[i])\n logger._log_to_console_and_log_file(\"actual: %s\" % aList[i])\n\n if not smart_compare(eList[i], aList[i]):\n errMsg = \"Expected: '%s' does not match '%s'\" % (eList[i], aList[i])\n logger._log_to_console_and_log_file(errMsg)\n raise AssertionError(errMsg)\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
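As a rough, dependency-free illustration of what `Compare_hpMCTP.do()` checks, the function below reproduces the core flow — strip everything before the first `<ISCSI-Boot-Cats>` header from the actual output, split both texts into line sections, and flag sections whose character membership differs — without the Robot Framework, FusionLibrary, or logging plumbing. It is a simplification, not a drop-in replacement.

import re


def compare_hpmctp_output(expect: str, actual: str) -> bool:
    actual = re.sub(r'\n\r', '\n', actual)
    # drop the version/copyright banner before the first category header
    actual = '\n' + actual[actual.index('<ISCSI-Boot-Cats>'):]
    e_lines, a_lines = expect.split('\n'), actual.split('\n')
    if len(e_lines) != len(a_lines):
        return False
    for e_line, a_line in zip(e_lines, a_lines):
        e_line = re.sub(r'^\s*', '', e_line)
        a_line = re.sub(r'^\s*', '', a_line)
        # "smart compare": membership-only check, like the missing/extra lists above
        if set(e_line) - set(a_line) or set(a_line) - set(e_line):
            return False
    return True


banner = 'hpMCTP 2.3.0-4\nCopyright (c) 2015-2016 ...\n----\n'
body = '<ISCSI-Boot-Cats>\n  cat-a cat-b'
print(compare_hpmctp_output('\n' + body, banner + body))   # True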
import json
import time
from typing import Dict
import threading
"""
    Note: each request runs in its own thread.
    Overview: `top -H -p <process_id>`
"""
from flask import Flask, jsonify, request
app = Flask(__name__)
# https://www.tutorialspoint.com/flask/flask_http_methods.htm
# loading the application configuration (optional, but a good idea);
# after the application is built (below), the "config.json" file should be located in the application folder
with open('config.json', 'r') as f:
loaded = json.load(f)
magic = loaded['magic']
@app.route('/status')
def get_json_data():
return jsonify({'comment': f'App działa OK; magic:{magic}'})
# available at: http://localhost:5001/compute?a=10&b=0
@app.route('/compute')
def compute():
a = int(request.args.get('a'))
b = int(request.args.get('b'))
print(f'request a={a}, thread:{threading.current_thread().name}')
time.sleep(10.0)
if b == 0:
        # now return an error message together with HTTP error code 400 (BAD_REQUEST)
return jsonify({'comment': 'b==0, cannot divide'}), 400
return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})
# available at: http://localhost:5001/welcome/roadrunner/suffix/nice%20to%20meet%20you
@app.route('/welcome/<username>/suffix/<message>')
def welcome(username, message):
return jsonify({'comment': f'Hello {username}, {message}!'})
class Auth:
def __init__(self, user: str, pass_: str):
self.user = user
self.pass_ = pass_
# task -> collect users in some structure (e.g. a 'users' list, or a Dict or Set),
# and return an error if we create a user whose "user" field is already "taken"
# solution:
users: Dict[str, Auth] = {}
# available via Postman (you need to make a POST request):
# localhost:5001/user/create
# in the "body" section choose "raw -> JSON", and add to the JSON field:
# {
# "user": "Xi Wuhan",
# "pass_": "123"
# }
@app.route('/user/create', methods=['POST'])
def create_user():
data = request.json
k = Auth(**data)
if users.keys().__contains__(k.user):
return jsonify({'comment': 'This user name already exists!'}), 400
users[k.user] = k
return jsonify(k.__dict__)
app.run(host='localhost', port=5001, debug=None, load_dotenv=False) # can skip all args
# possible compilation to a single executable file:
# `pyinstaller _zero.py -n my_flask_app --onefile`
|
normal
|
{
"blob_id": "8fcc2a13fd5a803e2d755a567c78c8274bd88aad",
"index": 7283,
"step-1": "<mask token>\n\n\nclass Auth:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/welcome/<username>/suffix/<message>')\ndef welcome(username, message):\n return jsonify({'comment': f'Hello {username}, {message}!'})\n\n\nclass Auth:\n\n def __init__(self, user: str, pass_: str):\n self.user = user\n self.pass_ = pass_\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/status')\ndef get_json_data():\n return jsonify({'comment': f'App działa OK; magic:{magic}'})\n\n\n@app.route('/compute')\ndef compute():\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n print(f'request a={a}, thread:{threading.current_thread().name}')\n time.sleep(10.0)\n if b == 0:\n return jsonify({'comment': 'b==0, cannot divide'}), 400\n return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})\n\n\n@app.route('/welcome/<username>/suffix/<message>')\ndef welcome(username, message):\n return jsonify({'comment': f'Hello {username}, {message}!'})\n\n\nclass Auth:\n\n def __init__(self, user: str, pass_: str):\n self.user = user\n self.pass_ = pass_\n\n\n<mask token>\n\n\n@app.route('/user/create', methods=['POST'])\ndef create_user():\n data = request.json\n k = Auth(**data)\n if users.keys().__contains__(k.user):\n return jsonify({'comment': 'This user name already exists!'}), 400\n users[k.user] = k\n return jsonify(k.__dict__)\n\n\n<mask token>\n",
"step-4": "import json\nimport time\nfrom typing import Dict\nimport threading\n<mask token>\nfrom flask import Flask, jsonify, request\napp = Flask(__name__)\nwith open('config.json', 'r') as f:\n loaded = json.load(f)\n magic = loaded['magic']\n\n\n@app.route('/status')\ndef get_json_data():\n return jsonify({'comment': f'App działa OK; magic:{magic}'})\n\n\n@app.route('/compute')\ndef compute():\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n print(f'request a={a}, thread:{threading.current_thread().name}')\n time.sleep(10.0)\n if b == 0:\n return jsonify({'comment': 'b==0, cannot divide'}), 400\n return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})\n\n\n@app.route('/welcome/<username>/suffix/<message>')\ndef welcome(username, message):\n return jsonify({'comment': f'Hello {username}, {message}!'})\n\n\nclass Auth:\n\n def __init__(self, user: str, pass_: str):\n self.user = user\n self.pass_ = pass_\n\n\nusers: Dict[str, Auth] = {}\n\n\n@app.route('/user/create', methods=['POST'])\ndef create_user():\n data = request.json\n k = Auth(**data)\n if users.keys().__contains__(k.user):\n return jsonify({'comment': 'This user name already exists!'}), 400\n users[k.user] = k\n return jsonify(k.__dict__)\n\n\napp.run(host='localhost', port=5001, debug=None, load_dotenv=False)\n",
"step-5": "import json\nimport time\nfrom typing import Dict\nimport threading\n\n\"\"\"\n Note: każdy request uruchamia osobny wątek. \n Przegląd: `top -H -p <process_id>`\n\"\"\"\n\n\nfrom flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n# https://www.tutorialspoint.com/flask/flask_http_methods.htm\n\n# ładowanie konfiguracji aplikacji (opcjonalne, ale to dobry pomysł);\n# po zbudowaniu aplikacji (poniżej) file \"config.json\" powinien się znajdować w folderze aplikacji\nwith open('config.json', 'r') as f:\n loaded = json.load(f)\n magic = loaded['magic']\n\n\n@app.route('/status')\ndef get_json_data():\n return jsonify({'comment': f'App działa OK; magic:{magic}'})\n\n\n# dostępna pod: http://localhost:5001/compute?a=10&b=0\n@app.route('/compute')\ndef compute():\n a = int(request.args.get('a'))\n b = int(request.args.get('b'))\n print(f'request a={a}, thread:{threading.current_thread().name}')\n time.sleep(10.0)\n if b == 0:\n # teraz zwracamy komunikat o błędzie, oraz http error-code 400 (BAD_REQUEST)\n return jsonify({'comment': 'b==0, cannot divide'}), 400\n return jsonify({'sum': a + b, 'difference': a - b, 'division': a / b})\n\n\n# dostępna pod: http://localhost:5001/welcome/roadrunner/suffix/nice%20to%20meet%20you\n@app.route('/welcome/<username>/suffix/<message>')\ndef welcome(username, message):\n return jsonify({'comment': f'Hello {username}, {message}!'})\n\n\nclass Auth:\n def __init__(self, user: str, pass_: str):\n self.user = user\n self.pass_ = pass_\n\n\n# zadanie -> zbierać userów w jakieś strukturze (np. liście 'users', albo Dict lub Set),\n# i zwrócić błąd jeśli tworzymy usera, którego pole \"user\" już zostało \"zajęte\"\n# rozwiązanie:\n\nusers: Dict[str, Auth] = {}\n\n\n# dostępna per Postman (trzeba zrobić zapytanie POST):\n# localhost:5001/user/create\n# w sekcji \"body\" trzba dać \"raw -> JSON\", i w polu JSON dodać:\n# {\n# \t\"user\": \"Xi Wuhan\",\n# \t\"pass_\": \"123\"\n# }\n@app.route('/user/create', methods=['POST'])\ndef create_user():\n data = request.json\n k = Auth(**data)\n if users.keys().__contains__(k.user):\n return jsonify({'comment': 'This user name already exists!'}), 400\n users[k.user] = k\n return jsonify(k.__dict__)\n\n\napp.run(host='localhost', port=5001, debug=None, load_dotenv=False) # can skip all args\n\n# możliwa kompilacja do pojedynczego pliku wykonywalnego:\n# `pyinstaller _zero.py -n my_flask_app --onefile\n",
"step-ids": [
1,
3,
6,
9,
10
]
}
|
[
1,
3,
6,
9,
10
] |
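The comments in the record above describe testing the API with Postman; the same checks can be scripted. The sketch below assumes the app is already running on `localhost:5001` and that the `requests` package is installed; note that `/compute` sleeps for 10 seconds per request, so that call takes a while.

import requests

BASE = 'http://localhost:5001'

print(requests.get(f'{BASE}/status').json())

# /compute sleeps 10 s server-side before answering
print(requests.get(f'{BASE}/compute', params={'a': 10, 'b': 2}).json())

# first creation succeeds ...
resp = requests.post(f'{BASE}/user/create', json={'user': 'Xi Wuhan', 'pass_': '123'})
print(resp.status_code, resp.json())   # 200 {'user': 'Xi Wuhan', 'pass_': '123'}

# ... creating the same user again is rejected with 400
resp = requests.post(f'{BASE}/user/create', json={'user': 'Xi Wuhan', 'pass_': '123'})
print(resp.status_code, resp.json())   # 400 {'comment': 'This user name already exists!'}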
import cv2 as cv
'''Color space overview'''
'''
RGB: the RGB color space is a cube; three channels (red, green, blue), each with 256 gray levels (0-255)
HSV: the HSV color space is a cylinder; its three channels map to hue (angle), saturation (radius) and value (height)
HSI
YCrCb
YUV
'''
'''Commonly used color space conversion function: cvtColor'''
def colorSpaceConvert(image):
    '''Convert to grayscale'''
res = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
cv.imshow("gray", res)
    '''Convert to the HSV color space'''
res = cv.cvtColor(image, cv.COLOR_BGR2HSV)
cv.imshow("hsv", res)
    '''Convert to the YUV color space'''
res = cv.cvtColor(image, cv.COLOR_BGR2YUV)
cv.imshow("yuv", res)
image = cv.imread("../girl.jpg")
colorSpaceConvert(image)
'''Wait for a key press before the next operation'''
cv.waitKey(0)
'''Destroy all windows when the program finishes'''
cv.destroyAllWindows()
|
normal
|
{
"blob_id": "6d359d987c50fd0d5e963d467a379eb245e3eb40",
"index": 3756,
"step-1": "<mask token>\n\n\ndef colorSpaceConvert(image):\n \"\"\"转换到灰度空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n cv.imshow('gray', res)\n \"\"\"转换到HSV色彩空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2HSV)\n cv.imshow('hsv', res)\n \"\"\"转换到YUV色彩空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2YUV)\n cv.imshow('yuv', res)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef colorSpaceConvert(image):\n \"\"\"转换到灰度空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n cv.imshow('gray', res)\n \"\"\"转换到HSV色彩空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2HSV)\n cv.imshow('hsv', res)\n \"\"\"转换到YUV色彩空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2YUV)\n cv.imshow('yuv', res)\n\n\n<mask token>\ncolorSpaceConvert(image)\n<mask token>\ncv.waitKey(0)\n<mask token>\ncv.destroyAllWindows()\n",
"step-3": "<mask token>\n\n\ndef colorSpaceConvert(image):\n \"\"\"转换到灰度空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n cv.imshow('gray', res)\n \"\"\"转换到HSV色彩空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2HSV)\n cv.imshow('hsv', res)\n \"\"\"转换到YUV色彩空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2YUV)\n cv.imshow('yuv', res)\n\n\nimage = cv.imread('../girl.jpg')\ncolorSpaceConvert(image)\n<mask token>\ncv.waitKey(0)\n<mask token>\ncv.destroyAllWindows()\n",
"step-4": "import cv2 as cv\n<mask token>\n\n\ndef colorSpaceConvert(image):\n \"\"\"转换到灰度空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n cv.imshow('gray', res)\n \"\"\"转换到HSV色彩空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2HSV)\n cv.imshow('hsv', res)\n \"\"\"转换到YUV色彩空间\"\"\"\n res = cv.cvtColor(image, cv.COLOR_BGR2YUV)\n cv.imshow('yuv', res)\n\n\nimage = cv.imread('../girl.jpg')\ncolorSpaceConvert(image)\n<mask token>\ncv.waitKey(0)\n<mask token>\ncv.destroyAllWindows()\n",
"step-5": "import cv2 as cv\n\n'''色彩空间介绍'''\n'''\nRGB:对于RGB的色彩空间是立方体的色彩空间 三通道 红 黄 蓝 每个灰度级为255\nHSV:对于HSV的色彩空间是255度的圆柱体 三通道 高度 圆心角 半径分别是255\nHIS\nYCrCb\nYUV\n\n'''\n'''常用的色彩空间转换函数***cvtColor'''\ndef colorSpaceConvert(image):\n '''转换到灰度空间'''\n res = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n cv.imshow(\"gray\", res)\n '''转换到HSV色彩空间'''\n res = cv.cvtColor(image, cv.COLOR_BGR2HSV)\n cv.imshow(\"hsv\", res)\n '''转换到YUV色彩空间'''\n res = cv.cvtColor(image, cv.COLOR_BGR2YUV)\n cv.imshow(\"yuv\", res)\n\nimage = cv.imread(\"../girl.jpg\")\ncolorSpaceConvert(image)\n'''等待下一个操作的延迟'''\ncv.waitKey(0)\n'''程序操作结束要销毁所有的窗口'''\ncv.destroyAllWindows()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
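As a follow-on to the conversions above, the snippet below shows the most common practical reason for moving to HSV: thresholding a colour range with `cv.inRange`, which is far easier in HSV than in BGR. The HSV bounds are an illustrative green range, and the image path matches the one used above.

import cv2 as cv
import numpy as np

image = cv.imread('../girl.jpg')
hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)

# illustrative bounds for green-ish pixels; OpenCV uses H in [0,180), S/V in [0,255]
lower = np.array([35, 43, 46])
upper = np.array([77, 255, 255])
mask = cv.inRange(hsv, lower, upper)
extracted = cv.bitwise_and(image, image, mask=mask)

cv.imshow('mask', mask)
cv.imshow('extracted', extracted)
cv.waitKey(0)
cv.destroyAllWindows()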
import re
import numpy as np
# only read pgm file
def readfile(filename:str)->tuple:
'''read given pgm file'''
col = 0
row = 0
lst = list()
with open(filename, 'rb') as file:
header = list()
ls = list()
# remove first line
header.append((file.readline()).decode("utf-8"))
while True:
line = (file.readline()).decode("utf-8")
if not line:
break
elif(line[0] == '#'):
continue
else:
header.append(line)
ss = str(line)
l = re.findall(r'\d+', ss)
col = int(l[0])
row = int(l[1])
break
header.append((file.readline()).decode("utf-8"))
n = col*row
lst = list()
for i in range(n):
try:
lst.append(ord(file.read(1)))
except:
pass
file.close()
return header, lst, [col, row]
#convert list
def convert(lst:list)->list():
'''String Unicode to int'''
l = list()
for item in lst:
l.append(ord(item))
return l
def writeNumeric(filename:str, data:list, header:list, dimension:list):
'''write pgm file in numeric format (P2 as a header)'''
# clear file if exists
name = filename.split('.')
filename = name[0]+'_out.'+name[1]
f = open(filename, 'w')
f.write('')
f.close()
col = dimension[0]
row = dimension[1]
s = ''
# write new file
with open(filename, 'w', encoding='ISO-8859-1') as file:
header[0] = 'P2\n'
for h in header:
# decoding
s += h
for i in range(row):
for j in range(col):
try:
index = i*col + j
s += str(data[index])
if j < col -1:
s += ' '
except:
# print(i)
# print(j)
pass
s += '\n'
file.write(s)
file.close()
def write(filename:str, data:list, header:list):
# clear file if exists
name = filename.split('.')
filename = name[0]+'_out.'+name[1]
f = open(filename, 'w')
f.write('')
f.close()
s = ''
# write new file
with open(filename, 'w', encoding='ISO-8859-1') as file:
for h in header:
# decoding
s += h
for d in data:
s += str(d)
file.write(s)
file.close()
|
normal
|
{
"blob_id": "63be96c0d1231f836bbec9ce93f06bda32775511",
"index": 2259,
"step-1": "<mask token>\n\n\ndef convert(lst: list) ->list():\n \"\"\"String Unicode to int\"\"\"\n l = list()\n for item in lst:\n l.append(ord(item))\n return l\n\n\n<mask token>\n\n\ndef write(filename: str, data: list, header: list):\n name = filename.split('.')\n filename = name[0] + '_out.' + name[1]\n f = open(filename, 'w')\n f.write('')\n f.close()\n s = ''\n with open(filename, 'w', encoding='ISO-8859-1') as file:\n for h in header:\n s += h\n for d in data:\n s += str(d)\n file.write(s)\n file.close()\n",
"step-2": "<mask token>\n\n\ndef readfile(filename: str) ->tuple:\n \"\"\"read given pgm file\"\"\"\n col = 0\n row = 0\n lst = list()\n with open(filename, 'rb') as file:\n header = list()\n ls = list()\n header.append(file.readline().decode('utf-8'))\n while True:\n line = file.readline().decode('utf-8')\n if not line:\n break\n elif line[0] == '#':\n continue\n else:\n header.append(line)\n ss = str(line)\n l = re.findall('\\\\d+', ss)\n col = int(l[0])\n row = int(l[1])\n break\n header.append(file.readline().decode('utf-8'))\n n = col * row\n lst = list()\n for i in range(n):\n try:\n lst.append(ord(file.read(1)))\n except:\n pass\n file.close()\n return header, lst, [col, row]\n\n\ndef convert(lst: list) ->list():\n \"\"\"String Unicode to int\"\"\"\n l = list()\n for item in lst:\n l.append(ord(item))\n return l\n\n\n<mask token>\n\n\ndef write(filename: str, data: list, header: list):\n name = filename.split('.')\n filename = name[0] + '_out.' + name[1]\n f = open(filename, 'w')\n f.write('')\n f.close()\n s = ''\n with open(filename, 'w', encoding='ISO-8859-1') as file:\n for h in header:\n s += h\n for d in data:\n s += str(d)\n file.write(s)\n file.close()\n",
"step-3": "<mask token>\n\n\ndef readfile(filename: str) ->tuple:\n \"\"\"read given pgm file\"\"\"\n col = 0\n row = 0\n lst = list()\n with open(filename, 'rb') as file:\n header = list()\n ls = list()\n header.append(file.readline().decode('utf-8'))\n while True:\n line = file.readline().decode('utf-8')\n if not line:\n break\n elif line[0] == '#':\n continue\n else:\n header.append(line)\n ss = str(line)\n l = re.findall('\\\\d+', ss)\n col = int(l[0])\n row = int(l[1])\n break\n header.append(file.readline().decode('utf-8'))\n n = col * row\n lst = list()\n for i in range(n):\n try:\n lst.append(ord(file.read(1)))\n except:\n pass\n file.close()\n return header, lst, [col, row]\n\n\ndef convert(lst: list) ->list():\n \"\"\"String Unicode to int\"\"\"\n l = list()\n for item in lst:\n l.append(ord(item))\n return l\n\n\ndef writeNumeric(filename: str, data: list, header: list, dimension: list):\n \"\"\"write pgm file in numeric format (P2 as a header)\"\"\"\n name = filename.split('.')\n filename = name[0] + '_out.' + name[1]\n f = open(filename, 'w')\n f.write('')\n f.close()\n col = dimension[0]\n row = dimension[1]\n s = ''\n with open(filename, 'w', encoding='ISO-8859-1') as file:\n header[0] = 'P2\\n'\n for h in header:\n s += h\n for i in range(row):\n for j in range(col):\n try:\n index = i * col + j\n s += str(data[index])\n if j < col - 1:\n s += ' '\n except:\n pass\n s += '\\n'\n file.write(s)\n file.close()\n\n\ndef write(filename: str, data: list, header: list):\n name = filename.split('.')\n filename = name[0] + '_out.' + name[1]\n f = open(filename, 'w')\n f.write('')\n f.close()\n s = ''\n with open(filename, 'w', encoding='ISO-8859-1') as file:\n for h in header:\n s += h\n for d in data:\n s += str(d)\n file.write(s)\n file.close()\n",
"step-4": "import re\nimport numpy as np\n\n\ndef readfile(filename: str) ->tuple:\n \"\"\"read given pgm file\"\"\"\n col = 0\n row = 0\n lst = list()\n with open(filename, 'rb') as file:\n header = list()\n ls = list()\n header.append(file.readline().decode('utf-8'))\n while True:\n line = file.readline().decode('utf-8')\n if not line:\n break\n elif line[0] == '#':\n continue\n else:\n header.append(line)\n ss = str(line)\n l = re.findall('\\\\d+', ss)\n col = int(l[0])\n row = int(l[1])\n break\n header.append(file.readline().decode('utf-8'))\n n = col * row\n lst = list()\n for i in range(n):\n try:\n lst.append(ord(file.read(1)))\n except:\n pass\n file.close()\n return header, lst, [col, row]\n\n\ndef convert(lst: list) ->list():\n \"\"\"String Unicode to int\"\"\"\n l = list()\n for item in lst:\n l.append(ord(item))\n return l\n\n\ndef writeNumeric(filename: str, data: list, header: list, dimension: list):\n \"\"\"write pgm file in numeric format (P2 as a header)\"\"\"\n name = filename.split('.')\n filename = name[0] + '_out.' + name[1]\n f = open(filename, 'w')\n f.write('')\n f.close()\n col = dimension[0]\n row = dimension[1]\n s = ''\n with open(filename, 'w', encoding='ISO-8859-1') as file:\n header[0] = 'P2\\n'\n for h in header:\n s += h\n for i in range(row):\n for j in range(col):\n try:\n index = i * col + j\n s += str(data[index])\n if j < col - 1:\n s += ' '\n except:\n pass\n s += '\\n'\n file.write(s)\n file.close()\n\n\ndef write(filename: str, data: list, header: list):\n name = filename.split('.')\n filename = name[0] + '_out.' + name[1]\n f = open(filename, 'w')\n f.write('')\n f.close()\n s = ''\n with open(filename, 'w', encoding='ISO-8859-1') as file:\n for h in header:\n s += h\n for d in data:\n s += str(d)\n file.write(s)\n file.close()\n",
"step-5": "import re\nimport numpy as np\n\n# only read pgm file\ndef readfile(filename:str)->tuple:\n '''read given pgm file'''\n col = 0\n row = 0\n lst = list()\n with open(filename, 'rb') as file:\n header = list()\n ls = list()\n # remove first line\n header.append((file.readline()).decode(\"utf-8\"))\n while True:\n line = (file.readline()).decode(\"utf-8\")\n if not line:\n break\n elif(line[0] == '#'):\n continue\n else:\n header.append(line)\n ss = str(line)\n l = re.findall(r'\\d+', ss)\n col = int(l[0])\n row = int(l[1])\n break\n header.append((file.readline()).decode(\"utf-8\"))\n\n n = col*row\n lst = list()\n for i in range(n):\n try:\n lst.append(ord(file.read(1)))\n except:\n pass\n\n file.close()\n return header, lst, [col, row]\n\n#convert list\ndef convert(lst:list)->list():\n '''String Unicode to int'''\n l = list()\n for item in lst:\n l.append(ord(item))\n return l\n\ndef writeNumeric(filename:str, data:list, header:list, dimension:list):\n '''write pgm file in numeric format (P2 as a header)'''\n # clear file if exists\n name = filename.split('.')\n filename = name[0]+'_out.'+name[1]\n\n f = open(filename, 'w')\n f.write('')\n f.close()\n\n col = dimension[0]\n row = dimension[1]\n\n s = ''\n # write new file\n with open(filename, 'w', encoding='ISO-8859-1') as file:\n header[0] = 'P2\\n'\n for h in header:\n # decoding\n s += h\n for i in range(row):\n for j in range(col):\n try:\n index = i*col + j\n s += str(data[index])\n if j < col -1:\n s += ' '\n except:\n # print(i)\n # print(j)\n pass\n s += '\\n'\n file.write(s)\n file.close()\n\ndef write(filename:str, data:list, header:list):\n # clear file if exists\n name = filename.split('.')\n filename = name[0]+'_out.'+name[1]\n\n f = open(filename, 'w')\n f.write('')\n f.close()\n\n s = ''\n # write new file\n with open(filename, 'w', encoding='ISO-8859-1') as file:\n for h in header:\n # decoding\n s += h\n for d in data:\n s += str(d)\n\n file.write(s)\n file.close()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
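A short usage sketch for the helpers above, assuming they are saved in a module named `pgm_io` (an assumed name) and that `lena.pgm` is a binary (P5) PGM file next to the script: read the pixels, invert the image with numpy, and write the result back out as a numeric (P2) file named `lena_out.pgm`.

import numpy as np

from pgm_io import readfile, writeNumeric   # assumed module name for the code above

header, pixels, (cols, rows) = readfile('lena.pgm')

img = np.array(pixels, dtype=np.uint8).reshape(rows, cols)
inverted = 255 - img   # simple negative of the image

# writeNumeric derives the output name ('lena_out.pgm') from the input name
writeNumeric('lena.pgm', inverted.flatten().tolist(), header, [cols, rows])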
# python examples/mnist_rnn.py --bsz 128 --bsz-eval 256
import sys
from argparse import ArgumentParser
import pytorch_lightning as pl
import torch.nn as nn
import torch.optim as optim
from loguru import logger
from slp.config.config_parser import make_cli_parser, parse_config
from slp.data.collators import SequenceClassificationCollator
from slp.modules.rnn import RNN
from slp.plbind import (
FromLogits,
PLDataModuleFromDatasets,
RnnPLModule,
make_trainer,
watch_model,
)
from slp.util.log import configure_logging
from torchvision.datasets import MNIST # type: ignore
from torchvision.transforms import Compose, Normalize, ToTensor # type: ignore
collate_fn = SequenceClassificationCollator()
class Net(nn.Module):
def __init__(self, input_size, hidden_size=40, num_classes=10, bidirectional=False):
super().__init__()
self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional)
out_size = hidden_size if not bidirectional else 2 * hidden_size
self.clf = nn.Linear(out_size, num_classes)
def forward(self, x, lengths):
_, x, _ = self.encoder(x, lengths)
out = self.clf(x)
return out
def get_parser():
parser = ArgumentParser("MNIST classification example")
parser.add_argument(
"--hidden",
dest="model.hidden_size",
type=int,
help="Intermediate hidden layers for linear module",
)
parser.add_argument(
"--bi",
dest="model.bidirectional",
action="store_true",
help="Use BiLSTM",
)
return parser
def get_data():
# Fix: https://stackoverflow.com/a/66820249
MNIST.resources = [
(
"https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz",
"f68b3c2dcbeaaa9fbdd348bbdeb94873",
),
(
"https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz",
"d53e105ee54ea40749a09fcbcd1e9432",
),
(
"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz",
"9fb629c4189551a2d022fa330f9573f3",
),
(
"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz",
"ec29112dd5afa0611ce80d1b7f02629c",
),
]
def squeeze(x):
return x.squeeze()
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)), squeeze])
train = MNIST(download=True, root=".", transform=data_transform, train=True)
val = MNIST(download=False, root=".", transform=data_transform, train=False)
return train, val
if __name__ == "__main__":
# SETUP ##################################################
parser = get_parser()
parser = make_cli_parser(parser, PLDataModuleFromDatasets)
config = parse_config(parser, parser.parse_args().config)
if config.trainer.experiment_name == "experiment":
config.trainer.experiment_name = "mnist-rnn-classification"
configure_logging(f"logs/{config.trainer.experiment_name}")
if config.seed is not None:
logger.info("Seeding everything with seed={seed}")
pl.utilities.seed.seed_everything(seed=config.seed)
train, test = get_data()
# Get data and make datamodule ##########################
ldm = PLDataModuleFromDatasets(
train, test=test, seed=config.seed, collate_fn=collate_fn, **config.data
)
# Create model, optimizer, criterion, scheduler ###########
model = Net(28, **config.model)
optimizer = getattr(optim, config.optimizer)(model.parameters(), **config.optim)
criterion = nn.CrossEntropyLoss()
lr_scheduler = None
if config.lr_scheduler:
lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, **config.lr_schedule
)
# Wrap in PLModule, & configure metrics ####################
lm = RnnPLModule(
model,
optimizer,
criterion,
lr_scheduler=lr_scheduler,
metrics={"acc": FromLogits(pl.metrics.classification.Accuracy())},
hparams=config,
)
# Run debugging session or fit & test the model ############
if config.debug:
logger.info("Running in debug mode: Fast run on 5 batches")
trainer = make_trainer(fast_dev_run=5)
trainer.fit(lm, datamodule=ldm)
logger.info("Running in debug mode: Overfitting 5 batches")
trainer = make_trainer(overfit_batches=5)
trainer.fit(lm, datamodule=ldm)
else:
trainer = make_trainer(**config.trainer)
watch_model(trainer, model)
trainer.fit(lm, datamodule=ldm)
trainer.test(ckpt_path="best", test_dataloaders=ldm.test_dataloader())
logger.info("Run finished. Uploading files to wandb...")
|
normal
|
{
"blob_id": "d8a09f9952856da69120fae6221636dd5bd8c93e",
"index": 3567,
"step-1": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size=40, num_classes=10,\n bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional\n )\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n return out\n\n\ndef get_parser():\n parser = ArgumentParser('MNIST classification example')\n parser.add_argument('--hidden', dest='model.hidden_size', type=int,\n help='Intermediate hidden layers for linear module')\n parser.add_argument('--bi', dest='model.bidirectional', action=\n 'store_true', help='Use BiLSTM')\n return parser\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size=40, num_classes=10,\n bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional\n )\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n return out\n\n\ndef get_parser():\n parser = ArgumentParser('MNIST classification example')\n parser.add_argument('--hidden', dest='model.hidden_size', type=int,\n help='Intermediate hidden layers for linear module')\n parser.add_argument('--bi', dest='model.bidirectional', action=\n 'store_true', help='Use BiLSTM')\n return parser\n\n\ndef get_data():\n MNIST.resources = [(\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz'\n , 'f68b3c2dcbeaaa9fbdd348bbdeb94873'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz'\n , 'd53e105ee54ea40749a09fcbcd1e9432'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz'\n , '9fb629c4189551a2d022fa330f9573f3'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'\n , 'ec29112dd5afa0611ce80d1b7f02629c')]\n\n def squeeze(x):\n return x.squeeze()\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)),\n squeeze])\n train = MNIST(download=True, root='.', transform=data_transform, train=True\n )\n val = MNIST(download=False, root='.', transform=data_transform, train=False\n )\n return train, val\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size=40, num_classes=10,\n bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional\n )\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n return out\n\n\ndef get_parser():\n parser = ArgumentParser('MNIST classification example')\n parser.add_argument('--hidden', dest='model.hidden_size', type=int,\n help='Intermediate hidden layers for linear module')\n parser.add_argument('--bi', dest='model.bidirectional', action=\n 'store_true', help='Use BiLSTM')\n return parser\n\n\ndef get_data():\n MNIST.resources = [(\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz'\n , 'f68b3c2dcbeaaa9fbdd348bbdeb94873'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz'\n , 'd53e105ee54ea40749a09fcbcd1e9432'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz'\n , '9fb629c4189551a2d022fa330f9573f3'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'\n , 'ec29112dd5afa0611ce80d1b7f02629c')]\n\n def squeeze(x):\n return x.squeeze()\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)),\n squeeze])\n train = MNIST(download=True, root='.', transform=data_transform, train=True\n )\n val = MNIST(download=False, root='.', transform=data_transform, train=False\n )\n return train, val\n\n\nif __name__ == '__main__':\n parser = get_parser()\n parser = make_cli_parser(parser, PLDataModuleFromDatasets)\n config = parse_config(parser, parser.parse_args().config)\n if config.trainer.experiment_name == 'experiment':\n config.trainer.experiment_name = 'mnist-rnn-classification'\n configure_logging(f'logs/{config.trainer.experiment_name}')\n if config.seed is not None:\n logger.info('Seeding everything with seed={seed}')\n pl.utilities.seed.seed_everything(seed=config.seed)\n train, test = get_data()\n ldm = PLDataModuleFromDatasets(train, test=test, seed=config.seed,\n collate_fn=collate_fn, **config.data)\n model = Net(28, **config.model)\n optimizer = getattr(optim, config.optimizer)(model.parameters(), **\n config.optim)\n criterion = nn.CrossEntropyLoss()\n lr_scheduler = None\n if config.lr_scheduler:\n lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **\n config.lr_schedule)\n lm = RnnPLModule(model, optimizer, criterion, lr_scheduler=lr_scheduler,\n metrics={'acc': FromLogits(pl.metrics.classification.Accuracy())},\n hparams=config)\n if config.debug:\n logger.info('Running in debug mode: Fast run on 5 batches')\n trainer = make_trainer(fast_dev_run=5)\n trainer.fit(lm, datamodule=ldm)\n logger.info('Running in debug mode: Overfitting 5 batches')\n trainer = make_trainer(overfit_batches=5)\n trainer.fit(lm, datamodule=ldm)\n else:\n trainer = make_trainer(**config.trainer)\n watch_model(trainer, model)\n trainer.fit(lm, datamodule=ldm)\n trainer.test(ckpt_path='best', test_dataloaders=ldm.test_dataloader())\n logger.info('Run finished. Uploading files to wandb...')\n",
"step-4": "<mask token>\ncollate_fn = SequenceClassificationCollator()\n\n\nclass Net(nn.Module):\n\n def __init__(self, input_size, hidden_size=40, num_classes=10,\n bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional\n )\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n return out\n\n\ndef get_parser():\n parser = ArgumentParser('MNIST classification example')\n parser.add_argument('--hidden', dest='model.hidden_size', type=int,\n help='Intermediate hidden layers for linear module')\n parser.add_argument('--bi', dest='model.bidirectional', action=\n 'store_true', help='Use BiLSTM')\n return parser\n\n\ndef get_data():\n MNIST.resources = [(\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz'\n , 'f68b3c2dcbeaaa9fbdd348bbdeb94873'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz'\n , 'd53e105ee54ea40749a09fcbcd1e9432'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz'\n , '9fb629c4189551a2d022fa330f9573f3'), (\n 'https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz'\n , 'ec29112dd5afa0611ce80d1b7f02629c')]\n\n def squeeze(x):\n return x.squeeze()\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)),\n squeeze])\n train = MNIST(download=True, root='.', transform=data_transform, train=True\n )\n val = MNIST(download=False, root='.', transform=data_transform, train=False\n )\n return train, val\n\n\nif __name__ == '__main__':\n parser = get_parser()\n parser = make_cli_parser(parser, PLDataModuleFromDatasets)\n config = parse_config(parser, parser.parse_args().config)\n if config.trainer.experiment_name == 'experiment':\n config.trainer.experiment_name = 'mnist-rnn-classification'\n configure_logging(f'logs/{config.trainer.experiment_name}')\n if config.seed is not None:\n logger.info('Seeding everything with seed={seed}')\n pl.utilities.seed.seed_everything(seed=config.seed)\n train, test = get_data()\n ldm = PLDataModuleFromDatasets(train, test=test, seed=config.seed,\n collate_fn=collate_fn, **config.data)\n model = Net(28, **config.model)\n optimizer = getattr(optim, config.optimizer)(model.parameters(), **\n config.optim)\n criterion = nn.CrossEntropyLoss()\n lr_scheduler = None\n if config.lr_scheduler:\n lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **\n config.lr_schedule)\n lm = RnnPLModule(model, optimizer, criterion, lr_scheduler=lr_scheduler,\n metrics={'acc': FromLogits(pl.metrics.classification.Accuracy())},\n hparams=config)\n if config.debug:\n logger.info('Running in debug mode: Fast run on 5 batches')\n trainer = make_trainer(fast_dev_run=5)\n trainer.fit(lm, datamodule=ldm)\n logger.info('Running in debug mode: Overfitting 5 batches')\n trainer = make_trainer(overfit_batches=5)\n trainer.fit(lm, datamodule=ldm)\n else:\n trainer = make_trainer(**config.trainer)\n watch_model(trainer, model)\n trainer.fit(lm, datamodule=ldm)\n trainer.test(ckpt_path='best', test_dataloaders=ldm.test_dataloader())\n logger.info('Run finished. Uploading files to wandb...')\n",
"step-5": "# python examples/mnist_rnn.py --bsz 128 --bsz-eval 256\n\nimport sys\nfrom argparse import ArgumentParser\n\nimport pytorch_lightning as pl\nimport torch.nn as nn\nimport torch.optim as optim\nfrom loguru import logger\nfrom slp.config.config_parser import make_cli_parser, parse_config\nfrom slp.data.collators import SequenceClassificationCollator\nfrom slp.modules.rnn import RNN\nfrom slp.plbind import (\n FromLogits,\n PLDataModuleFromDatasets,\n RnnPLModule,\n make_trainer,\n watch_model,\n)\nfrom slp.util.log import configure_logging\nfrom torchvision.datasets import MNIST # type: ignore\nfrom torchvision.transforms import Compose, Normalize, ToTensor # type: ignore\n\ncollate_fn = SequenceClassificationCollator()\n\n\nclass Net(nn.Module):\n def __init__(self, input_size, hidden_size=40, num_classes=10, bidirectional=False):\n super().__init__()\n self.encoder = RNN(input_size, hidden_size, bidirectional=bidirectional)\n out_size = hidden_size if not bidirectional else 2 * hidden_size\n self.clf = nn.Linear(out_size, num_classes)\n\n def forward(self, x, lengths):\n _, x, _ = self.encoder(x, lengths)\n out = self.clf(x)\n\n return out\n\n\ndef get_parser():\n parser = ArgumentParser(\"MNIST classification example\")\n parser.add_argument(\n \"--hidden\",\n dest=\"model.hidden_size\",\n type=int,\n help=\"Intermediate hidden layers for linear module\",\n )\n parser.add_argument(\n \"--bi\",\n dest=\"model.bidirectional\",\n action=\"store_true\",\n help=\"Use BiLSTM\",\n )\n\n return parser\n\n\ndef get_data():\n # Fix: https://stackoverflow.com/a/66820249\n MNIST.resources = [\n (\n \"https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz\",\n \"f68b3c2dcbeaaa9fbdd348bbdeb94873\",\n ),\n (\n \"https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz\",\n \"d53e105ee54ea40749a09fcbcd1e9432\",\n ),\n (\n \"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz\",\n \"9fb629c4189551a2d022fa330f9573f3\",\n ),\n (\n \"https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz\",\n \"ec29112dd5afa0611ce80d1b7f02629c\",\n ),\n ]\n\n def squeeze(x):\n return x.squeeze()\n\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,)), squeeze])\n train = MNIST(download=True, root=\".\", transform=data_transform, train=True)\n\n val = MNIST(download=False, root=\".\", transform=data_transform, train=False)\n\n return train, val\n\n\nif __name__ == \"__main__\":\n # SETUP ##################################################\n parser = get_parser()\n parser = make_cli_parser(parser, PLDataModuleFromDatasets)\n\n config = parse_config(parser, parser.parse_args().config)\n\n if config.trainer.experiment_name == \"experiment\":\n config.trainer.experiment_name = \"mnist-rnn-classification\"\n\n configure_logging(f\"logs/{config.trainer.experiment_name}\")\n\n if config.seed is not None:\n logger.info(\"Seeding everything with seed={seed}\")\n pl.utilities.seed.seed_everything(seed=config.seed)\n\n train, test = get_data()\n\n # Get data and make datamodule ##########################\n ldm = PLDataModuleFromDatasets(\n train, test=test, seed=config.seed, collate_fn=collate_fn, **config.data\n )\n\n # Create model, optimizer, criterion, scheduler ###########\n model = Net(28, **config.model)\n\n optimizer = getattr(optim, config.optimizer)(model.parameters(), **config.optim)\n criterion = nn.CrossEntropyLoss()\n\n lr_scheduler = None\n\n if config.lr_scheduler:\n lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n 
optimizer, **config.lr_schedule\n )\n\n # Wrap in PLModule, & configure metrics ####################\n lm = RnnPLModule(\n model,\n optimizer,\n criterion,\n lr_scheduler=lr_scheduler,\n metrics={\"acc\": FromLogits(pl.metrics.classification.Accuracy())},\n hparams=config,\n )\n\n # Run debugging session or fit & test the model ############\n\n if config.debug:\n logger.info(\"Running in debug mode: Fast run on 5 batches\")\n trainer = make_trainer(fast_dev_run=5)\n trainer.fit(lm, datamodule=ldm)\n\n logger.info(\"Running in debug mode: Overfitting 5 batches\")\n trainer = make_trainer(overfit_batches=5)\n trainer.fit(lm, datamodule=ldm)\n\n else:\n trainer = make_trainer(**config.trainer)\n watch_model(trainer, model)\n\n trainer.fit(lm, datamodule=ldm)\n\n trainer.test(ckpt_path=\"best\", test_dataloaders=ldm.test_dataloader())\n\n logger.info(\"Run finished. Uploading files to wandb...\")\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
from django.urls import path
from .views import PollsList, SinglePollsView, PollsCreate, PollsAnswer
app_name = "authors"
# app_name will help us do a reverse look-up later.
urlpatterns = [
path('polls/', PollsList.as_view()),
path('polls/create', PollsCreate.as_view()),
path('polls/<int:pk>', SinglePollsView.as_view()),
path('answers/', PollsAnswer.as_view()),
]
|
normal
|
{
"blob_id": "64ac007faeebe0e71ba0060e74fa07154e6291e2",
"index": 6053,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'authors'\nurlpatterns = [path('polls/', PollsList.as_view()), path('polls/create',\n PollsCreate.as_view()), path('polls/<int:pk>', SinglePollsView.as_view(\n )), path('answers/', PollsAnswer.as_view())]\n",
"step-3": "from django.urls import path\nfrom .views import PollsList, SinglePollsView, PollsCreate, PollsAnswer\napp_name = 'authors'\nurlpatterns = [path('polls/', PollsList.as_view()), path('polls/create',\n PollsCreate.as_view()), path('polls/<int:pk>', SinglePollsView.as_view(\n )), path('answers/', PollsAnswer.as_view())]\n",
"step-4": "from django.urls import path\nfrom .views import PollsList, SinglePollsView, PollsCreate, PollsAnswer\napp_name = \"authors\"\n# app_name will help us do a reverse look-up latter.\nurlpatterns = [\n path('polls/', PollsList.as_view()),\n path('polls/create', PollsCreate.as_view()),\n path('polls/<int:pk>', SinglePollsView.as_view()),\n path('answers/', PollsAnswer.as_view()),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
This example shows how to communicate with a SH05 (shutter) connected to a KSC101 (KCube Solenoid).
"""
# this "if" statement is used so that Sphinx does not execute this script when the docs are being built
if __name__ == '__main__':
import os
import time
from msl.equipment import EquipmentRecord, ConnectionRecord, Backend
from msl.equipment.resources.thorlabs import MotionControl
# ensure that the Kinesis folder is available on PATH
os.environ['PATH'] += os.pathsep + 'C:/Program Files/Thorlabs/Kinesis'
# rather than reading the EquipmentRecord from a database we can create it manually
record = EquipmentRecord(
manufacturer='Thorlabs',
model='KSC101',
serial='68000297', # update the serial number for your KSC101
connection=ConnectionRecord(
backend=Backend.MSL,
address='SDK::Thorlabs.MotionControl.KCube.Solenoid.dll',
),
)
def is_open():
return shutter.get_operating_state() == 1
# avoid the FT_DeviceNotFound error
MotionControl.build_device_list()
# connect to the KCube Solenoid
shutter = record.connect()
print('Connected to {}'.format(shutter))
# start polling at 200 ms
shutter.start_polling(200)
# set the operating mode to SC_OperatingModes.SC_Manual
shutter.set_operating_mode('Manual')
for i in range(5):
# set the operating state to SC_OperatingStates.SC_Active
print('Opening the shutter...')
shutter.set_operating_state('Active')
while not is_open():
time.sleep(0.05)
print(' Is the shutter open? {}'.format(is_open()))
time.sleep(1)
# set the operating state to SC_OperatingStates.SC_Inactive
print('Closing the shutter...')
shutter.set_operating_state('Inactive')
while is_open():
time.sleep(0.05)
print(' Is the shutter open? {}'.format(is_open()))
time.sleep(1)
# stop polling and close the connection
shutter.stop_polling()
shutter.disconnect()
|
normal
|
{
"blob_id": "04b5df5cfd052390f057c6f13b2e21d27bac6449",
"index": 943,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n import os\n import time\n from msl.equipment import EquipmentRecord, ConnectionRecord, Backend\n from msl.equipment.resources.thorlabs import MotionControl\n os.environ['PATH'] += os.pathsep + 'C:/Program Files/Thorlabs/Kinesis'\n record = EquipmentRecord(manufacturer='Thorlabs', model='KSC101',\n serial='68000297', connection=ConnectionRecord(backend=Backend.MSL,\n address='SDK::Thorlabs.MotionControl.KCube.Solenoid.dll'))\n\n def is_open():\n return shutter.get_operating_state() == 1\n MotionControl.build_device_list()\n shutter = record.connect()\n print('Connected to {}'.format(shutter))\n shutter.start_polling(200)\n shutter.set_operating_mode('Manual')\n for i in range(5):\n print('Opening the shutter...')\n shutter.set_operating_state('Active')\n while not is_open():\n time.sleep(0.05)\n print(' Is the shutter open? {}'.format(is_open()))\n time.sleep(1)\n print('Closing the shutter...')\n shutter.set_operating_state('Inactive')\n while is_open():\n time.sleep(0.05)\n print(' Is the shutter open? {}'.format(is_open()))\n time.sleep(1)\n shutter.stop_polling()\n shutter.disconnect()\n",
"step-3": "\"\"\"\nThis example shows how to communicate with a SH05 (shutter) connected to a KSC101 (KCube Solenoid).\n\"\"\"\n\n# this \"if\" statement is used so that Sphinx does not execute this script when the docs are being built\nif __name__ == '__main__':\n import os\n import time\n\n from msl.equipment import EquipmentRecord, ConnectionRecord, Backend\n from msl.equipment.resources.thorlabs import MotionControl\n\n # ensure that the Kinesis folder is available on PATH\n os.environ['PATH'] += os.pathsep + 'C:/Program Files/Thorlabs/Kinesis'\n\n # rather than reading the EquipmentRecord from a database we can create it manually\n record = EquipmentRecord(\n manufacturer='Thorlabs',\n model='KSC101',\n serial='68000297', # update the serial number for your KSC101\n connection=ConnectionRecord(\n backend=Backend.MSL,\n address='SDK::Thorlabs.MotionControl.KCube.Solenoid.dll',\n ),\n )\n\n def is_open():\n return shutter.get_operating_state() == 1\n\n # avoid the FT_DeviceNotFound error\n MotionControl.build_device_list()\n\n # connect to the KCube Solenoid\n shutter = record.connect()\n print('Connected to {}'.format(shutter))\n\n # start polling at 200 ms\n shutter.start_polling(200)\n\n # set the operating mode to SC_OperatingModes.SC_Manual\n shutter.set_operating_mode('Manual')\n\n for i in range(5):\n\n # set the operating state to SC_OperatingStates.SC_Active\n print('Opening the shutter...')\n shutter.set_operating_state('Active')\n while not is_open():\n time.sleep(0.05)\n print(' Is the shutter open? {}'.format(is_open()))\n\n time.sleep(1)\n\n # set the operating state to SC_OperatingStates.SC_Inactive\n print('Closing the shutter...')\n shutter.set_operating_state('Inactive')\n while is_open():\n time.sleep(0.05)\n print(' Is the shutter open? {}'.format(is_open()))\n\n time.sleep(1)\n\n # stop polling and close the connection\n shutter.stop_polling()\n shutter.disconnect()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
Copyright (c) 2007 by the Pallets team.
Some rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
from sentry_sdk._compat import iteritems
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Dict
from typing import Iterator
from typing import Tuple
#
# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
# https://github.com/pallets/werkzeug/blob/0.14.1/werkzeug/datastructures.py#L1361
#
# We need this function because Django does not give us a "pure" http header
# dict. So we might as well use it for all WSGI integrations.
#
def _get_headers(environ):
# type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith("HTTP_") and key not in (
"HTTP_CONTENT_TYPE",
"HTTP_CONTENT_LENGTH",
):
yield key[5:].replace("_", "-").title(), value
elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
yield key.replace("_", "-").title(), value
#
# `get_host` comes from `werkzeug.wsgi.get_host`
# https://github.com/pallets/werkzeug/blob/1.0.1/src/werkzeug/wsgi.py#L145
#
def get_host(environ, use_x_forwarded_for=False):
# type: (Dict[str, str], bool) -> str
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and "HTTP_X_FORWARDED_HOST" in environ:
rv = environ["HTTP_X_FORWARDED_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv
|
normal
|
{
"blob_id": "53cd9d5a79e97bb1af69446a82c747248c3cc298",
"index": 1367,
"step-1": "<mask token>\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-4": "<mask token>\nfrom sentry_sdk._compat import iteritems\nfrom sentry_sdk._types import TYPE_CHECKING\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\ndef _get_headers(environ):\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',\n 'HTTP_CONTENT_LENGTH'):\n yield key[5:].replace('_', '-').title(), value\n elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):\n yield key.replace('_', '-').title(), value\n\n\ndef get_host(environ, use_x_forwarded_for=False):\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:\n rv = environ['HTTP_X_FORWARDED_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('HTTP_HOST'):\n rv = environ['HTTP_HOST']\n if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):\n rv = rv[:-3]\n elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):\n rv = rv[:-4]\n elif environ.get('SERVER_NAME'):\n rv = environ['SERVER_NAME']\n if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((\n 'https', '443'), ('http', '80')):\n rv += ':' + environ['SERVER_PORT']\n else:\n rv = 'unknown'\n return rv\n",
"step-5": "\"\"\"\nCopyright (c) 2007 by the Pallets team.\n\nSome rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND\nCONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,\nBUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\nCOPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\nINCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\nNOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF\nUSE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\nTHIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGE.\n\"\"\"\n\nfrom sentry_sdk._compat import iteritems\n\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import Dict\n from typing import Iterator\n from typing import Tuple\n\n\n#\n# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`\n# https://github.com/pallets/werkzeug/blob/0.14.1/werkzeug/datastructures.py#L1361\n#\n# We need this function because Django does not give us a \"pure\" http header\n# dict. 
So we might as well use it for all WSGI integrations.\n#\ndef _get_headers(environ):\n # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]\n \"\"\"\n Returns only proper HTTP headers.\n \"\"\"\n for key, value in iteritems(environ):\n key = str(key)\n if key.startswith(\"HTTP_\") and key not in (\n \"HTTP_CONTENT_TYPE\",\n \"HTTP_CONTENT_LENGTH\",\n ):\n yield key[5:].replace(\"_\", \"-\").title(), value\n elif key in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n yield key.replace(\"_\", \"-\").title(), value\n\n\n#\n# `get_host` comes from `werkzeug.wsgi.get_host`\n# https://github.com/pallets/werkzeug/blob/1.0.1/src/werkzeug/wsgi.py#L145\n#\ndef get_host(environ, use_x_forwarded_for=False):\n # type: (Dict[str, str], bool) -> str\n \"\"\"\n Return the host for the given WSGI environment.\n \"\"\"\n if use_x_forwarded_for and \"HTTP_X_FORWARDED_HOST\" in environ:\n rv = environ[\"HTTP_X_FORWARDED_HOST\"]\n if environ[\"wsgi.url_scheme\"] == \"http\" and rv.endswith(\":80\"):\n rv = rv[:-3]\n elif environ[\"wsgi.url_scheme\"] == \"https\" and rv.endswith(\":443\"):\n rv = rv[:-4]\n elif environ.get(\"HTTP_HOST\"):\n rv = environ[\"HTTP_HOST\"]\n if environ[\"wsgi.url_scheme\"] == \"http\" and rv.endswith(\":80\"):\n rv = rv[:-3]\n elif environ[\"wsgi.url_scheme\"] == \"https\" and rv.endswith(\":443\"):\n rv = rv[:-4]\n elif environ.get(\"SERVER_NAME\"):\n rv = environ[\"SERVER_NAME\"]\n if (environ[\"wsgi.url_scheme\"], environ[\"SERVER_PORT\"]) not in (\n (\"https\", \"443\"),\n (\"http\", \"80\"),\n ):\n rv += \":\" + environ[\"SERVER_PORT\"]\n else:\n # In spite of the WSGI spec, SERVER_NAME might not be present.\n rv = \"unknown\"\n\n return rv\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
"""
Otsu method for automatic estimation of $T$ threshold value
- assumes two maxima of grayscale histogram & searches for optimal separation
Parameters
Usage
Example
$ python <scriptname>.py --image ../img/<filename>.png
## Explain
"""
import numpy as np
import argparse
import mahotas
import cv2
from numpy.matrixlib.defmatrix import matrix
def main():
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
#preprocessing
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (5,5), 0)
cv2.imshow("Image", image)
# Otsu
T = mahotas.thresholding.otsu(blurred)
print("[INFO] Otsu's threshold {}".format(T))
thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < 255] = 0
thresh = cv2.bitwise_not(thresh)
cv2.imshow("Otsu", thresh)
# Riddler-Calvard
T = mahotas.thresholding.rc(blurred)
print("[INFO] Riddler-Calvard: {}".format(T))
thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < 255] = 0
thresh = cv2.bitwise_not(thresh)
cv2.imshow("Riddler-Calvard", thresh)
cv2.waitKey(0)
if __name__=="__main__":
main()
|
normal
|
{
"blob_id": "0547751af7bbac42351476dde591d13d40fb37eb",
"index": 7811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--image', required=True, help='Path to the image')\n args = vars(ap.parse_args())\n image = cv2.imread(args['image'])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (5, 5), 0)\n cv2.imshow('Image', image)\n T = mahotas.thresholding.otsu(blurred)\n print(\"[INFO] Otsu's threshold {}\".format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Otsu', thresh)\n T = mahotas.thresholding.rc(blurred)\n print('[INFO] Riddler-Calvard: {}'.format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Riddler-Calvard', thresh)\n cv2.waitKey(0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--image', required=True, help='Path to the image')\n args = vars(ap.parse_args())\n image = cv2.imread(args['image'])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (5, 5), 0)\n cv2.imshow('Image', image)\n T = mahotas.thresholding.otsu(blurred)\n print(\"[INFO] Otsu's threshold {}\".format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Otsu', thresh)\n T = mahotas.thresholding.rc(blurred)\n print('[INFO] Riddler-Calvard: {}'.format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Riddler-Calvard', thresh)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport numpy as np\nimport argparse\nimport mahotas\nimport cv2\nfrom numpy.matrixlib.defmatrix import matrix\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--image', required=True, help='Path to the image')\n args = vars(ap.parse_args())\n image = cv2.imread(args['image'])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (5, 5), 0)\n cv2.imshow('Image', image)\n T = mahotas.thresholding.otsu(blurred)\n print(\"[INFO] Otsu's threshold {}\".format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Otsu', thresh)\n T = mahotas.thresholding.rc(blurred)\n print('[INFO] Riddler-Calvard: {}'.format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Riddler-Calvard', thresh)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nOtsu method for automatic estimation of $T$ threshold value\n - assumes two maxima of grayscale histogram & searches for optimal separation\n\nParameters\n\nUsage\n\nExample\n $ python <scriptname>.py --image ../img/<filename>.png\n\n## Explain\n\n\"\"\"\nimport numpy as np\nimport argparse\nimport mahotas\nimport cv2\nfrom numpy.matrixlib.defmatrix import matrix\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\n args = vars(ap.parse_args())\n\n image = cv2.imread(args[\"image\"])\n #preprocessing\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (5,5), 0)\n cv2.imshow(\"Image\", image)\n\n # Otsu\n T = mahotas.thresholding.otsu(blurred)\n print(\"[INFO] Otsu's threshold {}\".format(T))\n\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow(\"Otsu\", thresh)\n\n # Riddler-Calvard\n T = mahotas.thresholding.rc(blurred)\n print(\"[INFO] Riddler-Calvard: {}\".format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow(\"Riddler-Calvard\", thresh)\n\n cv2.waitKey(0)\n\nif __name__==\"__main__\":\n main()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from abc import abstractmethod
class Environment:
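    # Abstract interface for an environment an agent interacts with; concrete
    # subclasses implement single-step execution, running to completion, and delay control.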
@abstractmethod
def __init__(self, agent):
pass
@abstractmethod
def execute_step(self, n=1):
pass
@abstractmethod
def execute_all(self):
pass
@abstractmethod
def set_delay(self, delay):
pass
|
normal
|
{
"blob_id": "8698aedc5c8671f46c73898a7188440254b79bbf",
"index": 307,
"step-1": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n <mask token>\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-3": "<mask token>\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-4": "from abc import abstractmethod\n\n\nclass Environment:\n\n @abstractmethod\n def __init__(self, agent):\n pass\n\n @abstractmethod\n def execute_step(self, n=1):\n pass\n\n @abstractmethod\n def execute_all(self):\n pass\n\n @abstractmethod\n def set_delay(self, delay):\n pass\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
# Generated by Django 2.1.7 on 2020-01-09 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('goods', '0004_auto_20200109_0713'),
]
operations = [
migrations.AlterField(
model_name='banner',
name='show_type',
field=models.IntegerField(choices=[(1, '首页轮播'), (2, '最新活动')], default=1, verbose_name='展示控制'),
),
]
|
normal
|
{
"blob_id": "b7687240413441e1d3ed0085e5953f8089cbf4c9",
"index": 9303,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('goods', '0004_auto_20200109_0713')]\n operations = [migrations.AlterField(model_name='banner', name=\n 'show_type', field=models.IntegerField(choices=[(1, '首页轮播'), (2,\n '最新活动')], default=1, verbose_name='展示控制'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('goods', '0004_auto_20200109_0713')]\n operations = [migrations.AlterField(model_name='banner', name=\n 'show_type', field=models.IntegerField(choices=[(1, '首页轮播'), (2,\n '最新活动')], default=1, verbose_name='展示控制'))]\n",
"step-5": "# Generated by Django 2.1.7 on 2020-01-09 08:19\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('goods', '0004_auto_20200109_0713'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='banner',\n name='show_type',\n field=models.IntegerField(choices=[(1, '首页轮播'), (2, '最新活动')], default=1, verbose_name='展示控制'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import print_function
import os
import re
import xml.etree.ElementTree as ET
def read_vivado_report(hls_dir, full_report=False):
if not os.path.exists(hls_dir):
print('Path {} does not exist. Exiting.'.format(hls_dir))
return
prj_dir = None
top_func_name = None
if os.path.isfile(hls_dir + '/build_prj.tcl'):
prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl')
if prj_dir is None or top_func_name is None:
print('Unable to read project data. Exiting.')
return
sln_dir = hls_dir + '/' + prj_dir
if not os.path.exists(sln_dir):
print('Project {} does not exist. Rerun "hls4ml build -p {}".'.format(prj_dir, hls_dir))
return
solutions = _find_solutions(sln_dir)
print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))
for sln in solutions:
print('Reports for solution "{}":\n'.format(sln))
_find_reports(sln_dir + '/' + sln, top_func_name, full_report)
def _parse_build_script(script_path):
prj_dir = None
top_func_name = None
with open(script_path, 'r') as f:
for line in f.readlines():
if 'open_project' in line:
prj_dir = line.split()[-1]
elif 'set_top' in line:
top_func_name = line.split()[-1]
return prj_dir, top_func_name
def _find_solutions(sln_dir):
solutions = []
if os.path.isfile(sln_dir + '/vivado_hls.app'):
with open(sln_dir + '/vivado_hls.app') as f:
# Get rid of namespaces (workaround to support two types of vivado_hls.app files)
xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1)
root = ET.fromstring(xmlstring)
for sln_tag in root.findall('solutions/solution'):
sln_name = sln_tag.get('name')
if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name):
solutions.append(sln_name)
return solutions
def _find_reports(sln_dir, top_func_name, full_report=False):
csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)
if os.path.isfile(csim_file):
_show_csim_report(csim_file)
else:
print('C simulation report not found.')
syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)
if os.path.isfile(syn_file):
_show_synth_report(syn_file, full_report)
else:
print('Synthesis report not found.')
def _show_csim_report(csim_file):
with open(csim_file, 'r') as f:
print('C SIMULATION RESULT:')
print(f.read())
def _show_synth_report(synth_file, full_report=False):
with open(synth_file, 'r') as f:
print('SYNTHESIS REPORT:')
for line in f.readlines()[2:]:
if not full_report and '* DSP48' in line:
break
print(line, end = '')
|
normal
|
{
"blob_id": "7d173b0571c20dc8fcae884451e8f69ba3a05763",
"index": 8087,
"step-1": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\n<mask token>\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\n<mask token>\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-2": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\n<mask token>\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-3": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n return prj_dir, top_func_name\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-4": "from __future__ import print_function\nimport os\nimport re\nimport xml.etree.ElementTree as ET\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n return prj_dir, top_func_name\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-5": "from __future__ import print_function\nimport os\nimport re\nimport xml.etree.ElementTree as ET\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n\n prj_dir = None\n top_func_name = None\n\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl')\n\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n \n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.format(prj_dir, hls_dir))\n return\n \n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n \n return prj_dir, top_func_name\n\ndef _find_solutions(sln_dir):\n solutions = []\n\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n # Get rid of namespaces (workaround to support two types of vivado_hls.app files)\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name):\n solutions.append(sln_name)\n \n return solutions\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n \n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end = '')\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import re
# Regex for the Apache access log "Common Log Format"; a raw string keeps the backslash
# escapes intact for the regex engine.
APACHE_ACCESS_LOG_PATTERN = r'^(\S+) (\S+) (\S+) \[([\w:/]+\s[+\-]\d{4})\] "(\S+) (\S+)\s*(\S*)" (\d{3}) (\S+)'
pattern = re.compile(APACHE_ACCESS_LOG_PATTERN)
# Match a sample access-log line against the compiled pattern and print the match object.
print(pattern.match('ix-sac6-20.ix.netcom.com - - [08/Aug/1995:14:43:39 -0400] "GET / HTTP/1.0 " 200 7131'))
|
normal
|
{
"blob_id": "0abba9fdd98d6bb5c706b82a01a267dbcefbba28",
"index": 4562,
"step-1": "import re\nAPACHE_ACCESS_LOG_PATTERN = '^(\\S+) (\\S+) (\\S+) \\[([\\w:/]+\\s[+\\-]\\d{4})\\] \"(\\S+) (\\S+)\\s*(\\S*)\" (\\d{3}) (\\S+)'\npattern = re.compile(APACHE_ACCESS_LOG_PATTERN)\nprint re.match('ix-sac6-20.ix.netcom.com - - [08/Aug/1995:14:43:39 -0400] \"GET / HTTP/1.0 \" 200 7131', 0)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python3
"""display your id from github.
"""
from sys import argv
import requests
if __name__ == "__main__":
get = requests.get('https://api.github.com/user',
auth=(argv[1], argv[2])).json().get('id')
print(get)
|
normal
|
{
"blob_id": "8280f321b102cace462761f9ece2aebf9e28a432",
"index": 3941,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n get = requests.get('https://api.github.com/user', auth=(argv[1], argv[2])\n ).json().get('id')\n print(get)\n",
"step-3": "<mask token>\nfrom sys import argv\nimport requests\nif __name__ == '__main__':\n get = requests.get('https://api.github.com/user', auth=(argv[1], argv[2])\n ).json().get('id')\n print(get)\n",
"step-4": "#!/usr/bin/python3\n\"\"\"display your id from github.\n\"\"\"\nfrom sys import argv\nimport requests\n\n\nif __name__ == \"__main__\":\n get = requests.get('https://api.github.com/user',\n auth=(argv[1], argv[2])).json().get('id')\n print(get)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
@File : densenet_block.py
@Time : 12/11/20 9:59 PM
@Author : Mingqiang Ning
@Email : ningmq_cv@foxmail.com
@Modify Time @Version @Description
------------ -------- -----------
12/11/20 9:59 PM 1.0 None
# @Software: PyCharm
"""
import torch
from torch import nn
class BottleNeck(nn.Module):
def __init__(self,n_channels,growth_rate):
super(BottleNeck,self).__init__()
Channels=4*growth_rate
self.bottleneck=nn.Sequential(
nn.BatchNorm2d(n_channels),
nn.ReLU(inplace=True),
nn.Conv2d(n_channels,Channels,1,bias=False),
nn.BatchNorm2d(Channels),
nn.ReLU(inplace=True),
nn.Conv2d(Channels, growth_rate, 3,padding=1, bias=False)
)
def forward(self,x):
out=self.bottleneck(x)
out=torch.cat((x,out),1)
return out
class DenseBlock(nn.Module):
def __init__(self, n_channels, growth_rate,n_DenseBlocks):
super(DenseBlock, self).__init__()
layers=[]
for i in range(n_DenseBlocks):
layers.append(BottleNeck(n_channels+i*growth_rate,growth_rate))
self.denseblock=nn.Sequential(*layers)
def forward(self, x):
out=self.denseblock(x)
return out
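# A minimal usage sketch (shapes are illustrative): every BottleNeck concatenates
# growth_rate new feature maps onto its input, so a DenseBlock keeps the spatial size and
# grows the channel count by n_DenseBlocks * growth_rate.
#   >>> x = torch.randn(2, 64, 32, 32)
#   >>> block = DenseBlock(n_channels=64, growth_rate=12, n_DenseBlocks=4)
#   >>> block(x).shape
#   torch.Size([2, 112, 32, 32])   # 64 + 4 * 12 output channels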
|
normal
|
{
"blob_id": "c2ba18062b8555c77b329718ec1f2ae7f326c78e",
"index": 1988,
"step-1": "<mask token>\n\n\nclass DenseBlock(nn.Module):\n <mask token>\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n",
"step-2": "<mask token>\n\n\nclass BottleNeck(nn.Module):\n <mask token>\n\n def forward(self, x):\n out = self.bottleneck(x)\n out = torch.cat((x, out), 1)\n return out\n\n\nclass DenseBlock(nn.Module):\n\n def __init__(self, n_channels, growth_rate, n_DenseBlocks):\n super(DenseBlock, self).__init__()\n layers = []\n for i in range(n_DenseBlocks):\n layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)\n )\n self.denseblock = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n",
"step-3": "<mask token>\n\n\nclass BottleNeck(nn.Module):\n\n def __init__(self, n_channels, growth_rate):\n super(BottleNeck, self).__init__()\n Channels = 4 * growth_rate\n self.bottleneck = nn.Sequential(nn.BatchNorm2d(n_channels), nn.ReLU\n (inplace=True), nn.Conv2d(n_channels, Channels, 1, bias=False),\n nn.BatchNorm2d(Channels), nn.ReLU(inplace=True), nn.Conv2d(\n Channels, growth_rate, 3, padding=1, bias=False))\n\n def forward(self, x):\n out = self.bottleneck(x)\n out = torch.cat((x, out), 1)\n return out\n\n\nclass DenseBlock(nn.Module):\n\n def __init__(self, n_channels, growth_rate, n_DenseBlocks):\n super(DenseBlock, self).__init__()\n layers = []\n for i in range(n_DenseBlocks):\n layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)\n )\n self.denseblock = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n",
"step-4": "<mask token>\nimport torch\nfrom torch import nn\n\n\nclass BottleNeck(nn.Module):\n\n def __init__(self, n_channels, growth_rate):\n super(BottleNeck, self).__init__()\n Channels = 4 * growth_rate\n self.bottleneck = nn.Sequential(nn.BatchNorm2d(n_channels), nn.ReLU\n (inplace=True), nn.Conv2d(n_channels, Channels, 1, bias=False),\n nn.BatchNorm2d(Channels), nn.ReLU(inplace=True), nn.Conv2d(\n Channels, growth_rate, 3, padding=1, bias=False))\n\n def forward(self, x):\n out = self.bottleneck(x)\n out = torch.cat((x, out), 1)\n return out\n\n\nclass DenseBlock(nn.Module):\n\n def __init__(self, n_channels, growth_rate, n_DenseBlocks):\n super(DenseBlock, self).__init__()\n layers = []\n for i in range(n_DenseBlocks):\n layers.append(BottleNeck(n_channels + i * growth_rate, growth_rate)\n )\n self.denseblock = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.denseblock(x)\n return out\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n@File : densenet_block.py\n@Time : 12/11/20 9:59 PM\n@Author : Mingqiang Ning\n@Email : ningmq_cv@foxmail.com\n@Modify Time @Version @Description\n------------ -------- -----------\n12/11/20 9:59 PM 1.0 None\n# @Software: PyCharm\n\"\"\"\nimport torch\nfrom torch import nn\nclass BottleNeck(nn.Module):\n def __init__(self,n_channels,growth_rate):\n super(BottleNeck,self).__init__()\n Channels=4*growth_rate\n self.bottleneck=nn.Sequential(\n nn.BatchNorm2d(n_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(n_channels,Channels,1,bias=False),\n nn.BatchNorm2d(Channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(Channels, growth_rate, 3,padding=1, bias=False)\n )\n def forward(self,x):\n out=self.bottleneck(x)\n out=torch.cat((x,out),1)\n return out\n\n\nclass DenseBlock(nn.Module):\n def __init__(self, n_channels, growth_rate,n_DenseBlocks):\n super(DenseBlock, self).__init__()\n layers=[]\n for i in range(n_DenseBlocks):\n layers.append(BottleNeck(n_channels+i*growth_rate,growth_rate))\n self.denseblock=nn.Sequential(*layers)\n def forward(self, x):\n out=self.denseblock(x)\n return out\n\n\n\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
SacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.
Inspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.
It also knows all the standard test sets and handles downloading, processing, and tokenization for you.
See the [README.md] file for more information.
"""
import argparse
import gzip
import hashlib
import io
import logging
import math
import os
import portalocker
import re
import sys
import ssl
import urllib.request
from collections import Counter
from itertools import zip_longest
from typing import List, Iterable, Tuple, Union
from .tokenizer import TOKENIZERS, TokenizeMeCab
from .dataset import DATASETS, DOMAINS, COUNTRIES, SUBSETS
from . import __version__ as VERSION
sacrelogger = logging.getLogger('sacrebleu')
try:
# SIGPIPE is not available on Windows machines, throwing an exception.
from signal import SIGPIPE
# If SIGPIPE is available, change behaviour to default instead of ignore.
from signal import signal, SIG_DFL
signal(SIGPIPE, SIG_DFL)
except ImportError:
sacrelogger.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)')
# Where to store downloaded test sets.
# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu.
#
# Querying for a HOME environment variable can result in None (e.g., on Windows)
# in which case the os.path.join() throws a TypeError. Using expanduser() is
# a safe way to get the user's home folder.
USERHOME = os.path.expanduser("~")
SACREBLEU_DIR = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))
# n-gram order. Don't change this.
NGRAM_ORDER = 4
# Default values for CHRF
CHRF_ORDER = 6
# default to 2 (per http://www.aclweb.org/anthology/W16-2341)
CHRF_BETA = 2
# The default floor value to use with `--smooth floor`
SMOOTH_VALUE_DEFAULT = {'floor': 0.0, 'add-k': 1}
DEFAULT_TOKENIZER = '13a'
def smart_open(file, mode='rt', encoding='utf-8'):
"""Convenience function for reading compressed or plain text files.
:param file: The file to read.
:param mode: The file mode (read, write).
:param encoding: The file encoding.
"""
if file.endswith('.gz'):
return gzip.open(file, mode=mode, encoding=encoding, newline="\n")
return open(file, mode=mode, encoding=encoding, newline="\n")
def my_log(num):
"""
Floors the log function
:param num: the number
:return: log(num) floored to a very low number
"""
if num == 0.0:
return -9999999999
return math.log(num)
def bleu_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
:param args: the arguments passed into the script
:return: the signature
"""
# Abbreviations for the signature
abbr = {
'test': 't',
'lang': 'l',
'smooth': 's',
'case': 'c',
'tok': 'tok',
'numrefs': '#',
'version': 'v',
'origlang': 'o',
'subset': 'S',
}
signature = {'tok': args.tokenize,
'version': VERSION,
'smooth': args.smooth,
'numrefs': numrefs,
'case': 'lc' if args.lc else 'mixed'}
# For the Japanese tokenizer, add a dictionary type and its version to the signature.
if args.tokenize == "ja-mecab":
signature['tok'] += "-" + TokenizeMeCab().signature()
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
if args.origlang is not None:
signature['origlang'] = args.origlang
if args.subset is not None:
signature['subset'] = args.subset
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])
return sigstr
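# For orientation, a sketch of the signature string this builds (values are illustrative and
# depend on the arguments and installed version): scoring with `-t wmt14 -l en-de` and the
# defaults yields something like
#   case.mixed+lang.en-de+numrefs.1+smooth.exp+test.wmt14+tok.13a+version.<VERSION>
# which display_metric() later prefixes with 'BLEU+'.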
def chrf_signature(args, numrefs):
"""
Builds a signature that uniquely identifies the scoring parameters used.
:param args: the arguments passed into the script
:return: the chrF signature
"""
# Abbreviations for the signature
abbr = {
'test': 't',
'lang': 'l',
'numchars': 'n',
'space': 's',
'case': 'c',
'numrefs': '#',
'version': 'v',
'origlang': 'o',
'subset': 'S',
}
signature = {'version': VERSION,
'space': args.chrf_whitespace,
'numchars': args.chrf_order,
'numrefs': numrefs,
'case': 'lc' if args.lc else 'mixed'}
if args.test_set is not None:
signature['test'] = args.test_set
if args.langpair is not None:
signature['lang'] = args.langpair
if args.origlang is not None:
signature['origlang'] = args.origlang
if args.subset is not None:
signature['subset'] = args.subset
sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])
return sigstr
def extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) -> Counter:
"""Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.
:param line: A segment containing a sequence of words.
:param min_order: Minimum n-gram length (default: 1).
:param max_order: Maximum n-gram length (default: NGRAM_ORDER).
:return: a dictionary containing ngrams and counts
"""
ngrams = Counter()
tokens = line.split()
for n in range(min_order, max_order + 1):
for i in range(0, len(tokens) - n + 1):
ngram = ' '.join(tokens[i: i + n])
ngrams[ngram] += 1
return ngrams
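# A quick illustration of the counts this produces (Counter display order may differ):
#   >>> extract_ngrams('the cat sat', max_order=2)
#   Counter({'the': 1, 'cat': 1, 'sat': 1, 'the cat': 1, 'cat sat': 1})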
def extract_char_ngrams(s: str, n: int) -> Counter:
"""
Yields counts of character n-grams from string s of order n.
"""
return Counter([s[i:i + n] for i in range(len(s) - n + 1)])
def ref_stats(output, refs):
ngrams = Counter()
closest_diff = None
closest_len = None
for ref in refs:
tokens = ref.split()
reflen = len(tokens)
diff = abs(len(output.split()) - reflen)
if closest_diff is None or diff < closest_diff:
closest_diff = diff
closest_len = reflen
elif diff == closest_diff:
if reflen < closest_len:
closest_len = reflen
ngrams_ref = extract_ngrams(ref)
for ngram in ngrams_ref.keys():
ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])
return ngrams, closest_diff, closest_len
def _clean(s):
"""
Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.
:param s: The string.
:return: A cleaned-up string.
"""
return re.sub(r'\s+', ' ', s.strip())
def process_to_text(rawfile, txtfile, field: int=None):
"""Processes raw files to plain text files.
:param rawfile: the input file (possibly SGML)
:param txtfile: the plaintext file
:param field: For TSV files, which field to extract.
"""
if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:
sacrelogger.info("Processing %s to %s", rawfile, txtfile)
if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\1', line)), file=fout)
elif rawfile.endswith('.xml'): # IWSLT
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
if line.startswith('<seg '):
print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\1', line)), file=fout)
elif rawfile.endswith('.txt'): # wmt17/ms
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip(), file=fout)
elif rawfile.endswith('.tsv'): # MTNT
with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:
for line in fin:
print(line.rstrip().split('\t')[field], file=fout)
def print_test_set(test_set, langpair, side, origlang=None, subset=None):
"""Prints to STDOUT the specified side of the specified test set
:param test_set: the test set to print
:param langpair: the language pair
:param side: 'src' for source, 'ref' for reference
:param origlang: print only sentences with a given original language (2-char ISO639-1 code), "non-" prefix means negation
:param subset: print only sentences whose document annotation matches a given regex
"""
files = download_test_set(test_set, langpair)
if side == 'src':
files = [files[0]]
elif side == 'ref':
files.pop(0)
streams = [smart_open(file) for file in files]
streams = _filter_subset(streams, test_set, langpair, origlang, subset)
for lines in zip(*streams):
print('\t'.join(map(lambda x: x.rstrip(), lines)))
def download_test_set(test_set, langpair=None):
"""Downloads the specified test to the system location specified by the SACREBLEU environment variable.
:param test_set: the test set to download
:param langpair: the language pair (needed for some datasets)
:return: the set of processed files
"""
outdir = os.path.join(SACREBLEU_DIR, test_set)
os.makedirs(outdir, exist_ok=True)
expected_checksums = DATASETS[test_set].get('md5', [None] * len(DATASETS[test_set]))
for dataset, expected_md5 in zip(DATASETS[test_set]['data'], expected_checksums):
tarball = os.path.join(outdir, os.path.basename(dataset))
rawdir = os.path.join(outdir, 'raw')
lockfile = '{}.lock'.format(tarball)
with portalocker.Lock(lockfile, 'w', timeout=60):
if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:
sacrelogger.info("Downloading %s to %s", dataset, tarball)
try:
with urllib.request.urlopen(dataset) as f, open(tarball, 'wb') as out:
out.write(f.read())
except ssl.SSLError:
sacrelogger.warning('An SSL error was encountered in downloading the files. If you\'re on a Mac, '
'you may need to run the "Install Certificates.command" file located in the '
'"Python 3" folder, often found under /Applications')
sys.exit(1)
# Check md5sum
if expected_md5 is not None:
md5 = hashlib.md5()
with open(tarball, 'rb') as infile:
for line in infile:
md5.update(line)
if md5.hexdigest() != expected_md5:
sacrelogger.error('Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'.format(md5.hexdigest(), expected_md5))
sacrelogger.error('Please manually delete "{}" and rerun the command.'.format(tarball))
sacrelogger.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')
sys.exit(1)
else:
sacrelogger.info('Checksum passed: {}'.format(md5.hexdigest()))
# Extract the tarball
sacrelogger.info('Extracting %s', tarball)
if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):
import tarfile
with tarfile.open(tarball) as tar:
tar.extractall(path=rawdir)
elif tarball.endswith('.zip'):
import zipfile
                    with zipfile.ZipFile(tarball, 'r') as zf:
                        zf.extractall(path=rawdir)
found = []
# Process the files into plain text
languages = DATASETS[test_set].keys() if langpair is None else [langpair]
for pair in languages:
if '-' not in pair:
continue
src, tgt = pair.split('-')
rawfile = DATASETS[test_set][pair][0]
field = None # used for TSV files
if rawfile.endswith('.tsv'):
field, rawfile = rawfile.split(':', maxsplit=1)
field = int(field)
rawpath = os.path.join(rawdir, rawfile)
outpath = os.path.join(outdir, '{}.{}'.format(pair, src))
process_to_text(rawpath, outpath, field=field)
found.append(outpath)
refs = DATASETS[test_set][pair][1:]
for i, ref in enumerate(refs):
field = None
if ref.endswith('.tsv'):
field, ref = ref.split(':', maxsplit=1)
field = int(field)
rawpath = os.path.join(rawdir, ref)
if len(refs) >= 2:
outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))
else:
outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))
process_to_text(rawpath, outpath, field=field)
found.append(outpath)
return found
class Result:
def __init__(self, score: float):
self.score = score
def __str__(self):
return self.format()
class BLEU(Result):
def __init__(self,
score: float,
counts,
totals,
precisions,
bp,
sys_len,
ref_len):
super().__init__(score)
self.counts = counts
self.totals = totals
self.precisions = precisions
self.bp = bp
self.sys_len = sys_len
self.ref_len = ref_len
def format(self, width=2):
precisions = "/".join(["{:.1f}".format(p) for p in self.precisions])
return 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'.format(
score=self.score,
width=width,
precisions=precisions,
bp=self.bp,
ratio=self.sys_len / self.ref_len,
sys_len=self.sys_len,
ref_len=self.ref_len)
class CHRF(Result):
def __init__(self, score: float):
super().__init__(score)
def format(self, width=2):
return '{score:.{width}f}'.format(score=self.score, width=width)
def compute_bleu(correct: List[int],
total: List[int],
sys_len: int,
ref_len: int,
smooth_method = 'none',
smooth_value = None,
use_effective_order = False) -> BLEU:
"""Computes BLEU score from its sufficient statistics. Adds smoothing.
Smoothing methods (citing "A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU",
Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)
- exp: NIST smoothing method (Method 3)
- floor: Method 1
- add-k: Method 2 (generalizing Lin and Och, 2004)
- none: do nothing.
:param correct: List of counts of correct ngrams, 1 <= n <= NGRAM_ORDER
:param total: List of counts of total ngrams, 1 <= n <= NGRAM_ORDER
:param sys_len: The cumulative system length
:param ref_len: The cumulative reference length
    :param smooth_method: The smoothing method to use
    :param smooth_value: The smoothing value added, if smooth method 'floor' or 'add-k' is used
:param use_effective_order: If true, use the length of `correct` for the n-gram order instead of NGRAM_ORDER.
:return: A BLEU object with the score (100-based) and other statistics.
"""
if smooth_method in SMOOTH_VALUE_DEFAULT and smooth_value is None:
smooth_value = SMOOTH_VALUE_DEFAULT[smooth_method]
precisions = [0 for x in range(NGRAM_ORDER)]
smooth_mteval = 1.
effective_order = NGRAM_ORDER
for n in range(1, NGRAM_ORDER + 1):
if smooth_method == 'add-k' and n > 1:
correct[n-1] += smooth_value
total[n-1] += smooth_value
if total[n-1] == 0:
break
if use_effective_order:
effective_order = n
if correct[n-1] == 0:
if smooth_method == 'exp':
smooth_mteval *= 2
precisions[n-1] = 100. / (smooth_mteval * total[n-1])
elif smooth_method == 'floor':
precisions[n-1] = 100. * smooth_value / total[n-1]
else:
precisions[n-1] = 100. * correct[n-1] / total[n-1]
# If the system guesses no i-grams, 1 <= i <= NGRAM_ORDER, the BLEU score is 0 (technically undefined).
# This is a problem for sentence-level BLEU or a corpus of short sentences, where systems will get no credit
# if sentence lengths fall under the NGRAM_ORDER threshold. This fix scales NGRAM_ORDER to the observed
# maximum order. It is only available through the API and off by default
brevity_penalty = 1.0
if sys_len < ref_len:
brevity_penalty = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0
score = brevity_penalty * math.exp(sum(map(my_log, precisions[:effective_order])) / effective_order)
return BLEU(score, correct, total, precisions, brevity_penalty, sys_len, ref_len)
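# A rough worked sketch of the statistics flow (numbers are invented, not from any test set):
# with correct=[3, 2, 1, 0], total=[4, 3, 2, 1], sys_len=4, ref_len=4 and the default 'exp'
# smoothing, the zero 4-gram count is smoothed to 100 / (2 * total[3]) = 50.0, giving
# precisions of roughly [75.0, 66.7, 50.0, 50.0]; BP is 1.0 because sys_len == ref_len, so
# the score is the geometric mean of the four precisions, about 59.5.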
def sentence_bleu(hypothesis: str,
references: List[str],
smooth_method: str = 'floor',
smooth_value: float = None,
use_effective_order: bool = True) -> BLEU:
"""
Computes BLEU on a single sentence pair.
Disclaimer: computing BLEU on the sentence level is not its intended use,
BLEU is a corpus-level metric.
:param hypothesis: Hypothesis string.
    :param references: Reference strings; wrap each reference in its own one-element list, as main() does.
    :param smooth_method: The smoothing method to use ('floor' by default).
    :param smooth_value: For 'floor' smoothing, the floor value to use.
    :param use_effective_order: Account for references that are shorter than the largest n-gram.
    :return: Returns a BLEU object for the sentence pair.
"""
bleu = corpus_bleu(hypothesis, references,
smooth_method=smooth_method,
smooth_value=smooth_value,
use_effective_order=use_effective_order)
return bleu
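# Minimal usage sketch; as in main() below, each reference goes in as its own one-element
# list so corpus_bleu() sees one stream per reference:
#   >>> round(sentence_bleu('the cat is on the mat', [['the cat is on the mat']]).score, 2)
#   100.0
# An exact match scores 100; anything else lands somewhere in [0, 100].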
def corpus_bleu(sys_stream: Union[str, Iterable[str]],
ref_streams: Union[str, List[Iterable[str]]],
smooth_method='exp',
smooth_value=None,
force=False,
lowercase=False,
tokenize=DEFAULT_TOKENIZER,
use_effective_order=False) -> BLEU:
"""Produces BLEU scores along with its sufficient statistics from a source against one or more references.
:param sys_stream: The system stream (a sequence of segments)
:param ref_streams: A list of one or more reference streams (each a sequence of segments)
    :param smooth_method: The smoothing method to use
:param smooth_value: For 'floor' smoothing, the floor to use
:param force: Ignore data that looks already tokenized
:param lowercase: Lowercase the data
:param tokenize: The tokenizer to use
:return: a BLEU object containing everything you'd want
"""
# Add some robustness to the input arguments
if isinstance(sys_stream, str):
sys_stream = [sys_stream]
if isinstance(ref_streams, str):
ref_streams = [[ref_streams]]
sys_len = 0
ref_len = 0
correct = [0 for n in range(NGRAM_ORDER)]
total = [0 for n in range(NGRAM_ORDER)]
# look for already-tokenized sentences
tokenized_count = 0
fhs = [sys_stream] + ref_streams
for lines in zip_longest(*fhs):
if None in lines:
raise EOFError("Source and reference streams have different lengths!")
if lowercase:
lines = [x.lower() for x in lines]
if not (force or tokenize == 'none') and lines[0].rstrip().endswith(' .'):
tokenized_count += 1
if tokenized_count == 100:
sacrelogger.warning('That\'s 100 lines that end in a tokenized period (\'.\')')
sacrelogger.warning('It looks like you forgot to detokenize your test data, which may hurt your score.')
sacrelogger.warning('If you insist your data is detokenized, or don\'t care, you can suppress this message with \'--force\'.')
output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]
ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)
sys_len += len(output.split())
ref_len += closest_len
sys_ngrams = extract_ngrams(output)
for ngram in sys_ngrams.keys():
n = len(ngram.split())
correct[n-1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))
total[n-1] += sys_ngrams[ngram]
return compute_bleu(correct, total, sys_len, ref_len, smooth_method=smooth_method, smooth_value=smooth_value, use_effective_order=use_effective_order)
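# Library usage sketch (sentences are invented): one system stream plus one stream per
# reference set, each aligned segment-by-segment with the system output.
#   >>> hyps = ['the cat is on the mat', 'hello there']
#   >>> refs = [['the cat sat on the mat', 'hello there'],
#   ...         ['a cat is on the mat', 'well hello there']]
#   >>> corpus_bleu(hyps, refs).score   # a corpus-level BLEU in [0, 100]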
def raw_corpus_bleu(sys_stream,
ref_streams,
smooth_value=None) -> BLEU:
"""Convenience function that wraps corpus_bleu().
This is convenient if you're using sacrebleu as a library, say for scoring on dev.
It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).
:param sys_stream: the system stream (a sequence of segments)
:param ref_streams: a list of one or more reference streams (each a sequence of segments)
"""
return corpus_bleu(sys_stream, ref_streams, smooth_method='floor', smooth_value=smooth_value, force=True, tokenize='none', use_effective_order=True)
def delete_whitespace(text: str) -> str:
"""
Removes whitespaces from text.
"""
return re.sub(r'\s+', '', text).strip()
def get_sentence_statistics(hypothesis: str,
reference: str,
order: int = CHRF_ORDER,
remove_whitespace: bool = True) -> List[float]:
hypothesis = delete_whitespace(hypothesis) if remove_whitespace else hypothesis
reference = delete_whitespace(reference) if remove_whitespace else reference
statistics = [0] * (order * 3)
for i in range(order):
n = i + 1
hypothesis_ngrams = extract_char_ngrams(hypothesis, n)
reference_ngrams = extract_char_ngrams(reference, n)
common_ngrams = hypothesis_ngrams & reference_ngrams
statistics[3 * i + 0] = sum(hypothesis_ngrams.values())
statistics[3 * i + 1] = sum(reference_ngrams.values())
statistics[3 * i + 2] = sum(common_ngrams.values())
return statistics
def get_corpus_statistics(hypotheses: Iterable[str],
references: Iterable[str],
order: int = CHRF_ORDER,
remove_whitespace: bool = True) -> List[float]:
corpus_statistics = [0] * (order * 3)
for hypothesis, reference in zip(hypotheses, references):
statistics = get_sentence_statistics(hypothesis, reference, order=order, remove_whitespace=remove_whitespace)
for i in range(len(statistics)):
corpus_statistics[i] += statistics[i]
return corpus_statistics
def _avg_precision_and_recall(statistics: List[float], order: int) -> Tuple[float, float]:
avg_precision = 0.0
avg_recall = 0.0
effective_order = 0
for i in range(order):
hypotheses_ngrams = statistics[3 * i + 0]
references_ngrams = statistics[3 * i + 1]
common_ngrams = statistics[3 * i + 2]
if hypotheses_ngrams > 0 and references_ngrams > 0:
avg_precision += common_ngrams / hypotheses_ngrams
avg_recall += common_ngrams / references_ngrams
effective_order += 1
if effective_order == 0:
return 0.0, 0.0
avg_precision /= effective_order
avg_recall /= effective_order
return avg_precision, avg_recall
def _chrf(avg_precision, avg_recall, beta: int = CHRF_BETA) -> float:
if avg_precision + avg_recall == 0:
return 0.0
beta_square = beta ** 2
score = (1 + beta_square) * (avg_precision * avg_recall) / ((beta_square * avg_precision) + avg_recall)
return score
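# Worked example of the F-beta combination above (values invented): with avg_precision=0.6,
# avg_recall=0.5 and the default beta=2, the score is
#   (1 + 4) * 0.6 * 0.5 / (4 * 0.6 + 0.5) = 1.5 / 2.9 ≈ 0.517,
# so recall is weighted more heavily than precision whenever beta > 1.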
def corpus_chrf(hypotheses: Iterable[str],
references: Iterable[str],
order: int = CHRF_ORDER,
beta: float = CHRF_BETA,
remove_whitespace: bool = True) -> CHRF:
"""
Computes Chrf on a corpus.
:param hypotheses: Stream of hypotheses.
:param references: Stream of references
:param order: Maximum n-gram order.
:param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
corpus_statistics = get_corpus_statistics(hypotheses, references, order=order, remove_whitespace=remove_whitespace)
avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics, order)
return CHRF(_chrf(avg_precision, avg_recall, beta=beta))
def sentence_chrf(hypothesis: str,
reference: str,
order: int = CHRF_ORDER,
beta: float = CHRF_BETA,
remove_whitespace: bool = True) -> CHRF:
"""
Computes ChrF on a single sentence pair.
:param hypothesis: Hypothesis string.
:param reference: Reference string.
:param order: Maximum n-gram order.
:param remove_whitespace: Whether to delete whitespaces from hypothesis and reference strings.
:param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.
:return: Chrf score.
"""
statistics = get_sentence_statistics(hypothesis, reference, order=order, remove_whitespace=remove_whitespace)
avg_precision, avg_recall = _avg_precision_and_recall(statistics, order)
return CHRF(_chrf(avg_precision, avg_recall, beta=beta))
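# Minimal usage sketch (strings are invented); note that this implementation returns chrF
# on a 0-1 scale:
#   >>> sentence_chrf('the cat is on the mat', 'the cat is on the mat').score
#   1.0
#   >>> sentence_chrf('the cat is on the mat', 'a dog sat on a log').score < 1.0
#   True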
def get_langpairs_for_testset(testset: str) -> List:
"""Return a list of language pairs for a given test set."""
return list(filter(lambda x: re.match('\w\w\-\w\w', x), DATASETS.get(testset, {}).keys()))
def get_a_list_of_testset_names() -> str:
"""Return a string with a formatted list of available test sets plus their descriptions. """
message = 'The available test sets are:'
for testset in sorted(DATASETS.keys(), reverse=True):
message += '\n%20s: %s' % (testset, DATASETS[testset].get('description', ''))
return message
def _available_origlangs(test_sets, langpair):
"""Return a list of origlang values in according to the raw SGM files."""
origlangs = set()
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
if rawfile.endswith('.sgm'):
with smart_open(rawfile) as fin:
for line in fin:
if line.startswith('<doc '):
doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
origlangs.add(doc_origlang)
return sorted(list(origlangs))
def _filter_subset(systems, test_sets, langpair, origlang, subset=None):
"""Filter sentences with a given origlang (or subset) according to the raw SGM files."""
if origlang is None and subset is None:
return systems
if test_sets is None or langpair is None:
raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')
indices_to_keep = []
for test_set in test_sets.split(','):
rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])
if not rawfile.endswith('.sgm'):
raise Exception('--origlang and --subset supports only *.sgm files, not %s', rawfile)
if subset is not None:
if test_set not in SUBSETS:
raise Exception('No subset annotation available for test set ' + test_set)
doc_to_tags = SUBSETS[test_set]
number_sentences_included = 0
with smart_open(rawfile) as fin:
include_doc = False
for line in fin:
if line.startswith('<doc '):
if origlang is None:
include_doc = True
else:
doc_origlang = re.sub(r'.* origlang="([^"]+)".*\n', '\\1', line)
if origlang.startswith('non-'):
include_doc = doc_origlang != origlang[4:]
else:
include_doc = doc_origlang == origlang
if subset is not None:
doc_id = re.sub(r'.* docid="([^"]+)".*\n', '\\1', line)
if not re.search(subset, doc_to_tags.get(doc_id, '')):
include_doc = False
if line.startswith('<seg '):
indices_to_keep.append(include_doc)
number_sentences_included += 1 if include_doc else 0
return [[sentence for sentence,keep in zip(sys, indices_to_keep) if keep] for sys in systems]
def main():
args = parse_args()
# Explicitly set the encoding
sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline="\n")
sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True)
if not args.quiet:
logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s')
if args.download:
download_test_set(args.download, args.langpair)
sys.exit(0)
if args.list:
if args.test_set:
print(' '.join(get_langpairs_for_testset(args.test_set)))
else:
print(get_a_list_of_testset_names())
sys.exit(0)
if args.sentence_level and len(args.metrics) > 1:
sacrelogger.error('Only one metric can be used with Sentence-level reporting.')
sys.exit(1)
if args.citation:
if not args.test_set:
sacrelogger.error('I need a test set (-t).')
sys.exit(1)
for test_set in args.test_set.split(','):
if 'citation' not in DATASETS[test_set]:
sacrelogger.error('No citation found for %s', test_set)
else:
print(DATASETS[test_set]['citation'])
sys.exit(0)
if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1):
sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.')
sacrelogger.error('You can only use it with externaly-provided references, however (i.e., not with `-t`),')
sacrelogger.error('and you cannot then provide multiple reference files.')
sys.exit(1)
if args.test_set is not None:
for test_set in args.test_set.split(','):
if test_set not in DATASETS:
sacrelogger.error('Unknown test set "%s"\n%s', test_set, get_a_list_of_testset_names())
sys.exit(1)
if args.test_set is None:
if len(args.refs) == 0:
sacrelogger.error('I need either a predefined test set (-t) or a list of references')
sacrelogger.error(get_a_list_of_testset_names())
sys.exit(1)
elif len(args.refs) > 0:
sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references')
sys.exit(1)
elif args.langpair is None:
sacrelogger.error('I need a language pair (-l).')
sys.exit(1)
else:
for test_set in args.test_set.split(','):
if args.langpair not in DATASETS[test_set]:
sacrelogger.error('No such language pair "%s"', args.langpair)
sacrelogger.error('Available language pairs for test set "%s": %s', test_set,
', '.join(x for x in DATASETS[test_set].keys() if '-' in x))
sys.exit(1)
if args.echo:
if args.langpair is None or args.test_set is None:
sacrelogger.warning("--echo requires a test set (--t) and a language pair (-l)")
sys.exit(1)
for test_set in args.test_set.split(','):
print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset)
sys.exit(0)
if args.test_set is not None and args.tokenize == 'none':
sacrelogger.warning("You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\n"
"your own reference tokenization. Published numbers will not be comparable with other papers.\n")
    # Internal tokenizer settings: default to 'zh' for Chinese, 'ja-mecab' for Japanese, otherwise DEFAULT_TOKENIZER.
if args.tokenize is None:
# set default
if args.langpair is not None and args.langpair.split('-')[1] == 'zh':
args.tokenize = 'zh'
elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':
args.tokenize = 'ja-mecab'
else:
args.tokenize = DEFAULT_TOKENIZER
if args.langpair is not None and 'bleu' in args.metrics:
if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':
            sacrelogger.warning('You should also pass "--tok zh" when scoring Chinese...')
if args.langpair.split('-')[1] == 'ja' and not args.tokenize.startswith('ja-'):
            sacrelogger.warning('You should also pass "--tok ja-mecab" when scoring Japanese...')
# concat_ref_files is a list of list of reference filenames, for example:
# concat_ref_files = [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]]
if args.test_set is None:
concat_ref_files = [args.refs]
else:
concat_ref_files = []
for test_set in args.test_set.split(','):
_, *ref_files = download_test_set(test_set, args.langpair)
if len(ref_files) == 0:
sacrelogger.warning('No references found for test set {}/{}.'.format(test_set, args.langpair))
concat_ref_files.append(ref_files)
inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding) if args.input == '-' else smart_open(args.input, encoding=args.encoding)
full_system = inputfh.readlines()
# Read references
full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))]
for ref_files in concat_ref_files:
for refno, ref_file in enumerate(ref_files):
for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1):
if args.num_refs != 1:
splits = line.rstrip().split(sep='\t', maxsplit=args.num_refs-1)
if len(splits) != args.num_refs:
sacrelogger.error('FATAL: line {}: expected {} fields, but found {}.'.format(lineno, args.num_refs, len(splits)))
sys.exit(17)
for refno, split in enumerate(splits):
full_refs[refno].append(split)
else:
full_refs[refno].append(line)
# Filter sentences according to a given origlang
system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, args.origlang, args.subset)
if len(system) == 0:
message = 'Test set %s contains no sentence' % args.test_set
if args.origlang is not None or args.subset is not None:
message += ' with'
message += '' if args.origlang is None else ' origlang=' + args.origlang
message += '' if args.subset is None else ' subset=' + args.subset
sacrelogger.error(message)
exit(1)
# Handle sentence level and quit
if args.sentence_level:
for output, *references in zip(system, *refs):
results = []
for metric in args.metrics:
if metric == 'bleu':
bleu = sentence_bleu(output,
[[x] for x in references],
smooth_method=args.smooth,
smooth_value=args.smooth_value)
results.append(bleu)
if metric == 'chrf':
chrf = sentence_chrf(output,
references[0],
args.chrf_order,
args.chrf_beta,
remove_whitespace=not args.chrf_whitespace)
results.append(chrf)
display_metric(args.metrics, results, len(refs), args)
sys.exit(0)
# Else, handle system level
results = []
try:
for metric in args.metrics:
if metric == 'bleu':
bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)
results.append(bleu)
elif metric == 'chrf':
chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, remove_whitespace=not args.chrf_whitespace)
results.append(chrf)
except EOFError:
sacrelogger.error('The input and reference stream(s) were of different lengths.')
if args.test_set is not None:
sacrelogger.error('\nThis could be a problem with your system output or with sacreBLEU\'s reference database.\n'
'If the latter, you can clean out the references cache by typing:\n'
'\n'
' rm -r %s/%s\n'
'\n'
'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU_DIR,
args.test_set)
sys.exit(1)
display_metric(args.metrics, results, len(refs), args)
if args.detail:
width = args.width
sents_digits = len(str(len(full_system)))
origlangs = args.origlang if args.origlang else _available_origlangs(args.test_set, args.langpair)
for origlang in origlangs:
subsets = [None]
if args.subset is not None:
subsets += [args.subset]
elif all(t in SUBSETS for t in args.test_set.split(',')):
subsets += COUNTRIES + DOMAINS
for subset in subsets:
system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, origlang, subset)
if len(system) == 0:
continue
if subset in COUNTRIES:
subset_str = '%20s' % ('country=' + subset)
elif subset in DOMAINS:
subset_str = '%20s' % ('domain=' + subset)
else:
subset_str = '%20s' % ''
if 'bleu' in args.metrics:
bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)
print('origlang={} {}: sentences={:{}} BLEU={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, bleu.score, width+4, width))
if 'chrf' in args.metrics:
chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, remove_whitespace=not args.chrf_whitespace)
print('origlang={} {}: sentences={:{}} chrF={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, chrf.score, width+4, width))
def display_metric(metrics_to_print, results, num_refs, args):
"""
Badly in need of refactoring.
One idea is to put all of this in the BLEU and CHRF classes, and then define
a Result::signature() function.
"""
for metric, result in zip(metrics_to_print, results):
if metric == 'bleu':
if args.score_only:
print('{0:.{1}f}'.format(result.score, args.width))
else:
version_str = bleu_signature(args, num_refs)
print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))
elif metric == 'chrf':
if args.score_only:
print('{0:.{1}f}'.format(result.score, args.width))
else:
version_str = chrf_signature(args, num_refs)
print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))
def parse_args():
arg_parser = argparse.ArgumentParser(
description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\n'
'Quick usage: score your detokenized output against WMT\'14 EN-DE:\n'
' cat output.detok.de | sacrebleu -t wmt14 -l en-de',
# epilog = 'Available test sets: ' + ','.join(sorted(DATASETS.keys(), reverse=True)),
formatter_class=argparse.RawDescriptionHelpFormatter)
arg_parser.add_argument('--test-set', '-t', type=str, default=None,
help='the test set to use (see also --list) or a comma-separated list of test sets to be concatenated')
arg_parser.add_argument('-lc', action='store_true', default=False,
help='Use case-insensitive BLEU (default: actual case)')
arg_parser.add_argument('--sentence-level', '-sl', action='store_true',
help='Output metric on each sentence.')
arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor', 'add-k', 'none'],
default='exp',
help='smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none')
arg_parser.add_argument('--smooth-value', '-sv', type=float, default=None,
help='The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'.format(
SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))
arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(), default=None,
help='tokenization method to use')
arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,
help='source-target language pair (2-char ISO639-1 codes)')
arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None,
help='use a subset of sentences with a given original language (2-char ISO639-1 codes), "non-" prefix means negation')
arg_parser.add_argument('--subset', dest='subset', default=None,
help='use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)')
arg_parser.add_argument('--download', type=str, default=None,
help='download a test set and quit')
arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=str, default=None,
help='output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit')
arg_parser.add_argument('--input', '-i', type=str, default='-',
help='Read input from a file instead of STDIN')
arg_parser.add_argument('--num-refs', '-nr', type=int, default=1,
help='Split the reference stream on tabs, and expect this many references. Default: %(default)s.')
arg_parser.add_argument('refs', nargs='*', default=[],
help='optional list of references (for backwards-compatibility with older scripts)')
arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'], nargs='+',
default=['bleu'],
help='metrics to compute (default: bleu)')
arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,
help='chrf character order (default: %(default)s)')
arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,
help='chrf BETA parameter (default: %(default)s)')
arg_parser.add_argument('--chrf-whitespace', action='store_true', default=False,
help='include whitespace in chrF calculation (default: %(default)s)')
arg_parser.add_argument('--short', default=False, action='store_true',
help='produce a shorter (less human readable) signature')
arg_parser.add_argument('--score-only', '-b', default=False, action='store_true',
help='output only the BLEU score')
arg_parser.add_argument('--force', default=False, action='store_true',
help='insist that your tokenized input is actually detokenized')
arg_parser.add_argument('--quiet', '-q', default=False, action='store_true',
help='suppress informative output')
arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',
help='open text files with specified encoding (default: %(default)s)')
arg_parser.add_argument('--list', default=False, action='store_true',
help='print a list of all available test sets.')
arg_parser.add_argument('--citation', '--cite', default=False, action='store_true',
help='dump the bibtex citation and quit.')
arg_parser.add_argument('--width', '-w', type=int, default=1,
help='floating point width (default: %(default)s)')
arg_parser.add_argument('--detail', '-d', default=False, action='store_true',
help='print extra information (split test sets based on origlang)')
arg_parser.add_argument('-V', '--version', action='version',
version='%(prog)s {}'.format(VERSION))
args = arg_parser.parse_args()
return args
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "8adcd75e925fe0c5a50b2fc7dc8c472a9610b4f2",
"index": 9575,
"step-1": "<mask token>\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline='\\n')\n return open(file, mode=mode, encoding=encoding, newline='\\n')\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':\n 'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.\n smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}\n if args.tokenize == 'ja-mecab':\n signature['tok'] += '-' + TokenizeMeCab().signature()\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':\n 'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'version': VERSION, 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if\n args.lc else 'mixed'}\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\n<mask token>\n\n\ndef extract_char_ngrams(s: str, n: int) ->Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub('\\\\s+', 
' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info('Processing %s to %s', rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.xml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.txt'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\n<mask token>\n\n\nclass Result:\n\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n\n def __init__(self, score: float, counts, totals, precisions, bp,\n sys_len, ref_len):\n super().__init__(score)\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])\n return (\n 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'\n .format(score=self.score, width=width, precisions=precisions,\n bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.\n sys_len, ref_len=self.ref_len))\n\n\nclass CHRF(Result):\n\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\n<mask token>\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[\n str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,\n force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) ->BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more 
reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n sys_len = 0\n ref_len = 0\n correct = [(0) for n in range(NGRAM_ORDER)]\n total = [(0) for n in range(NGRAM_ORDER)]\n tokenized_count = 0\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\n 'Source and reference streams have different lengths!')\n if lowercase:\n lines = [x.lower() for x in lines]\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(\n ' .'):\n tokenized_count += 1\n if tokenized_count == 100:\n sacrelogger.warning(\n \"That's 100 lines that end in a tokenized period ('.')\")\n sacrelogger.warning(\n 'It looks like you forgot to detokenize your test data, which may hurt your score.'\n )\n sacrelogger.warning(\n \"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'.\"\n )\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n sys_len += len(output.split())\n ref_len += closest_len\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n - 1] += sys_ngrams[ngram]\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=\n smooth_method, smooth_value=smooth_value, use_effective_order=\n use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',\n smooth_value=smooth_value, force=True, tokenize='none',\n use_effective_order=True)\n\n\ndef delete_whitespace(text: str) ->str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub('\\\\s+', '', text).strip()\n\n\n<mask token>\n\n\ndef _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:\n if avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *\n avg_precision + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:\n int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True\n ) ->CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. 
If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order\n =order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,\n order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\n<mask token>\n\n\ndef get_langpairs_for_testset(testset: str) ->List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\\\w\\\\w\\\\-\\\\w\\\\w', x), DATASETS.\n get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() ->str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. \"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get(\n 'description', ''))\n return message\n\n\n<mask token>\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError(\n 'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'\n )\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception(\n '--origlang and --subset supports only *.sgm files, not %s',\n rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception(\n 'No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub('.* docid=\"([^\"]+)\".*\\\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence, keep in zip(sys, indices_to_keep) if\n keep] for sys in systems]\n\n\n<mask token>\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(description=\n \"\"\"sacreBLEU: Hassle-free computation of shareable BLEU scores.\nQuick usage: score your detokenized output against WMT'14 EN-DE:\n cat output.detok.de | sacrebleu -t wmt14 -l en-de\"\"\"\n , formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help=\n 'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'\n )\n arg_parser.add_argument('-lc', action='store_true', default=False, help\n ='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',\n 'add-k', 'none'], default='exp', help=\n 'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment 
num/denom by k for n>1), or none'\n )\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=\n None, help=\n 'The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'\n .format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),\n default=None, help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair',\n default=None, help=\n 'source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=\n None, help=\n 'use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation'\n )\n arg_parser.add_argument('--subset', dest='subset', default=None, help=\n 'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'\n )\n arg_parser.add_argument('--download', type=str, default=None, help=\n 'download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=\n str, default=None, help=\n 'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'\n )\n arg_parser.add_argument('--input', '-i', type=str, default='-', help=\n 'Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=\n 'Split the reference stream on tabs, and expect this many references. Default: %(default)s.'\n )\n arg_parser.add_argument('refs', nargs='*', default=[], help=\n 'optional list of references (for backwards-compatibility with older scripts)'\n )\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],\n nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true',\n default=False, help=\n 'include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action=\n 'store_true', help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action=\n 'store_true', help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action=\n 'store_true', help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1, help=\n 'floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, action=\n 'store_true', help=\n 'print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version', version=\n '%(prog)s 
{}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline='\\n')\n return open(file, mode=mode, encoding=encoding, newline='\\n')\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':\n 'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.\n smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}\n if args.tokenize == 'ja-mecab':\n signature['tok'] += '-' + TokenizeMeCab().signature()\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':\n 'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'version': VERSION, 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if\n args.lc else 'mixed'}\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\n<mask token>\n\n\ndef extract_char_ngrams(s: str, n: int) ->Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub('\\\\s+', 
' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info('Processing %s to %s', rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.xml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.txt'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\ndef download_test_set(test_set, langpair=None):\n \"\"\"Downloads the specified test to the system location specified by the SACREBLEU environment variable.\n\n :param test_set: the test set to download\n :param langpair: the language pair (needed for some datasets)\n :return: the set of processed files\n \"\"\"\n outdir = os.path.join(SACREBLEU_DIR, test_set)\n os.makedirs(outdir, exist_ok=True)\n expected_checksums = DATASETS[test_set].get('md5', [None] * len(\n DATASETS[test_set]))\n for dataset, expected_md5 in zip(DATASETS[test_set]['data'],\n expected_checksums):\n tarball = os.path.join(outdir, os.path.basename(dataset))\n rawdir = os.path.join(outdir, 'raw')\n lockfile = '{}.lock'.format(tarball)\n with portalocker.Lock(lockfile, 'w', timeout=60):\n if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:\n sacrelogger.info('Downloading %s to %s', dataset, tarball)\n try:\n with urllib.request.urlopen(dataset) as f, open(tarball,\n 'wb') as out:\n out.write(f.read())\n except ssl.SSLError:\n sacrelogger.warning(\n 'An SSL error was encountered in downloading the files. 
If you\\'re on a Mac, you may need to run the \"Install Certificates.command\" file located in the \"Python 3\" folder, often found under /Applications'\n )\n sys.exit(1)\n if expected_md5 is not None:\n md5 = hashlib.md5()\n with open(tarball, 'rb') as infile:\n for line in infile:\n md5.update(line)\n if md5.hexdigest() != expected_md5:\n sacrelogger.error(\n 'Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'\n .format(md5.hexdigest(), expected_md5))\n sacrelogger.error(\n 'Please manually delete \"{}\" and rerun the command.'\n .format(tarball))\n sacrelogger.error(\n 'If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.'\n )\n sys.exit(1)\n else:\n sacrelogger.info('Checksum passed: {}'.format(md5.\n hexdigest()))\n sacrelogger.info('Extracting %s', tarball)\n if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):\n import tarfile\n with tarfile.open(tarball) as tar:\n tar.extractall(path=rawdir)\n elif tarball.endswith('.zip'):\n import zipfile\n with zipfile.ZipFile(tarball, 'r') as zipfile:\n zipfile.extractall(path=rawdir)\n found = []\n languages = DATASETS[test_set].keys() if langpair is None else [langpair]\n for pair in languages:\n if '-' not in pair:\n continue\n src, tgt = pair.split('-')\n rawfile = DATASETS[test_set][pair][0]\n field = None\n if rawfile.endswith('.tsv'):\n field, rawfile = rawfile.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, rawfile)\n outpath = os.path.join(outdir, '{}.{}'.format(pair, src))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n refs = DATASETS[test_set][pair][1:]\n for i, ref in enumerate(refs):\n field = None\n if ref.endswith('.tsv'):\n field, ref = ref.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, ref)\n if len(refs) >= 2:\n outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))\n else:\n outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n return found\n\n\nclass Result:\n\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n\n def __init__(self, score: float, counts, totals, precisions, bp,\n sys_len, ref_len):\n super().__init__(score)\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])\n return (\n 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'\n .format(score=self.score, width=width, precisions=precisions,\n bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.\n sys_len, ref_len=self.ref_len))\n\n\nclass CHRF(Result):\n\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\n<mask token>\n\n\ndef sentence_bleu(hypothesis: str, references: List[str], smooth_method:\n str='floor', smooth_value: float=None, use_effective_order: bool=True\n ) ->BLEU:\n \"\"\"\n Computes BLEU on a single sentence pair.\n\n Disclaimer: computing BLEU on the sentence level is not its intended use,\n BLEU is a corpus-level metric.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param smooth_value: For 'floor' 
smoothing, the floor value to use.\n :param use_effective_order: Account for references that are shorter than the largest n-gram.\n :return: Returns a single BLEU score as a float.\n \"\"\"\n bleu = corpus_bleu(hypothesis, references, smooth_method=smooth_method,\n smooth_value=smooth_value, use_effective_order=use_effective_order)\n return bleu\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[\n str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,\n force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) ->BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n sys_len = 0\n ref_len = 0\n correct = [(0) for n in range(NGRAM_ORDER)]\n total = [(0) for n in range(NGRAM_ORDER)]\n tokenized_count = 0\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\n 'Source and reference streams have different lengths!')\n if lowercase:\n lines = [x.lower() for x in lines]\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(\n ' .'):\n tokenized_count += 1\n if tokenized_count == 100:\n sacrelogger.warning(\n \"That's 100 lines that end in a tokenized period ('.')\")\n sacrelogger.warning(\n 'It looks like you forgot to detokenize your test data, which may hurt your score.'\n )\n sacrelogger.warning(\n \"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'.\"\n )\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n sys_len += len(output.split())\n ref_len += closest_len\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n - 1] += sys_ngrams[ngram]\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=\n smooth_method, smooth_value=smooth_value, use_effective_order=\n use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',\n smooth_value=smooth_value, force=True, tokenize='none',\n use_effective_order=True)\n\n\ndef delete_whitespace(text: str) ->str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub('\\\\s+', '', text).strip()\n\n\n<mask token>\n\n\ndef _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:\n if 
avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *\n avg_precision + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:\n int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True\n ) ->CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order\n =order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,\n order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\n<mask token>\n\n\ndef get_langpairs_for_testset(testset: str) ->List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\\\w\\\\w\\\\-\\\\w\\\\w', x), DATASETS.\n get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() ->str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. \"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get(\n 'description', ''))\n return message\n\n\n<mask token>\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError(\n 'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'\n )\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception(\n '--origlang and --subset supports only *.sgm files, not %s',\n rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception(\n 'No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub('.* docid=\"([^\"]+)\".*\\\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence, keep in zip(sys, indices_to_keep) if\n keep] for sys in systems]\n\n\n<mask token>\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(description=\n \"\"\"sacreBLEU: Hassle-free computation of shareable BLEU scores.\nQuick usage: score your detokenized output against WMT'14 EN-DE:\n cat output.detok.de | sacrebleu -t wmt14 -l en-de\"\"\"\n , 
formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help=\n 'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'\n )\n arg_parser.add_argument('-lc', action='store_true', default=False, help\n ='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',\n 'add-k', 'none'], default='exp', help=\n 'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none'\n )\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=\n None, help=\n 'The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'\n .format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),\n default=None, help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair',\n default=None, help=\n 'source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=\n None, help=\n 'use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation'\n )\n arg_parser.add_argument('--subset', dest='subset', default=None, help=\n 'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'\n )\n arg_parser.add_argument('--download', type=str, default=None, help=\n 'download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=\n str, default=None, help=\n 'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'\n )\n arg_parser.add_argument('--input', '-i', type=str, default='-', help=\n 'Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=\n 'Split the reference stream on tabs, and expect this many references. 
Default: %(default)s.'\n )\n arg_parser.add_argument('refs', nargs='*', default=[], help=\n 'optional list of references (for backwards-compatibility with older scripts)'\n )\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],\n nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true',\n default=False, help=\n 'include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action=\n 'store_true', help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action=\n 'store_true', help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action=\n 'store_true', help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1, help=\n 'floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, action=\n 'store_true', help=\n 'print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version', version=\n '%(prog)s {}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline='\\n')\n return open(file, mode=mode, encoding=encoding, newline='\\n')\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':\n 'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.\n smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}\n if args.tokenize == 'ja-mecab':\n signature['tok'] += '-' + TokenizeMeCab().signature()\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':\n 'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'version': VERSION, 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if\n args.lc else 'mixed'}\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) ->Counter:\n \"\"\"Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.\n\n :param line: A segment containing a sequence of words.\n :param min_order: Minimum n-gram length (default: 1).\n :param max_order: Maximum n-gram length (default: NGRAM_ORDER).\n :return: a dictionary containing ngrams and counts\n \"\"\"\n ngrams = Counter()\n tokens = line.split()\n for n in range(min_order, max_order + 1):\n for i in range(0, len(tokens) - n + 1):\n ngram = ' '.join(tokens[i:i + n])\n ngrams[ngram] += 1\n return ngrams\n\n\ndef extract_char_ngrams(s: str, n: int) ->Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = 
abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub('\\\\s+', ' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info('Processing %s to %s', rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.xml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.txt'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\ndef download_test_set(test_set, langpair=None):\n \"\"\"Downloads the specified test to the system location specified by the SACREBLEU environment variable.\n\n :param test_set: the test set to download\n :param langpair: the language pair (needed for some datasets)\n :return: the set of processed files\n \"\"\"\n outdir = os.path.join(SACREBLEU_DIR, test_set)\n os.makedirs(outdir, exist_ok=True)\n expected_checksums = DATASETS[test_set].get('md5', [None] * len(\n DATASETS[test_set]))\n for dataset, expected_md5 in zip(DATASETS[test_set]['data'],\n expected_checksums):\n tarball = os.path.join(outdir, os.path.basename(dataset))\n rawdir = os.path.join(outdir, 'raw')\n lockfile = '{}.lock'.format(tarball)\n with portalocker.Lock(lockfile, 'w', timeout=60):\n if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:\n sacrelogger.info('Downloading %s to %s', dataset, 
tarball)\n try:\n with urllib.request.urlopen(dataset) as f, open(tarball,\n 'wb') as out:\n out.write(f.read())\n except ssl.SSLError:\n sacrelogger.warning(\n 'An SSL error was encountered in downloading the files. If you\\'re on a Mac, you may need to run the \"Install Certificates.command\" file located in the \"Python 3\" folder, often found under /Applications'\n )\n sys.exit(1)\n if expected_md5 is not None:\n md5 = hashlib.md5()\n with open(tarball, 'rb') as infile:\n for line in infile:\n md5.update(line)\n if md5.hexdigest() != expected_md5:\n sacrelogger.error(\n 'Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'\n .format(md5.hexdigest(), expected_md5))\n sacrelogger.error(\n 'Please manually delete \"{}\" and rerun the command.'\n .format(tarball))\n sacrelogger.error(\n 'If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.'\n )\n sys.exit(1)\n else:\n sacrelogger.info('Checksum passed: {}'.format(md5.\n hexdigest()))\n sacrelogger.info('Extracting %s', tarball)\n if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):\n import tarfile\n with tarfile.open(tarball) as tar:\n tar.extractall(path=rawdir)\n elif tarball.endswith('.zip'):\n import zipfile\n with zipfile.ZipFile(tarball, 'r') as zipfile:\n zipfile.extractall(path=rawdir)\n found = []\n languages = DATASETS[test_set].keys() if langpair is None else [langpair]\n for pair in languages:\n if '-' not in pair:\n continue\n src, tgt = pair.split('-')\n rawfile = DATASETS[test_set][pair][0]\n field = None\n if rawfile.endswith('.tsv'):\n field, rawfile = rawfile.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, rawfile)\n outpath = os.path.join(outdir, '{}.{}'.format(pair, src))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n refs = DATASETS[test_set][pair][1:]\n for i, ref in enumerate(refs):\n field = None\n if ref.endswith('.tsv'):\n field, ref = ref.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, ref)\n if len(refs) >= 2:\n outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))\n else:\n outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n return found\n\n\nclass Result:\n\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n\n def __init__(self, score: float, counts, totals, precisions, bp,\n sys_len, ref_len):\n super().__init__(score)\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])\n return (\n 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'\n .format(score=self.score, width=width, precisions=precisions,\n bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.\n sys_len, ref_len=self.ref_len))\n\n\nclass CHRF(Result):\n\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\n<mask token>\n\n\ndef sentence_bleu(hypothesis: str, references: List[str], smooth_method:\n str='floor', smooth_value: float=None, use_effective_order: bool=True\n ) ->BLEU:\n \"\"\"\n Computes BLEU on a single sentence pair.\n\n Disclaimer: 
computing BLEU on the sentence level is not its intended use,\n BLEU is a corpus-level metric.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param smooth_value: For 'floor' smoothing, the floor value to use.\n :param use_effective_order: Account for references that are shorter than the largest n-gram.\n :return: Returns a single BLEU score as a float.\n \"\"\"\n bleu = corpus_bleu(hypothesis, references, smooth_method=smooth_method,\n smooth_value=smooth_value, use_effective_order=use_effective_order)\n return bleu\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[\n str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,\n force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) ->BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n sys_len = 0\n ref_len = 0\n correct = [(0) for n in range(NGRAM_ORDER)]\n total = [(0) for n in range(NGRAM_ORDER)]\n tokenized_count = 0\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\n 'Source and reference streams have different lengths!')\n if lowercase:\n lines = [x.lower() for x in lines]\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(\n ' .'):\n tokenized_count += 1\n if tokenized_count == 100:\n sacrelogger.warning(\n \"That's 100 lines that end in a tokenized period ('.')\")\n sacrelogger.warning(\n 'It looks like you forgot to detokenize your test data, which may hurt your score.'\n )\n sacrelogger.warning(\n \"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'.\"\n )\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n sys_len += len(output.split())\n ref_len += closest_len\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n - 1] += sys_ngrams[ngram]\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=\n smooth_method, smooth_value=smooth_value, use_effective_order=\n use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',\n smooth_value=smooth_value, force=True, tokenize='none',\n use_effective_order=True)\n\n\ndef delete_whitespace(text: 
str) ->str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub('\\\\s+', '', text).strip()\n\n\n<mask token>\n\n\ndef get_corpus_statistics(hypotheses: Iterable[str], references: Iterable[\n str], order: int=CHRF_ORDER, remove_whitespace: bool=True) ->List[float]:\n corpus_statistics = [0] * (order * 3)\n for hypothesis, reference in zip(hypotheses, references):\n statistics = get_sentence_statistics(hypothesis, reference, order=\n order, remove_whitespace=remove_whitespace)\n for i in range(len(statistics)):\n corpus_statistics[i] += statistics[i]\n return corpus_statistics\n\n\n<mask token>\n\n\ndef _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:\n if avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *\n avg_precision + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:\n int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True\n ) ->CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order\n =order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,\n order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\n<mask token>\n\n\ndef get_langpairs_for_testset(testset: str) ->List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\\\w\\\\w\\\\-\\\\w\\\\w', x), DATASETS.\n get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() ->str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. 
\"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get(\n 'description', ''))\n return message\n\n\n<mask token>\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError(\n 'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'\n )\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception(\n '--origlang and --subset supports only *.sgm files, not %s',\n rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception(\n 'No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub('.* docid=\"([^\"]+)\".*\\\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence, keep in zip(sys, indices_to_keep) if\n keep] for sys in systems]\n\n\n<mask token>\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(description=\n \"\"\"sacreBLEU: Hassle-free computation of shareable BLEU scores.\nQuick usage: score your detokenized output against WMT'14 EN-DE:\n cat output.detok.de | sacrebleu -t wmt14 -l en-de\"\"\"\n , formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help=\n 'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'\n )\n arg_parser.add_argument('-lc', action='store_true', default=False, help\n ='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',\n 'add-k', 'none'], default='exp', help=\n 'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none'\n )\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=\n None, help=\n 'The value to pass to the smoothing technique, only used for floor and add-k. 
Default floor: {}, add-k: {}.'\n .format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),\n default=None, help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair',\n default=None, help=\n 'source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=\n None, help=\n 'use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation'\n )\n arg_parser.add_argument('--subset', dest='subset', default=None, help=\n 'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'\n )\n arg_parser.add_argument('--download', type=str, default=None, help=\n 'download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=\n str, default=None, help=\n 'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'\n )\n arg_parser.add_argument('--input', '-i', type=str, default='-', help=\n 'Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=\n 'Split the reference stream on tabs, and expect this many references. Default: %(default)s.'\n )\n arg_parser.add_argument('refs', nargs='*', default=[], help=\n 'optional list of references (for backwards-compatibility with older scripts)'\n )\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],\n nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true',\n default=False, help=\n 'include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action=\n 'store_true', help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action=\n 'store_true', help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action=\n 'store_true', help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1, help=\n 'floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, action=\n 'store_true', help=\n 'print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version', version=\n '%(prog)s {}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline='\\n')\n return open(file, mode=mode, encoding=encoding, newline='\\n')\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'smooth': 's', 'case': 'c', 'tok':\n 'tok', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'tok': args.tokenize, 'version': VERSION, 'smooth': args.\n smooth, 'numrefs': numrefs, 'case': 'lc' if args.lc else 'mixed'}\n if args.tokenize == 'ja-mecab':\n signature['tok'] += '-' + TokenizeMeCab().signature()\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n abbr = {'test': 't', 'lang': 'l', 'numchars': 'n', 'space': 's', 'case':\n 'c', 'numrefs': '#', 'version': 'v', 'origlang': 'o', 'subset': 'S'}\n signature = {'version': VERSION, 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order, 'numrefs': numrefs, 'case': 'lc' if\n args.lc else 'mixed'}\n if args.test_set is not None:\n signature['test'] = args.test_set\n if args.langpair is not None:\n signature['lang'] = args.langpair\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x,\n signature[x]) for x in sorted(signature.keys())])\n return sigstr\n\n\ndef extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) ->Counter:\n \"\"\"Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.\n\n :param line: A segment containing a sequence of words.\n :param min_order: Minimum n-gram length (default: 1).\n :param max_order: Maximum n-gram length (default: NGRAM_ORDER).\n :return: a dictionary containing ngrams and counts\n \"\"\"\n ngrams = Counter()\n tokens = line.split()\n for n in range(min_order, max_order + 1):\n for i in range(0, len(tokens) - n + 1):\n ngram = ' '.join(tokens[i:i + n])\n ngrams[ngram] += 1\n return ngrams\n\n\ndef extract_char_ngrams(s: str, n: int) ->Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = 
abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub('\\\\s+', ' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info('Processing %s to %s', rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.xml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub('<seg.*?>(.*)</seg>.*?', '\\\\1',\n line)), file=fout)\n elif rawfile.endswith('.txt'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\ndef download_test_set(test_set, langpair=None):\n \"\"\"Downloads the specified test to the system location specified by the SACREBLEU environment variable.\n\n :param test_set: the test set to download\n :param langpair: the language pair (needed for some datasets)\n :return: the set of processed files\n \"\"\"\n outdir = os.path.join(SACREBLEU_DIR, test_set)\n os.makedirs(outdir, exist_ok=True)\n expected_checksums = DATASETS[test_set].get('md5', [None] * len(\n DATASETS[test_set]))\n for dataset, expected_md5 in zip(DATASETS[test_set]['data'],\n expected_checksums):\n tarball = os.path.join(outdir, os.path.basename(dataset))\n rawdir = os.path.join(outdir, 'raw')\n lockfile = '{}.lock'.format(tarball)\n with portalocker.Lock(lockfile, 'w', timeout=60):\n if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:\n sacrelogger.info('Downloading %s to %s', dataset, 
tarball)\n try:\n with urllib.request.urlopen(dataset) as f, open(tarball,\n 'wb') as out:\n out.write(f.read())\n except ssl.SSLError:\n sacrelogger.warning(\n 'An SSL error was encountered in downloading the files. If you\\'re on a Mac, you may need to run the \"Install Certificates.command\" file located in the \"Python 3\" folder, often found under /Applications'\n )\n sys.exit(1)\n if expected_md5 is not None:\n md5 = hashlib.md5()\n with open(tarball, 'rb') as infile:\n for line in infile:\n md5.update(line)\n if md5.hexdigest() != expected_md5:\n sacrelogger.error(\n 'Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'\n .format(md5.hexdigest(), expected_md5))\n sacrelogger.error(\n 'Please manually delete \"{}\" and rerun the command.'\n .format(tarball))\n sacrelogger.error(\n 'If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.'\n )\n sys.exit(1)\n else:\n sacrelogger.info('Checksum passed: {}'.format(md5.\n hexdigest()))\n sacrelogger.info('Extracting %s', tarball)\n if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):\n import tarfile\n with tarfile.open(tarball) as tar:\n tar.extractall(path=rawdir)\n elif tarball.endswith('.zip'):\n import zipfile\n with zipfile.ZipFile(tarball, 'r') as zipfile:\n zipfile.extractall(path=rawdir)\n found = []\n languages = DATASETS[test_set].keys() if langpair is None else [langpair]\n for pair in languages:\n if '-' not in pair:\n continue\n src, tgt = pair.split('-')\n rawfile = DATASETS[test_set][pair][0]\n field = None\n if rawfile.endswith('.tsv'):\n field, rawfile = rawfile.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, rawfile)\n outpath = os.path.join(outdir, '{}.{}'.format(pair, src))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n refs = DATASETS[test_set][pair][1:]\n for i, ref in enumerate(refs):\n field = None\n if ref.endswith('.tsv'):\n field, ref = ref.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, ref)\n if len(refs) >= 2:\n outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))\n else:\n outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n return found\n\n\nclass Result:\n\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n\n def __init__(self, score: float, counts, totals, precisions, bp,\n sys_len, ref_len):\n super().__init__(score)\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = '/'.join(['{:.1f}'.format(p) for p in self.precisions])\n return (\n 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'\n .format(score=self.score, width=width, precisions=precisions,\n bp=self.bp, ratio=self.sys_len / self.ref_len, sys_len=self.\n sys_len, ref_len=self.ref_len))\n\n\nclass CHRF(Result):\n\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\n<mask token>\n\n\ndef sentence_bleu(hypothesis: str, references: List[str], smooth_method:\n str='floor', smooth_value: float=None, use_effective_order: bool=True\n ) ->BLEU:\n \"\"\"\n Computes BLEU on a single sentence pair.\n\n Disclaimer: 
computing BLEU on the sentence level is not its intended use,\n BLEU is a corpus-level metric.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param smooth_value: For 'floor' smoothing, the floor value to use.\n :param use_effective_order: Account for references that are shorter than the largest n-gram.\n :return: Returns a single BLEU score as a float.\n \"\"\"\n bleu = corpus_bleu(hypothesis, references, smooth_method=smooth_method,\n smooth_value=smooth_value, use_effective_order=use_effective_order)\n return bleu\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]], ref_streams: Union[\n str, List[Iterable[str]]], smooth_method='exp', smooth_value=None,\n force=False, lowercase=False, tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) ->BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n sys_len = 0\n ref_len = 0\n correct = [(0) for n in range(NGRAM_ORDER)]\n total = [(0) for n in range(NGRAM_ORDER)]\n tokenized_count = 0\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\n 'Source and reference streams have different lengths!')\n if lowercase:\n lines = [x.lower() for x in lines]\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(\n ' .'):\n tokenized_count += 1\n if tokenized_count == 100:\n sacrelogger.warning(\n \"That's 100 lines that end in a tokenized period ('.')\")\n sacrelogger.warning(\n 'It looks like you forgot to detokenize your test data, which may hurt your score.'\n )\n sacrelogger.warning(\n \"If you insist your data is detokenized, or don't care, you can suppress this message with '--force'.\"\n )\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n sys_len += len(output.split())\n ref_len += closest_len\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n - 1] += sys_ngrams[ngram]\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=\n smooth_method, smooth_value=smooth_value, use_effective_order=\n use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream, ref_streams, smooth_value=None) ->BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor',\n smooth_value=smooth_value, force=True, tokenize='none',\n use_effective_order=True)\n\n\ndef delete_whitespace(text: 
str) ->str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub('\\\\s+', '', text).strip()\n\n\ndef get_sentence_statistics(hypothesis: str, reference: str, order: int=\n CHRF_ORDER, remove_whitespace: bool=True) ->List[float]:\n hypothesis = delete_whitespace(hypothesis\n ) if remove_whitespace else hypothesis\n reference = delete_whitespace(reference\n ) if remove_whitespace else reference\n statistics = [0] * (order * 3)\n for i in range(order):\n n = i + 1\n hypothesis_ngrams = extract_char_ngrams(hypothesis, n)\n reference_ngrams = extract_char_ngrams(reference, n)\n common_ngrams = hypothesis_ngrams & reference_ngrams\n statistics[3 * i + 0] = sum(hypothesis_ngrams.values())\n statistics[3 * i + 1] = sum(reference_ngrams.values())\n statistics[3 * i + 2] = sum(common_ngrams.values())\n return statistics\n\n\ndef get_corpus_statistics(hypotheses: Iterable[str], references: Iterable[\n str], order: int=CHRF_ORDER, remove_whitespace: bool=True) ->List[float]:\n corpus_statistics = [0] * (order * 3)\n for hypothesis, reference in zip(hypotheses, references):\n statistics = get_sentence_statistics(hypothesis, reference, order=\n order, remove_whitespace=remove_whitespace)\n for i in range(len(statistics)):\n corpus_statistics[i] += statistics[i]\n return corpus_statistics\n\n\ndef _avg_precision_and_recall(statistics: List[float], order: int) ->Tuple[\n float, float]:\n avg_precision = 0.0\n avg_recall = 0.0\n effective_order = 0\n for i in range(order):\n hypotheses_ngrams = statistics[3 * i + 0]\n references_ngrams = statistics[3 * i + 1]\n common_ngrams = statistics[3 * i + 2]\n if hypotheses_ngrams > 0 and references_ngrams > 0:\n avg_precision += common_ngrams / hypotheses_ngrams\n avg_recall += common_ngrams / references_ngrams\n effective_order += 1\n if effective_order == 0:\n return 0.0, 0.0\n avg_precision /= effective_order\n avg_recall /= effective_order\n return avg_precision, avg_recall\n\n\ndef _chrf(avg_precision, avg_recall, beta: int=CHRF_BETA) ->float:\n if avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / (beta_square *\n avg_precision + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str], references: Iterable[str], order:\n int=CHRF_ORDER, beta: float=CHRF_BETA, remove_whitespace: bool=True\n ) ->CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order\n =order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics,\n order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\ndef sentence_chrf(hypothesis: str, reference: str, order: int=CHRF_ORDER,\n beta: float=CHRF_BETA, remove_whitespace: bool=True) ->CHRF:\n \"\"\"\n Computes ChrF on a single sentence pair.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete whitespaces from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. 
If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n statistics = get_sentence_statistics(hypothesis, reference, order=order,\n remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(statistics, order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\ndef get_langpairs_for_testset(testset: str) ->List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\\\w\\\\w\\\\-\\\\w\\\\w', x), DATASETS.\n get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() ->str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. \"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get(\n 'description', ''))\n return message\n\n\ndef _available_origlangs(test_sets, langpair):\n \"\"\"Return a list of origlang values in according to the raw SGM files.\"\"\"\n origlangs = set()\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if rawfile.endswith('.sgm'):\n with smart_open(rawfile) as fin:\n for line in fin:\n if line.startswith('<doc '):\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n origlangs.add(doc_origlang)\n return sorted(list(origlangs))\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError(\n 'Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).'\n )\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[\n test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception(\n '--origlang and --subset supports only *.sgm files, not %s',\n rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception(\n 'No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub('.* origlang=\"([^\"]+)\".*\\\\n',\n '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub('.* docid=\"([^\"]+)\".*\\\\n', '\\\\1', line)\n if not re.search(subset, doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence, keep in zip(sys, indices_to_keep) if\n keep] for sys in systems]\n\n\ndef main():\n args = parse_args()\n sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8',\n buffering=True, newline='\\n')\n sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8',\n buffering=True)\n if not args.quiet:\n logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s'\n )\n if args.download:\n download_test_set(args.download, args.langpair)\n sys.exit(0)\n if args.list:\n if args.test_set:\n print(' 
'.join(get_langpairs_for_testset(args.test_set)))\n else:\n print(get_a_list_of_testset_names())\n sys.exit(0)\n if args.sentence_level and len(args.metrics) > 1:\n sacrelogger.error(\n 'Only one metric can be used with Sentence-level reporting.')\n sys.exit(1)\n if args.citation:\n if not args.test_set:\n sacrelogger.error('I need a test set (-t).')\n sys.exit(1)\n for test_set in args.test_set.split(','):\n if 'citation' not in DATASETS[test_set]:\n sacrelogger.error('No citation found for %s', test_set)\n else:\n print(DATASETS[test_set]['citation'])\n sys.exit(0)\n if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1\n ):\n sacrelogger.error(\n 'The --num-refs argument allows you to provide any number of tab-delimited references in a single file.'\n )\n sacrelogger.error(\n 'You can only use it with externaly-provided references, however (i.e., not with `-t`),'\n )\n sacrelogger.error(\n 'and you cannot then provide multiple reference files.')\n sys.exit(1)\n if args.test_set is not None:\n for test_set in args.test_set.split(','):\n if test_set not in DATASETS:\n sacrelogger.error('Unknown test set \"%s\"\\n%s', test_set,\n get_a_list_of_testset_names())\n sys.exit(1)\n if args.test_set is None:\n if len(args.refs) == 0:\n sacrelogger.error(\n 'I need either a predefined test set (-t) or a list of references'\n )\n sacrelogger.error(get_a_list_of_testset_names())\n sys.exit(1)\n elif len(args.refs) > 0:\n sacrelogger.error(\n 'I need exactly one of (a) a predefined test set (-t) or (b) a list of references'\n )\n sys.exit(1)\n elif args.langpair is None:\n sacrelogger.error('I need a language pair (-l).')\n sys.exit(1)\n else:\n for test_set in args.test_set.split(','):\n if args.langpair not in DATASETS[test_set]:\n sacrelogger.error('No such language pair \"%s\"', args.langpair)\n sacrelogger.error(\n 'Available language pairs for test set \"%s\": %s',\n test_set, ', '.join(x for x in DATASETS[test_set].keys(\n ) if '-' in x))\n sys.exit(1)\n if args.echo:\n if args.langpair is None or args.test_set is None:\n sacrelogger.warning(\n '--echo requires a test set (--t) and a language pair (-l)')\n sys.exit(1)\n for test_set in args.test_set.split(','):\n print_test_set(test_set, args.langpair, args.echo, args.\n origlang, args.subset)\n sys.exit(0)\n if args.test_set is not None and args.tokenize == 'none':\n sacrelogger.warning(\n \"\"\"You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\nyour own reference tokenization. 
Published numbers will not be comparable with other papers.\n\"\"\"\n )\n if args.tokenize is None:\n if args.langpair is not None and args.langpair.split('-')[1] == 'zh':\n args.tokenize = 'zh'\n elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':\n args.tokenize = 'ja-mecab'\n else:\n args.tokenize = DEFAULT_TOKENIZER\n if args.langpair is not None and 'bleu' in args.metrics:\n if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':\n logger.warning(\n 'You should also pass \"--tok zh\" when scoring Chinese...')\n if args.langpair.split('-')[1\n ] == 'ja' and not args.tokenize.startswith('ja-'):\n logger.warning(\n 'You should also pass \"--tok ja-mecab\" when scoring Japanese...'\n )\n if args.test_set is None:\n concat_ref_files = [args.refs]\n else:\n concat_ref_files = []\n for test_set in args.test_set.split(','):\n _, *ref_files = download_test_set(test_set, args.langpair)\n if len(ref_files) == 0:\n sacrelogger.warning('No references found for test set {}/{}.'\n .format(test_set, args.langpair))\n concat_ref_files.append(ref_files)\n inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding\n ) if args.input == '-' else smart_open(args.input, encoding=args.\n encoding)\n full_system = inputfh.readlines()\n full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.\n num_refs))]\n for ref_files in concat_ref_files:\n for refno, ref_file in enumerate(ref_files):\n for lineno, line in enumerate(smart_open(ref_file, encoding=\n args.encoding), 1):\n if args.num_refs != 1:\n splits = line.rstrip().split(sep='\\t', maxsplit=args.\n num_refs - 1)\n if len(splits) != args.num_refs:\n sacrelogger.error(\n 'FATAL: line {}: expected {} fields, but found {}.'\n .format(lineno, args.num_refs, len(splits)))\n sys.exit(17)\n for refno, split in enumerate(splits):\n full_refs[refno].append(split)\n else:\n full_refs[refno].append(line)\n system, *refs = _filter_subset([full_system, *full_refs], args.test_set,\n args.langpair, args.origlang, args.subset)\n if len(system) == 0:\n message = 'Test set %s contains no sentence' % args.test_set\n if args.origlang is not None or args.subset is not None:\n message += ' with'\n message += ('' if args.origlang is None else ' origlang=' +\n args.origlang)\n message += '' if args.subset is None else ' subset=' + args.subset\n sacrelogger.error(message)\n exit(1)\n if args.sentence_level:\n for output, *references in zip(system, *refs):\n results = []\n for metric in args.metrics:\n if metric == 'bleu':\n bleu = sentence_bleu(output, [[x] for x in references],\n smooth_method=args.smooth, smooth_value=args.\n smooth_value)\n results.append(bleu)\n if metric == 'chrf':\n chrf = sentence_chrf(output, references[0], args.\n chrf_order, args.chrf_beta, remove_whitespace=not\n args.chrf_whitespace)\n results.append(chrf)\n display_metric(args.metrics, results, len(refs), args)\n sys.exit(0)\n results = []\n try:\n for metric in args.metrics:\n if metric == 'bleu':\n bleu = corpus_bleu(system, refs, smooth_method=args.smooth,\n smooth_value=args.smooth_value, force=args.force,\n lowercase=args.lc, tokenize=args.tokenize)\n results.append(bleu)\n elif metric == 'chrf':\n chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta,\n order=args.chrf_order, remove_whitespace=not args.\n chrf_whitespace)\n results.append(chrf)\n except EOFError:\n sacrelogger.error(\n 'The input and reference stream(s) were of different lengths.')\n if args.test_set is not None:\n sacrelogger.error(\n \"\"\"\nThis could be a problem with your 
system output or with sacreBLEU's reference database.\nIf the latter, you can clean out the references cache by typing:\n\n rm -r %s/%s\n\nThey will be downloaded automatically again the next time you run sacreBLEU.\"\"\"\n , SACREBLEU_DIR, args.test_set)\n sys.exit(1)\n display_metric(args.metrics, results, len(refs), args)\n if args.detail:\n width = args.width\n sents_digits = len(str(len(full_system)))\n origlangs = args.origlang if args.origlang else _available_origlangs(\n args.test_set, args.langpair)\n for origlang in origlangs:\n subsets = [None]\n if args.subset is not None:\n subsets += [args.subset]\n elif all(t in SUBSETS for t in args.test_set.split(',')):\n subsets += COUNTRIES + DOMAINS\n for subset in subsets:\n system, *refs = _filter_subset([full_system, *full_refs],\n args.test_set, args.langpair, origlang, subset)\n if len(system) == 0:\n continue\n if subset in COUNTRIES:\n subset_str = '%20s' % ('country=' + subset)\n elif subset in DOMAINS:\n subset_str = '%20s' % ('domain=' + subset)\n else:\n subset_str = '%20s' % ''\n if 'bleu' in args.metrics:\n bleu = corpus_bleu(system, refs, smooth_method=args.\n smooth, smooth_value=args.smooth_value, force=args.\n force, lowercase=args.lc, tokenize=args.tokenize)\n print('origlang={} {}: sentences={:{}} BLEU={:{}.{}f}'.\n format(origlang, subset_str, len(system),\n sents_digits, bleu.score, width + 4, width))\n if 'chrf' in args.metrics:\n chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta,\n order=args.chrf_order, remove_whitespace=not args.\n chrf_whitespace)\n print('origlang={} {}: sentences={:{}} chrF={:{}.{}f}'.\n format(origlang, subset_str, len(system),\n sents_digits, chrf.score, width + 4, width))\n\n\ndef display_metric(metrics_to_print, results, num_refs, args):\n \"\"\"\n Badly in need of refactoring.\n One idea is to put all of this in the BLEU and CHRF classes, and then define\n a Result::signature() function.\n \"\"\"\n for metric, result in zip(metrics_to_print, results):\n if metric == 'bleu':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = bleu_signature(args, num_refs)\n print(result.format(args.width).replace('BLEU', 'BLEU+' +\n version_str))\n elif metric == 'chrf':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = chrf_signature(args, num_refs)\n print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta,\n version_str, result.score, args.width))\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(description=\n \"\"\"sacreBLEU: Hassle-free computation of shareable BLEU scores.\nQuick usage: score your detokenized output against WMT'14 EN-DE:\n cat output.detok.de | sacrebleu -t wmt14 -l en-de\"\"\"\n , formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help=\n 'the test set to use (see also --list) or a comma-separated list of test sets to be concatenated'\n )\n arg_parser.add_argument('-lc', action='store_true', default=False, help\n ='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor',\n 'add-k', 'none'], default='exp', help=\n 'smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none'\n )\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=\n 
None, help=\n 'The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'\n .format(SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(),\n default=None, help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair',\n default=None, help=\n 'source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=\n None, help=\n 'use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation'\n )\n arg_parser.add_argument('--subset', dest='subset', default=None, help=\n 'use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)'\n )\n arg_parser.add_argument('--download', type=str, default=None, help=\n 'download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=\n str, default=None, help=\n 'output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit'\n )\n arg_parser.add_argument('--input', '-i', type=str, default='-', help=\n 'Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1, help=\n 'Split the reference stream on tabs, and expect this many references. Default: %(default)s.'\n )\n arg_parser.add_argument('refs', nargs='*', default=[], help=\n 'optional list of references (for backwards-compatibility with older scripts)'\n )\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'],\n nargs='+', default=['bleu'], help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true',\n default=False, help=\n 'include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action=\n 'store_true', help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action=\n 'store_true', help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action=\n 'store_true', help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1, help=\n 'floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, action=\n 'store_true', help=\n 'print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version', version=\n '%(prog)s {}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2017--2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may not\n# use this file except in compliance with the License. A copy of the License\n# is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed on\n# an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n\"\"\"\nSacreBLEU provides hassle-free computation of shareable, comparable, and reproducible BLEU scores.\nInspired by Rico Sennrich's `multi-bleu-detok.perl`, it produces the official WMT scores but works with plain text.\nIt also knows all the standard test sets and handles downloading, processing, and tokenization for you.\n\nSee the [README.md] file for more information.\n\"\"\"\n\nimport argparse\nimport gzip\nimport hashlib\nimport io\nimport logging\nimport math\nimport os\nimport portalocker\nimport re\nimport sys\nimport ssl\nimport urllib.request\n\nfrom collections import Counter\nfrom itertools import zip_longest\nfrom typing import List, Iterable, Tuple, Union\nfrom .tokenizer import TOKENIZERS, TokenizeMeCab\nfrom .dataset import DATASETS, DOMAINS, COUNTRIES, SUBSETS\nfrom . import __version__ as VERSION\n\nsacrelogger = logging.getLogger('sacrebleu')\n\ntry:\n # SIGPIPE is not available on Windows machines, throwing an exception.\n from signal import SIGPIPE\n\n # If SIGPIPE is available, change behaviour to default instead of ignore.\n from signal import signal, SIG_DFL\n signal(SIGPIPE, SIG_DFL)\n\nexcept ImportError:\n sacrelogger.warning('Could not import signal.SIGPIPE (this is expected on Windows machines)')\n\n# Where to store downloaded test sets.\n# Define the environment variable $SACREBLEU, or use the default of ~/.sacrebleu.\n#\n# Querying for a HOME environment variable can result in None (e.g., on Windows)\n# in which case the os.path.join() throws a TypeError. Using expanduser() is\n# a safe way to get the user's home folder.\nUSERHOME = os.path.expanduser(\"~\")\nSACREBLEU_DIR = os.environ.get('SACREBLEU', os.path.join(USERHOME, '.sacrebleu'))\n\n# n-gram order. 
Don't change this.\nNGRAM_ORDER = 4\n\n# Default values for CHRF\nCHRF_ORDER = 6\n# default to 2 (per http://www.aclweb.org/anthology/W16-2341)\nCHRF_BETA = 2\n\n# The default floor value to use with `--smooth floor`\nSMOOTH_VALUE_DEFAULT = {'floor': 0.0, 'add-k': 1}\n\n\nDEFAULT_TOKENIZER = '13a'\n\n\ndef smart_open(file, mode='rt', encoding='utf-8'):\n \"\"\"Convenience function for reading compressed or plain text files.\n :param file: The file to read.\n :param mode: The file mode (read, write).\n :param encoding: The file encoding.\n \"\"\"\n if file.endswith('.gz'):\n return gzip.open(file, mode=mode, encoding=encoding, newline=\"\\n\")\n return open(file, mode=mode, encoding=encoding, newline=\"\\n\")\n\n\ndef my_log(num):\n \"\"\"\n Floors the log function\n\n :param num: the number\n :return: log(num) floored to a very low number\n \"\"\"\n\n if num == 0.0:\n return -9999999999\n return math.log(num)\n\n\ndef bleu_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the signature\n \"\"\"\n\n # Abbreviations for the signature\n abbr = {\n 'test': 't',\n 'lang': 'l',\n 'smooth': 's',\n 'case': 'c',\n 'tok': 'tok',\n 'numrefs': '#',\n 'version': 'v',\n 'origlang': 'o',\n 'subset': 'S',\n }\n\n signature = {'tok': args.tokenize,\n 'version': VERSION,\n 'smooth': args.smooth,\n 'numrefs': numrefs,\n 'case': 'lc' if args.lc else 'mixed'}\n\n # For the Japanese tokenizer, add a dictionary type and its version to the signature.\n if args.tokenize == \"ja-mecab\":\n signature['tok'] += \"-\" + TokenizeMeCab().signature()\n\n if args.test_set is not None:\n signature['test'] = args.test_set\n\n if args.langpair is not None:\n signature['lang'] = args.langpair\n\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])\n\n return sigstr\n\n\ndef chrf_signature(args, numrefs):\n \"\"\"\n Builds a signature that uniquely identifies the scoring parameters used.\n :param args: the arguments passed into the script\n :return: the chrF signature\n \"\"\"\n\n # Abbreviations for the signature\n abbr = {\n 'test': 't',\n 'lang': 'l',\n 'numchars': 'n',\n 'space': 's',\n 'case': 'c',\n 'numrefs': '#',\n 'version': 'v',\n 'origlang': 'o',\n 'subset': 'S',\n }\n\n signature = {'version': VERSION,\n 'space': args.chrf_whitespace,\n 'numchars': args.chrf_order,\n 'numrefs': numrefs,\n 'case': 'lc' if args.lc else 'mixed'}\n\n if args.test_set is not None:\n signature['test'] = args.test_set\n\n if args.langpair is not None:\n signature['lang'] = args.langpair\n\n if args.origlang is not None:\n signature['origlang'] = args.origlang\n if args.subset is not None:\n signature['subset'] = args.subset\n\n sigstr = '+'.join(['{}.{}'.format(abbr[x] if args.short else x, signature[x]) for x in sorted(signature.keys())])\n\n return sigstr\n\n\ndef extract_ngrams(line, min_order=1, max_order=NGRAM_ORDER) -> Counter:\n \"\"\"Extracts all the ngrams (min_order <= n <= max_order) from a sequence of tokens.\n\n :param line: A segment containing a sequence of words.\n :param min_order: Minimum n-gram length (default: 1).\n :param max_order: Maximum n-gram length (default: NGRAM_ORDER).\n :return: a dictionary containing ngrams and counts\n \"\"\"\n\n ngrams = Counter()\n tokens = line.split()\n for n in 
range(min_order, max_order + 1):\n for i in range(0, len(tokens) - n + 1):\n ngram = ' '.join(tokens[i: i + n])\n ngrams[ngram] += 1\n\n return ngrams\n\n\ndef extract_char_ngrams(s: str, n: int) -> Counter:\n \"\"\"\n Yields counts of character n-grams from string s of order n.\n \"\"\"\n return Counter([s[i:i + n] for i in range(len(s) - n + 1)])\n\n\ndef ref_stats(output, refs):\n ngrams = Counter()\n closest_diff = None\n closest_len = None\n for ref in refs:\n tokens = ref.split()\n reflen = len(tokens)\n diff = abs(len(output.split()) - reflen)\n if closest_diff is None or diff < closest_diff:\n closest_diff = diff\n closest_len = reflen\n elif diff == closest_diff:\n if reflen < closest_len:\n closest_len = reflen\n\n ngrams_ref = extract_ngrams(ref)\n for ngram in ngrams_ref.keys():\n ngrams[ngram] = max(ngrams[ngram], ngrams_ref[ngram])\n\n return ngrams, closest_diff, closest_len\n\n\ndef _clean(s):\n \"\"\"\n Removes trailing and leading spaces and collapses multiple consecutive internal spaces to a single one.\n\n :param s: The string.\n :return: A cleaned-up string.\n \"\"\"\n return re.sub(r'\\s+', ' ', s.strip())\n\n\ndef process_to_text(rawfile, txtfile, field: int=None):\n \"\"\"Processes raw files to plain text files.\n :param rawfile: the input file (possibly SGML)\n :param txtfile: the plaintext file\n :param field: For TSV files, which field to extract.\n \"\"\"\n\n if not os.path.exists(txtfile) or os.path.getsize(txtfile) == 0:\n sacrelogger.info(\"Processing %s to %s\", rawfile, txtfile)\n if rawfile.endswith('.sgm') or rawfile.endswith('.sgml'):\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\\\1', line)), file=fout)\n elif rawfile.endswith('.xml'): # IWSLT\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n if line.startswith('<seg '):\n print(_clean(re.sub(r'<seg.*?>(.*)</seg>.*?', '\\\\1', line)), file=fout)\n elif rawfile.endswith('.txt'): # wmt17/ms\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip(), file=fout)\n elif rawfile.endswith('.tsv'): # MTNT\n with smart_open(rawfile) as fin, smart_open(txtfile, 'wt') as fout:\n for line in fin:\n print(line.rstrip().split('\\t')[field], file=fout)\n\n\ndef print_test_set(test_set, langpair, side, origlang=None, subset=None):\n \"\"\"Prints to STDOUT the specified side of the specified test set\n :param test_set: the test set to print\n :param langpair: the language pair\n :param side: 'src' for source, 'ref' for reference\n :param origlang: print only sentences with a given original language (2-char ISO639-1 code), \"non-\" prefix means negation\n :param subset: print only sentences whose document annotation matches a given regex\n \"\"\"\n\n files = download_test_set(test_set, langpair)\n if side == 'src':\n files = [files[0]]\n elif side == 'ref':\n files.pop(0)\n\n streams = [smart_open(file) for file in files]\n streams = _filter_subset(streams, test_set, langpair, origlang, subset)\n for lines in zip(*streams):\n print('\\t'.join(map(lambda x: x.rstrip(), lines)))\n\n\ndef download_test_set(test_set, langpair=None):\n \"\"\"Downloads the specified test to the system location specified by the SACREBLEU environment variable.\n\n :param test_set: the test set to download\n :param langpair: the language pair (needed for some datasets)\n :return: the set of processed files\n \"\"\"\n\n outdir = 
os.path.join(SACREBLEU_DIR, test_set)\n os.makedirs(outdir, exist_ok=True)\n\n expected_checksums = DATASETS[test_set].get('md5', [None] * len(DATASETS[test_set]))\n for dataset, expected_md5 in zip(DATASETS[test_set]['data'], expected_checksums):\n tarball = os.path.join(outdir, os.path.basename(dataset))\n rawdir = os.path.join(outdir, 'raw')\n\n lockfile = '{}.lock'.format(tarball)\n with portalocker.Lock(lockfile, 'w', timeout=60):\n if not os.path.exists(tarball) or os.path.getsize(tarball) == 0:\n sacrelogger.info(\"Downloading %s to %s\", dataset, tarball)\n try:\n with urllib.request.urlopen(dataset) as f, open(tarball, 'wb') as out:\n out.write(f.read())\n except ssl.SSLError:\n sacrelogger.warning('An SSL error was encountered in downloading the files. If you\\'re on a Mac, '\n 'you may need to run the \"Install Certificates.command\" file located in the '\n '\"Python 3\" folder, often found under /Applications')\n sys.exit(1)\n\n # Check md5sum\n if expected_md5 is not None:\n md5 = hashlib.md5()\n with open(tarball, 'rb') as infile:\n for line in infile:\n md5.update(line)\n if md5.hexdigest() != expected_md5:\n sacrelogger.error('Fatal: MD5 sum of downloaded file was incorrect (got {}, expected {}).'.format(md5.hexdigest(), expected_md5))\n sacrelogger.error('Please manually delete \"{}\" and rerun the command.'.format(tarball))\n sacrelogger.error('If the problem persists, the tarball may have changed, in which case, please contact the SacreBLEU maintainer.')\n sys.exit(1)\n else:\n sacrelogger.info('Checksum passed: {}'.format(md5.hexdigest()))\n\n # Extract the tarball\n sacrelogger.info('Extracting %s', tarball)\n if tarball.endswith('.tar.gz') or tarball.endswith('.tgz'):\n import tarfile\n with tarfile.open(tarball) as tar:\n tar.extractall(path=rawdir)\n elif tarball.endswith('.zip'):\n import zipfile\n with zipfile.ZipFile(tarball, 'r') as zipfile:\n zipfile.extractall(path=rawdir)\n\n found = []\n\n # Process the files into plain text\n languages = DATASETS[test_set].keys() if langpair is None else [langpair]\n for pair in languages:\n if '-' not in pair:\n continue\n src, tgt = pair.split('-')\n rawfile = DATASETS[test_set][pair][0]\n field = None # used for TSV files\n if rawfile.endswith('.tsv'):\n field, rawfile = rawfile.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, rawfile)\n outpath = os.path.join(outdir, '{}.{}'.format(pair, src))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n\n refs = DATASETS[test_set][pair][1:]\n for i, ref in enumerate(refs):\n field = None\n if ref.endswith('.tsv'):\n field, ref = ref.split(':', maxsplit=1)\n field = int(field)\n rawpath = os.path.join(rawdir, ref)\n if len(refs) >= 2:\n outpath = os.path.join(outdir, '{}.{}.{}'.format(pair, tgt, i))\n else:\n outpath = os.path.join(outdir, '{}.{}'.format(pair, tgt))\n process_to_text(rawpath, outpath, field=field)\n found.append(outpath)\n\n return found\n\n\nclass Result:\n def __init__(self, score: float):\n self.score = score\n\n def __str__(self):\n return self.format()\n\n\nclass BLEU(Result):\n def __init__(self,\n score: float,\n counts,\n totals,\n precisions,\n bp,\n sys_len,\n ref_len):\n super().__init__(score)\n\n self.counts = counts\n self.totals = totals\n self.precisions = precisions\n self.bp = bp\n self.sys_len = sys_len\n self.ref_len = ref_len\n\n def format(self, width=2):\n precisions = \"/\".join([\"{:.1f}\".format(p) for p in self.precisions])\n return 'BLEU = {score:.{width}f} {precisions} (BP = {bp:.3f} 
ratio = {ratio:.3f} hyp_len = {sys_len:d} ref_len = {ref_len:d})'.format(\n score=self.score,\n width=width,\n precisions=precisions,\n bp=self.bp,\n ratio=self.sys_len / self.ref_len,\n sys_len=self.sys_len,\n ref_len=self.ref_len)\n\n\nclass CHRF(Result):\n def __init__(self, score: float):\n super().__init__(score)\n\n def format(self, width=2):\n return '{score:.{width}f}'.format(score=self.score, width=width)\n\n\ndef compute_bleu(correct: List[int],\n total: List[int],\n sys_len: int,\n ref_len: int,\n smooth_method = 'none',\n smooth_value = None,\n use_effective_order = False) -> BLEU:\n \"\"\"Computes BLEU score from its sufficient statistics. Adds smoothing.\n\n Smoothing methods (citing \"A Systematic Comparison of Smoothing Techniques for Sentence-Level BLEU\",\n Boxing Chen and Colin Cherry, WMT 2014: http://aclweb.org/anthology/W14-3346)\n\n - exp: NIST smoothing method (Method 3)\n - floor: Method 1\n - add-k: Method 2 (generalizing Lin and Och, 2004)\n - none: do nothing.\n\n :param correct: List of counts of correct ngrams, 1 <= n <= NGRAM_ORDER\n :param total: List of counts of total ngrams, 1 <= n <= NGRAM_ORDER\n :param sys_len: The cumulative system length\n :param ref_len: The cumulative reference length\n :param smooth: The smoothing method to use\n :param smooth_value: The smoothing value added, if smooth method 'floor' is used\n :param use_effective_order: If true, use the length of `correct` for the n-gram order instead of NGRAM_ORDER.\n :return: A BLEU object with the score (100-based) and other statistics.\n \"\"\"\n if smooth_method in SMOOTH_VALUE_DEFAULT and smooth_value is None:\n smooth_value = SMOOTH_VALUE_DEFAULT[smooth_method]\n\n precisions = [0 for x in range(NGRAM_ORDER)]\n\n smooth_mteval = 1.\n effective_order = NGRAM_ORDER\n for n in range(1, NGRAM_ORDER + 1):\n if smooth_method == 'add-k' and n > 1:\n correct[n-1] += smooth_value\n total[n-1] += smooth_value\n if total[n-1] == 0:\n break\n\n if use_effective_order:\n effective_order = n\n\n if correct[n-1] == 0:\n if smooth_method == 'exp':\n smooth_mteval *= 2\n precisions[n-1] = 100. / (smooth_mteval * total[n-1])\n elif smooth_method == 'floor':\n precisions[n-1] = 100. * smooth_value / total[n-1]\n else:\n precisions[n-1] = 100. * correct[n-1] / total[n-1]\n\n # If the system guesses no i-grams, 1 <= i <= NGRAM_ORDER, the BLEU score is 0 (technically undefined).\n # This is a problem for sentence-level BLEU or a corpus of short sentences, where systems will get no credit\n # if sentence lengths fall under the NGRAM_ORDER threshold. This fix scales NGRAM_ORDER to the observed\n # maximum order. 
It is only available through the API and off by default\n\n brevity_penalty = 1.0\n if sys_len < ref_len:\n brevity_penalty = math.exp(1 - ref_len / sys_len) if sys_len > 0 else 0.0\n\n score = brevity_penalty * math.exp(sum(map(my_log, precisions[:effective_order])) / effective_order)\n\n return BLEU(score, correct, total, precisions, brevity_penalty, sys_len, ref_len)\n\n\ndef sentence_bleu(hypothesis: str,\n references: List[str],\n smooth_method: str = 'floor',\n smooth_value: float = None,\n use_effective_order: bool = True) -> BLEU:\n \"\"\"\n Computes BLEU on a single sentence pair.\n\n Disclaimer: computing BLEU on the sentence level is not its intended use,\n BLEU is a corpus-level metric.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param smooth_value: For 'floor' smoothing, the floor value to use.\n :param use_effective_order: Account for references that are shorter than the largest n-gram.\n :return: Returns a single BLEU score as a float.\n \"\"\"\n bleu = corpus_bleu(hypothesis, references,\n smooth_method=smooth_method,\n smooth_value=smooth_value,\n use_effective_order=use_effective_order)\n return bleu\n\n\ndef corpus_bleu(sys_stream: Union[str, Iterable[str]],\n ref_streams: Union[str, List[Iterable[str]]],\n smooth_method='exp',\n smooth_value=None,\n force=False,\n lowercase=False,\n tokenize=DEFAULT_TOKENIZER,\n use_effective_order=False) -> BLEU:\n \"\"\"Produces BLEU scores along with its sufficient statistics from a source against one or more references.\n\n :param sys_stream: The system stream (a sequence of segments)\n :param ref_streams: A list of one or more reference streams (each a sequence of segments)\n :param smooth: The smoothing method to use\n :param smooth_value: For 'floor' smoothing, the floor to use\n :param force: Ignore data that looks already tokenized\n :param lowercase: Lowercase the data\n :param tokenize: The tokenizer to use\n :return: a BLEU object containing everything you'd want\n \"\"\"\n\n # Add some robustness to the input arguments\n if isinstance(sys_stream, str):\n sys_stream = [sys_stream]\n if isinstance(ref_streams, str):\n ref_streams = [[ref_streams]]\n\n sys_len = 0\n ref_len = 0\n\n correct = [0 for n in range(NGRAM_ORDER)]\n total = [0 for n in range(NGRAM_ORDER)]\n\n # look for already-tokenized sentences\n tokenized_count = 0\n\n fhs = [sys_stream] + ref_streams\n for lines in zip_longest(*fhs):\n if None in lines:\n raise EOFError(\"Source and reference streams have different lengths!\")\n\n if lowercase:\n lines = [x.lower() for x in lines]\n\n if not (force or tokenize == 'none') and lines[0].rstrip().endswith(' .'):\n tokenized_count += 1\n\n if tokenized_count == 100:\n sacrelogger.warning('That\\'s 100 lines that end in a tokenized period (\\'.\\')')\n sacrelogger.warning('It looks like you forgot to detokenize your test data, which may hurt your score.')\n sacrelogger.warning('If you insist your data is detokenized, or don\\'t care, you can suppress this message with \\'--force\\'.')\n\n output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]\n\n ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)\n\n sys_len += len(output.split())\n ref_len += closest_len\n\n sys_ngrams = extract_ngrams(output)\n for ngram in sys_ngrams.keys():\n n = len(ngram.split())\n correct[n-1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))\n total[n-1] += sys_ngrams[ngram]\n\n return compute_bleu(correct, total, sys_len, ref_len, smooth_method=smooth_method, 
smooth_value=smooth_value, use_effective_order=use_effective_order)\n\n\ndef raw_corpus_bleu(sys_stream,\n ref_streams,\n smooth_value=None) -> BLEU:\n \"\"\"Convenience function that wraps corpus_bleu().\n This is convenient if you're using sacrebleu as a library, say for scoring on dev.\n It uses no tokenization and 'floor' smoothing, with the floor default to 0 (no smoothing).\n\n :param sys_stream: the system stream (a sequence of segments)\n :param ref_streams: a list of one or more reference streams (each a sequence of segments)\n \"\"\"\n return corpus_bleu(sys_stream, ref_streams, smooth_method='floor', smooth_value=smooth_value, force=True, tokenize='none', use_effective_order=True)\n\n\ndef delete_whitespace(text: str) -> str:\n \"\"\"\n Removes whitespaces from text.\n \"\"\"\n return re.sub(r'\\s+', '', text).strip()\n\n\ndef get_sentence_statistics(hypothesis: str,\n reference: str,\n order: int = CHRF_ORDER,\n remove_whitespace: bool = True) -> List[float]:\n hypothesis = delete_whitespace(hypothesis) if remove_whitespace else hypothesis\n reference = delete_whitespace(reference) if remove_whitespace else reference\n statistics = [0] * (order * 3)\n for i in range(order):\n n = i + 1\n hypothesis_ngrams = extract_char_ngrams(hypothesis, n)\n reference_ngrams = extract_char_ngrams(reference, n)\n common_ngrams = hypothesis_ngrams & reference_ngrams\n statistics[3 * i + 0] = sum(hypothesis_ngrams.values())\n statistics[3 * i + 1] = sum(reference_ngrams.values())\n statistics[3 * i + 2] = sum(common_ngrams.values())\n return statistics\n\n\ndef get_corpus_statistics(hypotheses: Iterable[str],\n references: Iterable[str],\n order: int = CHRF_ORDER,\n remove_whitespace: bool = True) -> List[float]:\n corpus_statistics = [0] * (order * 3)\n for hypothesis, reference in zip(hypotheses, references):\n statistics = get_sentence_statistics(hypothesis, reference, order=order, remove_whitespace=remove_whitespace)\n for i in range(len(statistics)):\n corpus_statistics[i] += statistics[i]\n return corpus_statistics\n\n\ndef _avg_precision_and_recall(statistics: List[float], order: int) -> Tuple[float, float]:\n avg_precision = 0.0\n avg_recall = 0.0\n effective_order = 0\n for i in range(order):\n hypotheses_ngrams = statistics[3 * i + 0]\n references_ngrams = statistics[3 * i + 1]\n common_ngrams = statistics[3 * i + 2]\n if hypotheses_ngrams > 0 and references_ngrams > 0:\n avg_precision += common_ngrams / hypotheses_ngrams\n avg_recall += common_ngrams / references_ngrams\n effective_order += 1\n if effective_order == 0:\n return 0.0, 0.0\n avg_precision /= effective_order\n avg_recall /= effective_order\n return avg_precision, avg_recall\n\n\ndef _chrf(avg_precision, avg_recall, beta: int = CHRF_BETA) -> float:\n if avg_precision + avg_recall == 0:\n return 0.0\n beta_square = beta ** 2\n score = (1 + beta_square) * (avg_precision * avg_recall) / ((beta_square * avg_precision) + avg_recall)\n return score\n\n\ndef corpus_chrf(hypotheses: Iterable[str],\n references: Iterable[str],\n order: int = CHRF_ORDER,\n beta: float = CHRF_BETA,\n remove_whitespace: bool = True) -> CHRF:\n \"\"\"\n Computes Chrf on a corpus.\n\n :param hypotheses: Stream of hypotheses.\n :param references: Stream of references\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete all whitespace from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. 
If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n corpus_statistics = get_corpus_statistics(hypotheses, references, order=order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(corpus_statistics, order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\ndef sentence_chrf(hypothesis: str,\n reference: str,\n order: int = CHRF_ORDER,\n beta: float = CHRF_BETA,\n remove_whitespace: bool = True) -> CHRF:\n \"\"\"\n Computes ChrF on a single sentence pair.\n\n :param hypothesis: Hypothesis string.\n :param reference: Reference string.\n :param order: Maximum n-gram order.\n :param remove_whitespace: Whether to delete whitespaces from hypothesis and reference strings.\n :param beta: Defines importance of recall w.r.t precision. If beta=1, same importance.\n :return: Chrf score.\n \"\"\"\n statistics = get_sentence_statistics(hypothesis, reference, order=order, remove_whitespace=remove_whitespace)\n avg_precision, avg_recall = _avg_precision_and_recall(statistics, order)\n return CHRF(_chrf(avg_precision, avg_recall, beta=beta))\n\n\ndef get_langpairs_for_testset(testset: str) -> List:\n \"\"\"Return a list of language pairs for a given test set.\"\"\"\n return list(filter(lambda x: re.match('\\w\\w\\-\\w\\w', x), DATASETS.get(testset, {}).keys()))\n\n\ndef get_a_list_of_testset_names() -> str:\n \"\"\"Return a string with a formatted list of available test sets plus their descriptions. \"\"\"\n message = 'The available test sets are:'\n for testset in sorted(DATASETS.keys(), reverse=True):\n message += '\\n%20s: %s' % (testset, DATASETS[testset].get('description', ''))\n return message\n\n\ndef _available_origlangs(test_sets, langpair):\n \"\"\"Return a list of origlang values in according to the raw SGM files.\"\"\"\n origlangs = set()\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])\n if rawfile.endswith('.sgm'):\n with smart_open(rawfile) as fin:\n for line in fin:\n if line.startswith('<doc '):\n doc_origlang = re.sub(r'.* origlang=\"([^\"]+)\".*\\n', '\\\\1', line)\n origlangs.add(doc_origlang)\n return sorted(list(origlangs))\n\n\ndef _filter_subset(systems, test_sets, langpair, origlang, subset=None):\n \"\"\"Filter sentences with a given origlang (or subset) according to the raw SGM files.\"\"\"\n if origlang is None and subset is None:\n return systems\n if test_sets is None or langpair is None:\n raise ValueError('Filtering for --origlang or --subset needs a test (-t) and a language pair (-l).')\n\n indices_to_keep = []\n for test_set in test_sets.split(','):\n rawfile = os.path.join(SACREBLEU_DIR, test_set, 'raw', DATASETS[test_set][langpair][0])\n if not rawfile.endswith('.sgm'):\n raise Exception('--origlang and --subset supports only *.sgm files, not %s', rawfile)\n if subset is not None:\n if test_set not in SUBSETS:\n raise Exception('No subset annotation available for test set ' + test_set)\n doc_to_tags = SUBSETS[test_set]\n number_sentences_included = 0\n with smart_open(rawfile) as fin:\n include_doc = False\n for line in fin:\n if line.startswith('<doc '):\n if origlang is None:\n include_doc = True\n else:\n doc_origlang = re.sub(r'.* origlang=\"([^\"]+)\".*\\n', '\\\\1', line)\n if origlang.startswith('non-'):\n include_doc = doc_origlang != origlang[4:]\n else:\n include_doc = doc_origlang == origlang\n if subset is not None:\n doc_id = re.sub(r'.* docid=\"([^\"]+)\".*\\n', '\\\\1', line)\n if not re.search(subset, 
doc_to_tags.get(doc_id, '')):\n include_doc = False\n if line.startswith('<seg '):\n indices_to_keep.append(include_doc)\n number_sentences_included += 1 if include_doc else 0\n return [[sentence for sentence,keep in zip(sys, indices_to_keep) if keep] for sys in systems]\n\n\ndef main():\n args = parse_args()\n\n # Explicitly set the encoding\n sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=True, newline=\"\\n\")\n sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=True)\n\n if not args.quiet:\n logging.basicConfig(level=logging.INFO, format='sacreBLEU: %(message)s')\n\n if args.download:\n download_test_set(args.download, args.langpair)\n sys.exit(0)\n\n if args.list:\n if args.test_set:\n print(' '.join(get_langpairs_for_testset(args.test_set)))\n else:\n print(get_a_list_of_testset_names())\n sys.exit(0)\n\n if args.sentence_level and len(args.metrics) > 1:\n sacrelogger.error('Only one metric can be used with Sentence-level reporting.')\n sys.exit(1)\n\n if args.citation:\n if not args.test_set:\n sacrelogger.error('I need a test set (-t).')\n sys.exit(1)\n for test_set in args.test_set.split(','):\n if 'citation' not in DATASETS[test_set]:\n sacrelogger.error('No citation found for %s', test_set)\n else:\n print(DATASETS[test_set]['citation'])\n sys.exit(0)\n\n if args.num_refs != 1 and (args.test_set is not None or len(args.refs) > 1):\n sacrelogger.error('The --num-refs argument allows you to provide any number of tab-delimited references in a single file.')\n sacrelogger.error('You can only use it with externaly-provided references, however (i.e., not with `-t`),')\n sacrelogger.error('and you cannot then provide multiple reference files.')\n sys.exit(1)\n\n if args.test_set is not None:\n for test_set in args.test_set.split(','):\n if test_set not in DATASETS:\n sacrelogger.error('Unknown test set \"%s\"\\n%s', test_set, get_a_list_of_testset_names())\n sys.exit(1)\n\n if args.test_set is None:\n if len(args.refs) == 0:\n sacrelogger.error('I need either a predefined test set (-t) or a list of references')\n sacrelogger.error(get_a_list_of_testset_names())\n sys.exit(1)\n elif len(args.refs) > 0:\n sacrelogger.error('I need exactly one of (a) a predefined test set (-t) or (b) a list of references')\n sys.exit(1)\n elif args.langpair is None:\n sacrelogger.error('I need a language pair (-l).')\n sys.exit(1)\n else:\n for test_set in args.test_set.split(','):\n if args.langpair not in DATASETS[test_set]:\n sacrelogger.error('No such language pair \"%s\"', args.langpair)\n sacrelogger.error('Available language pairs for test set \"%s\": %s', test_set,\n ', '.join(x for x in DATASETS[test_set].keys() if '-' in x))\n sys.exit(1)\n\n if args.echo:\n if args.langpair is None or args.test_set is None:\n sacrelogger.warning(\"--echo requires a test set (--t) and a language pair (-l)\")\n sys.exit(1)\n for test_set in args.test_set.split(','):\n print_test_set(test_set, args.langpair, args.echo, args.origlang, args.subset)\n sys.exit(0)\n\n if args.test_set is not None and args.tokenize == 'none':\n sacrelogger.warning(\"You are turning off sacrebleu's internal tokenization ('--tokenize none'), presumably to supply\\n\"\n \"your own reference tokenization. Published numbers will not be comparable with other papers.\\n\")\n\n # Internal tokenizer settings. 
Set to 'zh' for Chinese DEFAULT_TOKENIZER (\n if args.tokenize is None:\n # set default\n if args.langpair is not None and args.langpair.split('-')[1] == 'zh':\n args.tokenize = 'zh'\n elif args.langpair is not None and args.langpair.split('-')[1] == 'ja':\n args.tokenize = 'ja-mecab'\n else:\n args.tokenize = DEFAULT_TOKENIZER\n\n if args.langpair is not None and 'bleu' in args.metrics:\n if args.langpair.split('-')[1] == 'zh' and args.tokenize != 'zh':\n logger.warning('You should also pass \"--tok zh\" when scoring Chinese...')\n if args.langpair.split('-')[1] == 'ja' and not args.tokenize.startswith('ja-'):\n logger.warning('You should also pass \"--tok ja-mecab\" when scoring Japanese...')\n\n # concat_ref_files is a list of list of reference filenames, for example:\n # concat_ref_files = [[testset1_refA, testset1_refB], [testset2_refA, testset2_refB]]\n if args.test_set is None:\n concat_ref_files = [args.refs]\n else:\n concat_ref_files = []\n for test_set in args.test_set.split(','):\n _, *ref_files = download_test_set(test_set, args.langpair)\n if len(ref_files) == 0:\n sacrelogger.warning('No references found for test set {}/{}.'.format(test_set, args.langpair))\n concat_ref_files.append(ref_files)\n\n\n inputfh = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding) if args.input == '-' else smart_open(args.input, encoding=args.encoding)\n full_system = inputfh.readlines()\n\n # Read references\n full_refs = [[] for x in range(max(len(concat_ref_files[0]), args.num_refs))]\n for ref_files in concat_ref_files:\n for refno, ref_file in enumerate(ref_files):\n for lineno, line in enumerate(smart_open(ref_file, encoding=args.encoding), 1):\n if args.num_refs != 1:\n splits = line.rstrip().split(sep='\\t', maxsplit=args.num_refs-1)\n if len(splits) != args.num_refs:\n sacrelogger.error('FATAL: line {}: expected {} fields, but found {}.'.format(lineno, args.num_refs, len(splits)))\n sys.exit(17)\n for refno, split in enumerate(splits):\n full_refs[refno].append(split)\n else:\n full_refs[refno].append(line)\n\n # Filter sentences according to a given origlang\n system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, args.origlang, args.subset)\n if len(system) == 0:\n message = 'Test set %s contains no sentence' % args.test_set\n if args.origlang is not None or args.subset is not None:\n message += ' with'\n message += '' if args.origlang is None else ' origlang=' + args.origlang\n message += '' if args.subset is None else ' subset=' + args.subset\n sacrelogger.error(message)\n exit(1)\n\n # Handle sentence level and quit\n if args.sentence_level:\n for output, *references in zip(system, *refs):\n results = []\n for metric in args.metrics:\n if metric == 'bleu':\n bleu = sentence_bleu(output,\n [[x] for x in references],\n smooth_method=args.smooth,\n smooth_value=args.smooth_value)\n results.append(bleu)\n if metric == 'chrf':\n chrf = sentence_chrf(output,\n references[0],\n args.chrf_order,\n args.chrf_beta,\n remove_whitespace=not args.chrf_whitespace)\n results.append(chrf)\n\n display_metric(args.metrics, results, len(refs), args)\n\n sys.exit(0)\n\n # Else, handle system level\n results = []\n try:\n for metric in args.metrics:\n if metric == 'bleu':\n bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)\n results.append(bleu)\n elif metric == 'chrf':\n chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, 
remove_whitespace=not args.chrf_whitespace)\n results.append(chrf)\n except EOFError:\n sacrelogger.error('The input and reference stream(s) were of different lengths.')\n if args.test_set is not None:\n sacrelogger.error('\\nThis could be a problem with your system output or with sacreBLEU\\'s reference database.\\n'\n 'If the latter, you can clean out the references cache by typing:\\n'\n '\\n'\n ' rm -r %s/%s\\n'\n '\\n'\n 'They will be downloaded automatically again the next time you run sacreBLEU.', SACREBLEU_DIR,\n args.test_set)\n sys.exit(1)\n\n display_metric(args.metrics, results, len(refs), args)\n\n if args.detail:\n width = args.width\n sents_digits = len(str(len(full_system)))\n origlangs = args.origlang if args.origlang else _available_origlangs(args.test_set, args.langpair)\n for origlang in origlangs:\n subsets = [None]\n if args.subset is not None:\n subsets += [args.subset]\n elif all(t in SUBSETS for t in args.test_set.split(',')):\n subsets += COUNTRIES + DOMAINS\n for subset in subsets:\n system, *refs = _filter_subset([full_system, *full_refs], args.test_set, args.langpair, origlang, subset)\n if len(system) == 0:\n continue\n if subset in COUNTRIES:\n subset_str = '%20s' % ('country=' + subset)\n elif subset in DOMAINS:\n subset_str = '%20s' % ('domain=' + subset)\n else:\n subset_str = '%20s' % ''\n if 'bleu' in args.metrics:\n bleu = corpus_bleu(system, refs, smooth_method=args.smooth, smooth_value=args.smooth_value, force=args.force, lowercase=args.lc, tokenize=args.tokenize)\n print('origlang={} {}: sentences={:{}} BLEU={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, bleu.score, width+4, width))\n if 'chrf' in args.metrics:\n chrf = corpus_chrf(system, refs[0], beta=args.chrf_beta, order=args.chrf_order, remove_whitespace=not args.chrf_whitespace)\n print('origlang={} {}: sentences={:{}} chrF={:{}.{}f}'.format(origlang, subset_str, len(system), sents_digits, chrf.score, width+4, width))\n\n\ndef display_metric(metrics_to_print, results, num_refs, args):\n \"\"\"\n Badly in need of refactoring.\n One idea is to put all of this in the BLEU and CHRF classes, and then define\n a Result::signature() function.\n \"\"\"\n for metric, result in zip(metrics_to_print, results):\n if metric == 'bleu':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = bleu_signature(args, num_refs)\n print(result.format(args.width).replace('BLEU', 'BLEU+' + version_str))\n\n elif metric == 'chrf':\n if args.score_only:\n print('{0:.{1}f}'.format(result.score, args.width))\n else:\n version_str = chrf_signature(args, num_refs)\n print('chrF{0:d}+{1} = {2:.{3}f}'.format(args.chrf_beta, version_str, result.score, args.width))\n\n\ndef parse_args():\n arg_parser = argparse.ArgumentParser(\n description='sacreBLEU: Hassle-free computation of shareable BLEU scores.\\n'\n 'Quick usage: score your detokenized output against WMT\\'14 EN-DE:\\n'\n ' cat output.detok.de | sacrebleu -t wmt14 -l en-de',\n # epilog = 'Available test sets: ' + ','.join(sorted(DATASETS.keys(), reverse=True)),\n formatter_class=argparse.RawDescriptionHelpFormatter)\n arg_parser.add_argument('--test-set', '-t', type=str, default=None,\n help='the test set to use (see also --list) or a comma-separated list of test sets to be concatenated')\n arg_parser.add_argument('-lc', action='store_true', default=False,\n help='Use case-insensitive BLEU (default: actual case)')\n arg_parser.add_argument('--sentence-level', '-sl', action='store_true',\n help='Output 
metric on each sentence.')\n arg_parser.add_argument('--smooth', '-s', choices=['exp', 'floor', 'add-k', 'none'],\n default='exp',\n help='smoothing method: exponential decay (default), floor (increment zero counts), add-k (increment num/denom by k for n>1), or none')\n arg_parser.add_argument('--smooth-value', '-sv', type=float, default=None,\n help='The value to pass to the smoothing technique, only used for floor and add-k. Default floor: {}, add-k: {}.'.format(\n SMOOTH_VALUE_DEFAULT['floor'], SMOOTH_VALUE_DEFAULT['add-k']))\n arg_parser.add_argument('--tokenize', '-tok', choices=TOKENIZERS.keys(), default=None,\n help='tokenization method to use')\n arg_parser.add_argument('--language-pair', '-l', dest='langpair', default=None,\n help='source-target language pair (2-char ISO639-1 codes)')\n arg_parser.add_argument('--origlang', '-ol', dest='origlang', default=None,\n help='use a subset of sentences with a given original language (2-char ISO639-1 codes), \"non-\" prefix means negation')\n arg_parser.add_argument('--subset', dest='subset', default=None,\n help='use a subset of sentences whose document annotation matches a give regex (see SUBSETS in the source code)')\n arg_parser.add_argument('--download', type=str, default=None,\n help='download a test set and quit')\n arg_parser.add_argument('--echo', choices=['src', 'ref', 'both'], type=str, default=None,\n help='output the source (src), reference (ref), or both (both, pasted) to STDOUT and quit')\n arg_parser.add_argument('--input', '-i', type=str, default='-',\n help='Read input from a file instead of STDIN')\n arg_parser.add_argument('--num-refs', '-nr', type=int, default=1,\n help='Split the reference stream on tabs, and expect this many references. Default: %(default)s.')\n arg_parser.add_argument('refs', nargs='*', default=[],\n help='optional list of references (for backwards-compatibility with older scripts)')\n arg_parser.add_argument('--metrics', '-m', choices=['bleu', 'chrf'], nargs='+',\n default=['bleu'],\n help='metrics to compute (default: bleu)')\n arg_parser.add_argument('--chrf-order', type=int, default=CHRF_ORDER,\n help='chrf character order (default: %(default)s)')\n arg_parser.add_argument('--chrf-beta', type=int, default=CHRF_BETA,\n help='chrf BETA parameter (default: %(default)s)')\n arg_parser.add_argument('--chrf-whitespace', action='store_true', default=False,\n help='include whitespace in chrF calculation (default: %(default)s)')\n arg_parser.add_argument('--short', default=False, action='store_true',\n help='produce a shorter (less human readable) signature')\n arg_parser.add_argument('--score-only', '-b', default=False, action='store_true',\n help='output only the BLEU score')\n arg_parser.add_argument('--force', default=False, action='store_true',\n help='insist that your tokenized input is actually detokenized')\n arg_parser.add_argument('--quiet', '-q', default=False, action='store_true',\n help='suppress informative output')\n arg_parser.add_argument('--encoding', '-e', type=str, default='utf-8',\n help='open text files with specified encoding (default: %(default)s)')\n arg_parser.add_argument('--list', default=False, action='store_true',\n help='print a list of all available test sets.')\n arg_parser.add_argument('--citation', '--cite', default=False, action='store_true',\n help='dump the bibtex citation and quit.')\n arg_parser.add_argument('--width', '-w', type=int, default=1,\n help='floating point width (default: %(default)s)')\n arg_parser.add_argument('--detail', '-d', default=False, 
action='store_true',\n help='print extra information (split test sets based on origlang)')\n arg_parser.add_argument('-V', '--version', action='version',\n version='%(prog)s {}'.format(VERSION))\n args = arg_parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
27,
29,
31,
37,
42
]
}
|
[
27,
29,
31,
37,
42
] |
class BinarySearchTreeNode:
    def __init__(self, node_data):
        self.data = node_data
        self.left = None
        self.right = None

def bst_contains(root: BinarySearchTreeNode, number):
    if root is None:
        return 0
    if(root.data == number):
        return 1
    elif(root.data < number):
        #si int es mas grande que el data actual, buscas en derecha
        #-----------return es importantitismo------------
        return bst_contains(root.right, number)
    elif(root.data > number):
        #si int es mas pequeno que el data actual, buscas en derecha
        #-----------return es importantitismo------------
        return bst_contains(root.left, number)
|
normal
|
{
"blob_id": "3bdf3a48451b83347a6c9a9851b5b85b608f0b63",
"index": 2826,
"step-1": "<mask token>\n",
"step-2": "class BinarySearchTreeNode:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class BinarySearchTreeNode:\n\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\n\n<mask token>\n",
"step-4": "class BinarySearchTreeNode:\n\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\n\ndef bst_contains(root: BinarySearchTreeNode, number):\n if root is None:\n return 0\n if root.data == number:\n return 1\n elif root.data < number:\n return bst_contains(root.right, number)\n elif root.data > number:\n return bst_contains(root.left, number)\n",
"step-5": "class BinarySearchTreeNode:\n def __init__(self, node_data):\n self.data = node_data\n self.left = None\n self.right = None\n\ndef bst_contains(root: BinarySearchTreeNode, number):\n if root is None:\n return 0\n\n if(root.data == number):\n return 1\n elif(root.data < number):\n #si int es mas grande que el data actual, buscas en derecha\n #-----------return es importantitismo------------\n return bst_contains(root.right, number)\n\n elif(root.data > number):\n #si int es mas pequeno que el data actual, buscas en derecha\n #-----------return es importantitismo------------\n return bst_contains(root.left, number)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding:utf-8 -*-
from odoo import api, models, fields, _
class hrsalaryRule(models.Model):
    _inherit = "hr.salary.rule"
    is_tax_fdfp = fields.Boolean("Est un impôt FDFP")
|
normal
|
{
"blob_id": "097a87f7f1346e5db1599e59680232912348aef7",
"index": 311,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass hrsalaryRule(models.Model):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass hrsalaryRule(models.Model):\n _inherit = 'hr.salary.rule'\n is_tax_fdfp = fields.Boolean('Est un impôt FDFP')\n",
"step-4": "from odoo import api, models, fields, _\n\n\nclass hrsalaryRule(models.Model):\n _inherit = 'hr.salary.rule'\n is_tax_fdfp = fields.Boolean('Est un impôt FDFP')\n",
"step-5": "# -*- coding:utf-8 -*-\r\n\r\nfrom odoo import api, models, fields, _\r\n\r\n\r\nclass hrsalaryRule(models.Model):\r\n _inherit = \"hr.salary.rule\"\r\n\r\n is_tax_fdfp = fields.Boolean(\"Est un impôt FDFP\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from concurrent import futures
import time
import math
import logging
import grpc
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import sys
sys.path.append('/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')
import cv2
from PRNet.utils.cv_plot import plot_kpt, plot_vertices
import pymesh
import threading
from Queue import Queue
from tensorflow.python.framework import tensor_util
import numpy as np
import pyaudio
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=22500,
                output=True)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (10,350)
fontScale = 1
fontColor = (255,255,255)
lineType = 2
subtitles = Queue()
q = Queue()
def worker():
    display_subtitle = ""
    while True:
        item = q.get()
        image = np.zeros((480, 640))
        if item is not None:
            vertices = item
            show_img = plot_vertices(np.zeros_like(image), vertices)
        else:
            show_img = image
        # Display the resulting frame
        if not subtitles.empty():
            text = subtitles.get()
            subtitles.task_done()
            display_subtitle = text
        cv2.putText(show_img,display_subtitle,
                    bottomLeftCornerOfText,
                    font,
                    fontScale,
                    fontColor,
                    lineType)
        cv2.imshow('frame',show_img)
        # Press Q on keyboard to stop recording
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        q.task_done()
class FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):
    def Predict(self, request, context):
        """Predict -- provides access to loaded TensorFlow model.
        """
        global q
        global stream
        if "vertices" in request.inputs:
            print("vertices")
            vertices = tensor_util.MakeNdarray(request.inputs["vertices"])
            q.put(vertices)
        elif "audio" in request.inputs:
            print('audio')
            # audio = tensor_util.MakeNdarray(request.inputs['audio'])
            print(type(request.inputs['audio'].string_val[0]))
            audio = request.inputs['audio'].string_val[0]
            # print(request.inputs['audio'])
            stream.write(audio)
        elif "subtitle" in request.inputs:
            print('subtitle')
            subtitles.put(request.inputs['subtitle'].string_val[0])
        dumbresult = predict_pb2.PredictResponse()
        dumbresult.outputs["message"].CopyFrom(tf.make_tensor_proto("OK"))
        return dumbresult
def serve():
    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(
        FakeServer(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    # server.wait_for_termination()
    _ONE_DAY_IN_SECONDS = 60 * 60 * 24
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
    stream.stop_stream()
    stream.close()
    p.terminate()
    q.join() # block until all tasks are donet
    subtitles.join()
if __name__ == '__main__':
    logging.basicConfig()
    serve()
|
normal
|
{
"blob_id": "0ec5d6ce11851a577046cf73cf98c91b6dfb9f67",
"index": 1550,
"step-1": "<mask token>\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n<mask token>\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n",
"step-3": "<mask token>\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n<mask token>\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = 10, 350\nfontScale = 1\nfontColor = 255, 255, 255\nlineType = 2\nsubtitles = Queue()\nq = Queue()\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n",
"step-4": "from concurrent import futures\nimport time\nimport math\nimport logging\nimport grpc\nimport tensorflow as tf\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\nimport sys\nsys.path.append(\n '/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\nimport cv2\nfrom PRNet.utils.cv_plot import plot_kpt, plot_vertices\nimport pymesh\nimport threading\nfrom Queue import Queue\nfrom tensorflow.python.framework import tensor_util\nimport numpy as np\nimport pyaudio\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=22500, output=True)\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = 10, 350\nfontScale = 1\nfontColor = 255, 255, 255\nlineType = 2\nsubtitles = Queue()\nq = Queue()\n\n\ndef worker():\n display_subtitle = ''\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img, display_subtitle, bottomLeftCornerOfText,\n font, fontScale, fontColor, lineType)\n cv2.imshow('frame', show_img)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n q.task_done()\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if 'vertices' in request.inputs:\n print('vertices')\n vertices = tensor_util.MakeNdarray(request.inputs['vertices'])\n q.put(vertices)\n elif 'audio' in request.inputs:\n print('audio')\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n stream.write(audio)\n elif 'subtitle' in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs['message'].CopyFrom(tf.make_tensor_proto('OK'))\n return dumbresult\n\n\ndef serve():\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n stream.stop_stream()\n stream.close()\n p.terminate()\n q.join()\n subtitles.join()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n",
"step-5": "from concurrent import futures\nimport time\nimport math\nimport logging\n\nimport grpc\nimport tensorflow as tf\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_service_pb2_grpc\n\nimport sys\nsys.path.append('/home/yitao/Documents/fun-project/tensorflow-related/miniature-winner/')\n\nimport cv2\nfrom PRNet.utils.cv_plot import plot_kpt, plot_vertices\nimport pymesh\nimport threading\nfrom Queue import Queue\nfrom tensorflow.python.framework import tensor_util\nimport numpy as np\n\n\nimport pyaudio\n\np = pyaudio.PyAudio()\n\nstream = p.open(format=pyaudio.paInt16,\n channels=1,\n rate=22500,\n output=True)\n\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\nbottomLeftCornerOfText = (10,350)\nfontScale = 1\nfontColor = (255,255,255)\nlineType = 2\n\n\nsubtitles = Queue()\n\nq = Queue()\ndef worker():\n display_subtitle = \"\"\n while True:\n item = q.get()\n image = np.zeros((480, 640))\n if item is not None:\n vertices = item\n show_img = plot_vertices(np.zeros_like(image), vertices)\n else:\n show_img = image \n # Display the resulting frame\n\n if not subtitles.empty():\n text = subtitles.get()\n subtitles.task_done()\n display_subtitle = text\n cv2.putText(show_img,display_subtitle, \n bottomLeftCornerOfText, \n font, \n fontScale,\n fontColor,\n lineType)\n cv2.imshow('frame',show_img)\n\n\n # Press Q on keyboard to stop recording\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break \n q.task_done()\n\n\n\n\nclass FakeServer(prediction_service_pb2_grpc.PredictionServiceServicer):\n def Predict(self, request, context):\n \"\"\"Predict -- provides access to loaded TensorFlow model.\n \"\"\"\n global q\n global stream\n if \"vertices\" in request.inputs:\n print(\"vertices\")\n vertices = tensor_util.MakeNdarray(request.inputs[\"vertices\"])\n q.put(vertices)\n elif \"audio\" in request.inputs:\n print('audio')\n # audio = tensor_util.MakeNdarray(request.inputs['audio'])\n print(type(request.inputs['audio'].string_val[0]))\n audio = request.inputs['audio'].string_val[0]\n # print(request.inputs['audio'])\n stream.write(audio)\n elif \"subtitle\" in request.inputs:\n print('subtitle')\n subtitles.put(request.inputs['subtitle'].string_val[0])\n\n\n\n\n dumbresult = predict_pb2.PredictResponse()\n dumbresult.outputs[\"message\"].CopyFrom(tf.make_tensor_proto(\"OK\"))\n return dumbresult\n\n\n\ndef serve():\n\n t = threading.Thread(target=worker)\n t.daemon = True\n t.start()\n\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n prediction_service_pb2_grpc.add_PredictionServiceServicer_to_server(\n FakeServer(), server)\n server.add_insecure_port('[::]:50051')\n server.start()\n # server.wait_for_termination()\n\n\n\n\n _ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n q.join() # block until all tasks are donet\n subtitles.join()\n\nif __name__ == '__main__':\n logging.basicConfig()\n serve()\n\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import csv as csv
import hashlib
from sets import Set
def func_hash(parameter):
    hash_object = hashlib.sha384(parameter)
    table_hash = hash_object.hexdigest()
    return table_hash
def myFunk():
    with open('users.csv', 'w') as fp:
        a = csv.writer(fp, delimiter=',')
        roles = ['inspector', 'admin']
        data = [['Userneme', 'hash_password', 'role'],
                ['Olya', func_hash('Olya'), 'admin'],
                ['Stas', func_hash('Stas'), 'admin'],
                ['Dima', func_hash('Dima'), 'admin'],
                ['Kyrylo', func_hash('Kyrylo'), 'admin'],
                ['Lubchyk', func_hash('Lubchyk'), 'inspector'],
                ['Sashko', func_hash('Sashko'),roles],
                ]
        a.writerows(data)
myFunk()
|
normal
|
{
"blob_id": "96d13a883590ca969e997bbb27bcdbee1b24252f",
"index": 2730,
"step-1": "<mask token>\n\n\ndef myFunk():\n with open('users.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n roles = ['inspector', 'admin']\n data = [['Userneme', 'hash_password', 'role'], ['Olya', func_hash(\n 'Olya'), 'admin'], ['Stas', func_hash('Stas'), 'admin'], [\n 'Dima', func_hash('Dima'), 'admin'], ['Kyrylo', func_hash(\n 'Kyrylo'), 'admin'], ['Lubchyk', func_hash('Lubchyk'),\n 'inspector'], ['Sashko', func_hash('Sashko'), roles]]\n a.writerows(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func_hash(parameter):\n hash_object = hashlib.sha384(parameter)\n table_hash = hash_object.hexdigest()\n return table_hash\n\n\ndef myFunk():\n with open('users.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n roles = ['inspector', 'admin']\n data = [['Userneme', 'hash_password', 'role'], ['Olya', func_hash(\n 'Olya'), 'admin'], ['Stas', func_hash('Stas'), 'admin'], [\n 'Dima', func_hash('Dima'), 'admin'], ['Kyrylo', func_hash(\n 'Kyrylo'), 'admin'], ['Lubchyk', func_hash('Lubchyk'),\n 'inspector'], ['Sashko', func_hash('Sashko'), roles]]\n a.writerows(data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef func_hash(parameter):\n hash_object = hashlib.sha384(parameter)\n table_hash = hash_object.hexdigest()\n return table_hash\n\n\ndef myFunk():\n with open('users.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n roles = ['inspector', 'admin']\n data = [['Userneme', 'hash_password', 'role'], ['Olya', func_hash(\n 'Olya'), 'admin'], ['Stas', func_hash('Stas'), 'admin'], [\n 'Dima', func_hash('Dima'), 'admin'], ['Kyrylo', func_hash(\n 'Kyrylo'), 'admin'], ['Lubchyk', func_hash('Lubchyk'),\n 'inspector'], ['Sashko', func_hash('Sashko'), roles]]\n a.writerows(data)\n\n\nmyFunk()\n",
"step-4": "import csv as csv\nimport hashlib\nfrom sets import Set\n\n\ndef func_hash(parameter):\n hash_object = hashlib.sha384(parameter)\n table_hash = hash_object.hexdigest()\n return table_hash\n\n\ndef myFunk():\n with open('users.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n roles = ['inspector', 'admin']\n data = [['Userneme', 'hash_password', 'role'], ['Olya', func_hash(\n 'Olya'), 'admin'], ['Stas', func_hash('Stas'), 'admin'], [\n 'Dima', func_hash('Dima'), 'admin'], ['Kyrylo', func_hash(\n 'Kyrylo'), 'admin'], ['Lubchyk', func_hash('Lubchyk'),\n 'inspector'], ['Sashko', func_hash('Sashko'), roles]]\n a.writerows(data)\n\n\nmyFunk()\n",
"step-5": "import csv as csv\nimport hashlib\nfrom sets import Set\n\ndef func_hash(parameter):\n hash_object = hashlib.sha384(parameter)\n table_hash = hash_object.hexdigest()\n return table_hash\n\ndef myFunk():\n\twith open('users.csv', 'w') as fp:\n\t a = csv.writer(fp, delimiter=',')\n\t roles = ['inspector', 'admin']\n\t data = [['Userneme', 'hash_password', 'role'],\n\t ['Olya', func_hash('Olya'), 'admin'],\n\t ['Stas', func_hash('Stas'), 'admin'],\n\t ['Dima', func_hash('Dima'), 'admin'],\n\t ['Kyrylo', func_hash('Kyrylo'), 'admin'],\n\t ['Lubchyk', func_hash('Lubchyk'), 'inspector'],\n\t ['Sashko', func_hash('Sashko'),roles],\n\t ]\n\t a.writerows(data)\n\nmyFunk()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pymongo
import pandas as pd
import re
from pymongo import MongoClient
from nltk.corpus import stopwords
from nltk import word_tokenize
from gensim import corpora
import pickle
client = MongoClient()
db = client.redditCrawler
collection = db.data_test1
def remove_posts(data, index_list):
    data = data.drop(index_list)
    return data.reset_index(drop=True)
data = pd.DataFrame(list(collection.find()))
mod_posts = [i for i in range(len(data)) if 'moronic Monday' in data['title'][i]]
#remove all the mod posts that include 'moronic Monday'
data = remove_posts(data, mod_posts)
titles = data['title']
content = data['post']
comments = data['comments']
# collect only the comments without vote scores, dates, etc
comments_in_thread = []
for index, thread in enumerate(comments):
    aggregate = []
    for comment in thread:
        if type(comment['comment_reply']) == str:
            aggregate.append(comment['comment_reply'].lower())
    comments_in_thread.append(aggregate)
comments = comments_in_thread
#number of titles and post need to be the same
assert len(titles) == len(content)
assert len(comments) == len(content)
#preprocess
stop_words = stopwords.words('english')
stop_words.extend(['would',
                   'people',
                   'money',
                   'think',
                   'thinks',
                   'thanks',
                   'thing',
                   'things',
                   'ok',
                   'nt',
                   'actually',
                   'like',
                   'get',
                   'even',
                   'could',
                   'also',
                   ])
#Function to clean off each dataset item; stop words (what, if, is, where, how, I, she)
def preprocess(text):
    #no content/nan/len of 0
    #text = [re.sub('[^a-zA-Z0-9]+', ' ', word) for word in text]
    text = text.lower()
    text = text.replace('$', ' ')
    text = text.replace('-', ' ')
    text = text.replace("/", ' ')
    text = text.replace(".", ' ')
    text = word_tokenize(text)
##    text = [re.sub('[^a-zA-Z0-9]+', '', word) for word in text]
    text = [word for word in text if word not in stop_words]
    text = [word for word in text if word.isalpha()]
    return text
#pass titles and comments through pre-processor
titles = [preprocess(title) for title in titles]
posts = [preprocess(text) for text in content]
# process comments
##comments = [[preprocess(comment) for comment in thread] for thread in comments]
temp = []
for i, thread in enumerate(comments):
    temp_thread = []
    temp_thread.extend(titles[i])
    for comment in thread:
        temp_thread.extend(preprocess(comment))
    temp.append(temp_thread)
comments = temp
# form a list of dictionaries for each title, compile
# each word and its corresponding frequencies in the post's comment section
list_of_dict = []
for index, title in enumerate(titles):
    text = ''
    bag_of_words = set(title)
    text = ' '.join(comments_in_thread[index])
##    text = comments[index]
    dictionary = {word:text.count(word) for word in bag_of_words if text.count(word) > 0}
    list_of_dict.append(dictionary)
title_keywords = [list(Dict.keys()) if len(Dict) > 0 else [0] for Dict in list_of_dict]
title_keywords = [word for sublist in title_keywords for word in sublist if word != 0 ]
title_keywords = set(title_keywords)
##title_keywords = set(title_keywords)
##count the number of keywords in the comment section
def count_keywords(comments, keywords):
## sample = ' '.join(comments).split()
    return {word: comments.count(word) for word in keywords if comments.count(word) > 0}
keyword_dict = [count_keywords(comment, title_keywords) for comment in comments]
for index, thread in enumerate(keyword_dict):
    #normalize each keyword by the number of words present
    df = pd.DataFrame()
    df['word'] = thread.keys()
    df['count'] = thread.values()
    df = df.sort_values('count', ascending = False)
    #dividing by number of words in each thread
##    df['frequency'] = df['count']/(len(comments[index]))
    df['frequency'] = df['count']/(1+len(comments_in_thread[index]))
    df['count'] = df['count']/(len(comments[index]))**0.5
    keyword_dict[index] = df.reset_index(drop=True)
#save varialbes
variables = [data['title'], titles, posts, comments, comments_in_thread,
             list_of_dict, title_keywords, keyword_dict]
with open('variables.txt', 'wb') as fp:
    pickle.dump(variables, fp)
|
normal
|
{
"blob_id": "341fb4442ba1d1bb13dbbe123e1051e1ceeb91e7",
"index": 4431,
"step-1": "<mask token>\n\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\n\n<mask token>\n\n\ndef preprocess(text):\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace('/', ' ')\n text = text.replace('.', ' ')\n text = word_tokenize(text)\n text = [word for word in text if word not in stop_words]\n text = [word for word in text if word.isalpha()]\n return text\n\n\n<mask token>\n\n\ndef count_keywords(comments, keywords):\n return {word: comments.count(word) for word in keywords if comments.\n count(word) > 0}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\n\n<mask token>\nfor index, thread in enumerate(comments):\n aggregate = []\n for comment in thread:\n if type(comment['comment_reply']) == str:\n aggregate.append(comment['comment_reply'].lower())\n comments_in_thread.append(aggregate)\n<mask token>\nassert len(titles) == len(content)\nassert len(comments) == len(content)\n<mask token>\nstop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks',\n 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even',\n 'could', 'also'])\n\n\ndef preprocess(text):\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace('/', ' ')\n text = text.replace('.', ' ')\n text = word_tokenize(text)\n text = [word for word in text if word not in stop_words]\n text = [word for word in text if word.isalpha()]\n return text\n\n\n<mask token>\nfor i, thread in enumerate(comments):\n temp_thread = []\n temp_thread.extend(titles[i])\n for comment in thread:\n temp_thread.extend(preprocess(comment))\n temp.append(temp_thread)\n<mask token>\nfor index, title in enumerate(titles):\n text = ''\n bag_of_words = set(title)\n text = ' '.join(comments_in_thread[index])\n dictionary = {word: text.count(word) for word in bag_of_words if text.\n count(word) > 0}\n list_of_dict.append(dictionary)\n<mask token>\n\n\ndef count_keywords(comments, keywords):\n return {word: comments.count(word) for word in keywords if comments.\n count(word) > 0}\n\n\n<mask token>\nfor index, thread in enumerate(keyword_dict):\n df = pd.DataFrame()\n df['word'] = thread.keys()\n df['count'] = thread.values()\n df = df.sort_values('count', ascending=False)\n df['frequency'] = df['count'] / (1 + len(comments_in_thread[index]))\n df['count'] = df['count'] / len(comments[index]) ** 0.5\n keyword_dict[index] = df.reset_index(drop=True)\n<mask token>\nwith open('variables.txt', 'wb') as fp:\n pickle.dump(variables, fp)\n",
"step-3": "<mask token>\nclient = MongoClient()\ndb = client.redditCrawler\ncollection = db.data_test1\n\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\n\ndata = pd.DataFrame(list(collection.find()))\nmod_posts = [i for i in range(len(data)) if 'moronic Monday' in data[\n 'title'][i]]\ndata = remove_posts(data, mod_posts)\ntitles = data['title']\ncontent = data['post']\ncomments = data['comments']\ncomments_in_thread = []\nfor index, thread in enumerate(comments):\n aggregate = []\n for comment in thread:\n if type(comment['comment_reply']) == str:\n aggregate.append(comment['comment_reply'].lower())\n comments_in_thread.append(aggregate)\ncomments = comments_in_thread\nassert len(titles) == len(content)\nassert len(comments) == len(content)\nstop_words = stopwords.words('english')\nstop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks',\n 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even',\n 'could', 'also'])\n\n\ndef preprocess(text):\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace('/', ' ')\n text = text.replace('.', ' ')\n text = word_tokenize(text)\n text = [word for word in text if word not in stop_words]\n text = [word for word in text if word.isalpha()]\n return text\n\n\ntitles = [preprocess(title) for title in titles]\nposts = [preprocess(text) for text in content]\ntemp = []\nfor i, thread in enumerate(comments):\n temp_thread = []\n temp_thread.extend(titles[i])\n for comment in thread:\n temp_thread.extend(preprocess(comment))\n temp.append(temp_thread)\ncomments = temp\nlist_of_dict = []\nfor index, title in enumerate(titles):\n text = ''\n bag_of_words = set(title)\n text = ' '.join(comments_in_thread[index])\n dictionary = {word: text.count(word) for word in bag_of_words if text.\n count(word) > 0}\n list_of_dict.append(dictionary)\ntitle_keywords = [(list(Dict.keys()) if len(Dict) > 0 else [0]) for Dict in\n list_of_dict]\ntitle_keywords = [word for sublist in title_keywords for word in sublist if\n word != 0]\ntitle_keywords = set(title_keywords)\n\n\ndef count_keywords(comments, keywords):\n return {word: comments.count(word) for word in keywords if comments.\n count(word) > 0}\n\n\nkeyword_dict = [count_keywords(comment, title_keywords) for comment in comments\n ]\nfor index, thread in enumerate(keyword_dict):\n df = pd.DataFrame()\n df['word'] = thread.keys()\n df['count'] = thread.values()\n df = df.sort_values('count', ascending=False)\n df['frequency'] = df['count'] / (1 + len(comments_in_thread[index]))\n df['count'] = df['count'] / len(comments[index]) ** 0.5\n keyword_dict[index] = df.reset_index(drop=True)\nvariables = [data['title'], titles, posts, comments, comments_in_thread,\n list_of_dict, title_keywords, keyword_dict]\nwith open('variables.txt', 'wb') as fp:\n pickle.dump(variables, fp)\n",
"step-4": "import pymongo\nimport pandas as pd\nimport re\nfrom pymongo import MongoClient\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom gensim import corpora\nimport pickle\nclient = MongoClient()\ndb = client.redditCrawler\ncollection = db.data_test1\n\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\n\ndata = pd.DataFrame(list(collection.find()))\nmod_posts = [i for i in range(len(data)) if 'moronic Monday' in data[\n 'title'][i]]\ndata = remove_posts(data, mod_posts)\ntitles = data['title']\ncontent = data['post']\ncomments = data['comments']\ncomments_in_thread = []\nfor index, thread in enumerate(comments):\n aggregate = []\n for comment in thread:\n if type(comment['comment_reply']) == str:\n aggregate.append(comment['comment_reply'].lower())\n comments_in_thread.append(aggregate)\ncomments = comments_in_thread\nassert len(titles) == len(content)\nassert len(comments) == len(content)\nstop_words = stopwords.words('english')\nstop_words.extend(['would', 'people', 'money', 'think', 'thinks', 'thanks',\n 'thing', 'things', 'ok', 'nt', 'actually', 'like', 'get', 'even',\n 'could', 'also'])\n\n\ndef preprocess(text):\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace('/', ' ')\n text = text.replace('.', ' ')\n text = word_tokenize(text)\n text = [word for word in text if word not in stop_words]\n text = [word for word in text if word.isalpha()]\n return text\n\n\ntitles = [preprocess(title) for title in titles]\nposts = [preprocess(text) for text in content]\ntemp = []\nfor i, thread in enumerate(comments):\n temp_thread = []\n temp_thread.extend(titles[i])\n for comment in thread:\n temp_thread.extend(preprocess(comment))\n temp.append(temp_thread)\ncomments = temp\nlist_of_dict = []\nfor index, title in enumerate(titles):\n text = ''\n bag_of_words = set(title)\n text = ' '.join(comments_in_thread[index])\n dictionary = {word: text.count(word) for word in bag_of_words if text.\n count(word) > 0}\n list_of_dict.append(dictionary)\ntitle_keywords = [(list(Dict.keys()) if len(Dict) > 0 else [0]) for Dict in\n list_of_dict]\ntitle_keywords = [word for sublist in title_keywords for word in sublist if\n word != 0]\ntitle_keywords = set(title_keywords)\n\n\ndef count_keywords(comments, keywords):\n return {word: comments.count(word) for word in keywords if comments.\n count(word) > 0}\n\n\nkeyword_dict = [count_keywords(comment, title_keywords) for comment in comments\n ]\nfor index, thread in enumerate(keyword_dict):\n df = pd.DataFrame()\n df['word'] = thread.keys()\n df['count'] = thread.values()\n df = df.sort_values('count', ascending=False)\n df['frequency'] = df['count'] / (1 + len(comments_in_thread[index]))\n df['count'] = df['count'] / len(comments[index]) ** 0.5\n keyword_dict[index] = df.reset_index(drop=True)\nvariables = [data['title'], titles, posts, comments, comments_in_thread,\n list_of_dict, title_keywords, keyword_dict]\nwith open('variables.txt', 'wb') as fp:\n pickle.dump(variables, fp)\n",
"step-5": "import pymongo\nimport pandas as pd\nimport re\nfrom pymongo import MongoClient\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom gensim import corpora\n\nimport pickle\n\nclient = MongoClient()\ndb = client.redditCrawler\ncollection = db.data_test1\n\ndef remove_posts(data, index_list):\n data = data.drop(index_list)\n return data.reset_index(drop=True)\n\ndata = pd.DataFrame(list(collection.find()))\nmod_posts = [i for i in range(len(data)) if 'moronic Monday' in data['title'][i]]\n\n#remove all the mod posts that include 'moronic Monday'\ndata = remove_posts(data, mod_posts)\ntitles = data['title']\ncontent = data['post']\ncomments = data['comments']\n\n# collect only the comments without vote scores, dates, etc\ncomments_in_thread = []\nfor index, thread in enumerate(comments):\n aggregate = []\n for comment in thread:\n if type(comment['comment_reply']) == str:\n aggregate.append(comment['comment_reply'].lower())\n comments_in_thread.append(aggregate)\n\ncomments = comments_in_thread\n#number of titles and post need to be the same\nassert len(titles) == len(content) \nassert len(comments) == len(content)\n\n\n#preprocess\nstop_words = stopwords.words('english')\nstop_words.extend(['would',\n 'people',\n 'money',\n 'think',\n 'thinks',\n 'thanks',\n 'thing',\n 'things',\n 'ok',\n 'nt',\n 'actually',\n 'like',\n 'get',\n 'even',\n 'could',\n 'also',\n ])\n\n#Function to clean off each dataset item; stop words (what, if, is, where, how, I, she)\n\ndef preprocess(text):\n #no content/nan/len of 0\n #text = [re.sub('[^a-zA-Z0-9]+', ' ', word) for word in text]\n text = text.lower()\n text = text.replace('$', ' ')\n text = text.replace('-', ' ')\n text = text.replace(\"/\", ' ')\n text = text.replace(\".\", ' ')\n text = word_tokenize(text)\n## text = [re.sub('[^a-zA-Z0-9]+', '', word) for word in text]\n text = [word for word in text if word not in stop_words] \n text = [word for word in text if word.isalpha()]\n return text\n\n#pass titles and comments through pre-processor\ntitles = [preprocess(title) for title in titles]\nposts = [preprocess(text) for text in content]\n\n# process comments\n##comments = [[preprocess(comment) for comment in thread] for thread in comments]\ntemp = []\nfor i, thread in enumerate(comments):\n temp_thread = []\n temp_thread.extend(titles[i])\n for comment in thread:\n temp_thread.extend(preprocess(comment))\n temp.append(temp_thread)\n\ncomments = temp\n\n# form a list of dictionaries for each title, compile\n# each word and its corresponding frequencies in the post's comment section\nlist_of_dict = []\nfor index, title in enumerate(titles):\n text = ''\n bag_of_words = set(title)\n text = ' '.join(comments_in_thread[index])\n## text = comments[index]\n dictionary = {word:text.count(word) for word in bag_of_words if text.count(word) > 0}\n list_of_dict.append(dictionary)\n\ntitle_keywords = [list(Dict.keys()) if len(Dict) > 0 else [0] for Dict in list_of_dict]\ntitle_keywords = [word for sublist in title_keywords for word in sublist if word != 0 ]\ntitle_keywords = set(title_keywords)\n##title_keywords = set(title_keywords)\n\n##count the number of keywords in the comment section\ndef count_keywords(comments, keywords):\n## sample = ' '.join(comments).split()\n return {word: comments.count(word) for word in keywords if comments.count(word) > 0}\n\nkeyword_dict = [count_keywords(comment, title_keywords) for comment in comments]\nfor index, thread in enumerate(keyword_dict):\n #normalize each keyword by the number of words 
present\n df = pd.DataFrame()\n df['word'] = thread.keys()\n df['count'] = thread.values()\n df = df.sort_values('count', ascending = False)\n #dividing by number of words in each thread\n## df['frequency'] = df['count']/(len(comments[index]))\n df['frequency'] = df['count']/(1+len(comments_in_thread[index]))\n df['count'] = df['count']/(len(comments[index]))**0.5\n keyword_dict[index] = df.reset_index(drop=True)\n\n#save varialbes\nvariables = [data['title'], titles, posts, comments, comments_in_thread,\n list_of_dict, title_keywords, keyword_dict]\n\nwith open('variables.txt', 'wb') as fp:\n pickle.dump(variables, fp)\n\n\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# Comic Downloader
#! python3
import urllib, bs4, requests
url = 'http://explosm.net/comics/39/'
base_url = 'http://explosm.net'
for i in range(1,4000):
    req = requests.get(url)
    req.raise_for_status()
    soup = bs4.BeautifulSoup(req.text, "lxml")
    comic = soup.select('#main-comic')
    comicUrl = 'http:' + comic[0].get('src')
    urllib.request.urlretrieve(comicUrl, str(i))
    print(str(i) + ' done')
    next_comic = soup.select('.next-comic')
    url = base_url + next_comic[0].get('href')
|
normal
|
{
"blob_id": "66e77b8237850a29127402310bfab3061f7ebca4",
"index": 2346,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-3": "<mask token>\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-4": "import urllib, bs4, requests\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\nfor i in range(1, 4000):\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, 'lxml')\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')\n",
"step-5": "# Comic Downloader\n\n#! python3\n\nimport urllib, bs4, requests\nurl = 'http://explosm.net/comics/39/'\nbase_url = 'http://explosm.net'\n\nfor i in range(1,4000):\n\n req = requests.get(url)\n req.raise_for_status()\n soup = bs4.BeautifulSoup(req.text, \"lxml\")\n comic = soup.select('#main-comic')\n comicUrl = 'http:' + comic[0].get('src')\n urllib.request.urlretrieve(comicUrl, str(i))\n print(str(i) + ' done')\n next_comic = soup.select('.next-comic')\n url = base_url + next_comic[0].get('href')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
@File : jump.py
@copyright : GG
@Coder: Leslie_s
@Date: 2020/1/26
"""
import requests
from lxml import html
import pandas as pd
import time
import pandas as pd
import datetime
import re
import json
headers = {
    'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange',
    'accept-encoding':'gzip, deflate, br',
    'accept-language':'zh-CN,zh;q=0.8',
    'upgrade - insecure - requests': '1',
    'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',# 需要填写
    }
url = 'https://3g.dxy.cn/newh5/view/pneumonia?scene=2&clicktime=1579582238&enterid=1579582238&from=timeline&isappinstalled=0'
r = requests.get(url, headers=headers,timeout=15,allow_redirects=False)
r.encoding='utf-8'
t1 = html.fromstring(r.text)
doc = r.text
test_com = r'(?P<first>"provinceName":"[\u4e00-\u9fa5]{1,9}"),(?P<second>"provinceShortName":"[\u4e00-\u9fa5]{1,9}"),(?P<three>"confirmedCount":\d{1,9})'
iter_dict = {}
gg_a = r'provinceName":(?P<first>"[\u4e00-\u9fa5]{1,9}"),"provinceShortName":(?P<second>"[\u4e00-\u9fa5]{1,9}"),"confirmedCount":(?P<three>\d{1,9})'
r=re.finditer(gg_a,doc)
train = re.findall(gg_a,doc)
for i in r:
    print(i.group(1))
    provinceName=i.group(1)
    provinceShortName=i.group(2)
    confirmedCount=i.group(3)
    iter_dict.setdefault( provinceShortName,confirmedCount)
#
# result = re.finditer(test_com,r.text)
# for i in result:
# print(i.group(1))
#
# search = re.finditer(test_com, r.text)
# print('group 0:', search.group(0))
# list_provincename=[]
# list_confircount=[]
# for match in matches_pro:
# print(match.group(1))
# list_provincename.append(match.group(1))
# for match in matches_confirmedCount:
# print(match.group(1))
# list_confircount.append(match.group(1))
#
# dic_result = dict(zip(list_confircount,list_provincename))
#
|
normal
|
{
"blob_id": "5aecd021297fee4407d6b529c24afb3c6398f7ba",
"index": 7205,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in r:\n print(i.group(1))\n provinceName = i.group(1)\n provinceShortName = i.group(2)\n confirmedCount = i.group(3)\n iter_dict.setdefault(provinceShortName, confirmedCount)\n",
"step-3": "<mask token>\nheaders = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange'\n , 'accept-encoding': 'gzip, deflate, br', 'accept-language':\n 'zh-CN,zh;q=0.8', 'upgrade - insecure - requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\n }\nurl = (\n 'https://3g.dxy.cn/newh5/view/pneumonia?scene=2&clicktime=1579582238&enterid=1579582238&from=timeline&isappinstalled=0'\n )\nr = requests.get(url, headers=headers, timeout=15, allow_redirects=False)\nr.encoding = 'utf-8'\nt1 = html.fromstring(r.text)\ndoc = r.text\ntest_com = (\n '(?P<first>\"provinceName\":\"[\\\\u4e00-\\\\u9fa5]{1,9}\"),(?P<second>\"provinceShortName\":\"[\\\\u4e00-\\\\u9fa5]{1,9}\"),(?P<three>\"confirmedCount\":\\\\d{1,9})'\n )\niter_dict = {}\ngg_a = (\n 'provinceName\":(?P<first>\"[\\\\u4e00-\\\\u9fa5]{1,9}\"),\"provinceShortName\":(?P<second>\"[\\\\u4e00-\\\\u9fa5]{1,9}\"),\"confirmedCount\":(?P<three>\\\\d{1,9})'\n )\nr = re.finditer(gg_a, doc)\ntrain = re.findall(gg_a, doc)\nfor i in r:\n print(i.group(1))\n provinceName = i.group(1)\n provinceShortName = i.group(2)\n confirmedCount = i.group(3)\n iter_dict.setdefault(provinceShortName, confirmedCount)\n",
"step-4": "<mask token>\nimport requests\nfrom lxml import html\nimport pandas as pd\nimport time\nimport pandas as pd\nimport datetime\nimport re\nimport json\nheaders = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange'\n , 'accept-encoding': 'gzip, deflate, br', 'accept-language':\n 'zh-CN,zh;q=0.8', 'upgrade - insecure - requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\n }\nurl = (\n 'https://3g.dxy.cn/newh5/view/pneumonia?scene=2&clicktime=1579582238&enterid=1579582238&from=timeline&isappinstalled=0'\n )\nr = requests.get(url, headers=headers, timeout=15, allow_redirects=False)\nr.encoding = 'utf-8'\nt1 = html.fromstring(r.text)\ndoc = r.text\ntest_com = (\n '(?P<first>\"provinceName\":\"[\\\\u4e00-\\\\u9fa5]{1,9}\"),(?P<second>\"provinceShortName\":\"[\\\\u4e00-\\\\u9fa5]{1,9}\"),(?P<three>\"confirmedCount\":\\\\d{1,9})'\n )\niter_dict = {}\ngg_a = (\n 'provinceName\":(?P<first>\"[\\\\u4e00-\\\\u9fa5]{1,9}\"),\"provinceShortName\":(?P<second>\"[\\\\u4e00-\\\\u9fa5]{1,9}\"),\"confirmedCount\":(?P<three>\\\\d{1,9})'\n )\nr = re.finditer(gg_a, doc)\ntrain = re.findall(gg_a, doc)\nfor i in r:\n print(i.group(1))\n provinceName = i.group(1)\n provinceShortName = i.group(2)\n confirmedCount = i.group(3)\n iter_dict.setdefault(provinceShortName, confirmedCount)\n",
"step-5": "\"\"\"\n@File : jump.py\n@copyright : GG\n@Coder: Leslie_s\n@Date: 2020/1/26\n\"\"\"\nimport requests\nfrom lxml import html\nimport pandas as pd\nimport time\nimport pandas as pd\nimport datetime\nimport re\nimport json\n\nheaders = {\n\n 'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange',\n\n 'accept-encoding':'gzip, deflate, br',\n\n 'accept-language':'zh-CN,zh;q=0.8',\n 'upgrade - insecure - requests': '1',\n 'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',# 需要填写\n\n }\nurl = 'https://3g.dxy.cn/newh5/view/pneumonia?scene=2&clicktime=1579582238&enterid=1579582238&from=timeline&isappinstalled=0'\nr = requests.get(url, headers=headers,timeout=15,allow_redirects=False)\nr.encoding='utf-8'\nt1 = html.fromstring(r.text)\ndoc = r.text\ntest_com = r'(?P<first>\"provinceName\":\"[\\u4e00-\\u9fa5]{1,9}\"),(?P<second>\"provinceShortName\":\"[\\u4e00-\\u9fa5]{1,9}\"),(?P<three>\"confirmedCount\":\\d{1,9})'\niter_dict = {}\ngg_a = r'provinceName\":(?P<first>\"[\\u4e00-\\u9fa5]{1,9}\"),\"provinceShortName\":(?P<second>\"[\\u4e00-\\u9fa5]{1,9}\"),\"confirmedCount\":(?P<three>\\d{1,9})'\nr=re.finditer(gg_a,doc)\ntrain = re.findall(gg_a,doc)\nfor i in r:\n print(i.group(1))\n provinceName=i.group(1)\n provinceShortName=i.group(2)\n confirmedCount=i.group(3)\n iter_dict.setdefault( provinceShortName,confirmedCount)\n#\n# result = re.finditer(test_com,r.text)\n# for i in result:\n# print(i.group(1))\n#\n# search = re.finditer(test_com, r.text)\n# print('group 0:', search.group(0))\n# list_provincename=[]\n# list_confircount=[]\n# for match in matches_pro:\n# print(match.group(1))\n# list_provincename.append(match.group(1))\n# for match in matches_confirmedCount:\n# print(match.group(1))\n# list_confircount.append(match.group(1))\n#\n# dic_result = dict(zip(list_confircount,list_provincename))\n#\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re, glob, os
lst = []
def rename(dir, pattern, titlePattern):
    for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
        title, ext = os.path.splitext(os.path.basename(pathAndFilename))
        #title = title[22:]
        #hexa = []
        #hexb = []
        hexa = title[:2]
        hexb = title[2:4]
        #title = title[4:]
        title = (title[4:] + '_' + str(int(hexa,16)) + '_' + str(int(hexb, 16)))
        #print(title)
        #lst.append(title)
        os.rename(pathAndFilename,
                  os.path.join(dir, titlePattern % title + ext))
def renamer(files, pattern, replacement):
    for pathname in glob.glob(files):
        basename= os.path.basename(pathname)
        new_filename= re.sub(pattern, replacement, basename)
        if new_filename != basename:
            os.rename(
                pathname,
                os.path.join(os.path.dirname(pathname), new_filename))
rename(r'C:\test', r'*.jpeg', r'%s')
#print(lst)
|
normal
|
{
"blob_id": "22aa6042b77c3cfd1f102a0ea22a43223e366d2f",
"index": 1476,
"step-1": "<mask token>\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-3": "<mask token>\nlst = []\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-4": "import re, glob, os\nlst = []\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-5": "import re, glob, os\nlst = []\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n #title = title[22:]\n #hexa = []\n #hexb = []\n hexa = title[:2]\n hexb = title[2:4]\n #title = title[4:]\n\n title = (title[4:] + '_' + str(int(hexa,16)) + '_' + str(int(hexb, 16)))\n \n #print(title)\n #lst.append(title)\n os.rename(pathAndFilename, \n os.path.join(dir, titlePattern % title + ext))\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename= os.path.basename(pathname)\n new_filename= re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(\n pathname,\n os.path.join(os.path.dirname(pathname), new_filename))\n\n\nrename(r'C:\\test', r'*.jpeg', r'%s')\n#print(lst)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 11 13:25:03 2020
@author: Dr. Michael Sigmond, Canadian Centre for Climate Modelling and Analysis
"""
import matplotlib.colors as col
import matplotlib.cm as cm
import numpy as np
def register_cccmacms(cmap='all'):
"""create my personal colormaps with discrete colors and register them.
default is to register all of them. can also specify which one.
(@@ input arg cmap not implemented yet 2/27/14)
"""
#print 'registering cmaps'
# define individual colors as RGB triples
# from colorwheel.m
# =============================================
    # kem_w20 (20) OR blue2red_w20 family; the 13 colours kept here run from
    # light blue at the top of the list, through green and yellow, to dark red at the bottom
cpool = np.array([ [153,255,255], \
[204,255,229], \
[240,255,240],\
[204,255,153],\
[178,255,102],\
[216,255,76],\
[255,255,51],\
[255,220,51],\
[255,187,51],\
[255,153,51],\
[255,0,0],\
[204,0,0],\
[153,0,0]], \
dtype=float)
acccbar = (cpool/255.)
thecmap = col.ListedColormap(acccbar,'acccbar')
cm.register_cmap(cmap=thecmap)
return
register_cccmacms()
|
normal
|
{
"blob_id": "31a5bf0b275238e651dcb93ce80446a49a4edcf4",
"index": 6561,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef register_cccmacms(cmap='all'):\n \"\"\"create my personal colormaps with discrete colors and register them.\n \n \n default is to register all of them. can also specify which one.\n \n \n (@@ input arg cmap not implemented yet 2/27/14)\n \n \n \"\"\"\n cpool = np.array([[153, 255, 255], [204, 255, 229], [240, 255, 240], [\n 204, 255, 153], [178, 255, 102], [216, 255, 76], [255, 255, 51], [\n 255, 220, 51], [255, 187, 51], [255, 153, 51], [255, 0, 0], [204, 0,\n 0], [153, 0, 0]], dtype=float)\n acccbar = cpool / 255.0\n thecmap = col.ListedColormap(acccbar, 'acccbar')\n cm.register_cmap(cmap=thecmap)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef register_cccmacms(cmap='all'):\n \"\"\"create my personal colormaps with discrete colors and register them.\n \n \n default is to register all of them. can also specify which one.\n \n \n (@@ input arg cmap not implemented yet 2/27/14)\n \n \n \"\"\"\n cpool = np.array([[153, 255, 255], [204, 255, 229], [240, 255, 240], [\n 204, 255, 153], [178, 255, 102], [216, 255, 76], [255, 255, 51], [\n 255, 220, 51], [255, 187, 51], [255, 153, 51], [255, 0, 0], [204, 0,\n 0], [153, 0, 0]], dtype=float)\n acccbar = cpool / 255.0\n thecmap = col.ListedColormap(acccbar, 'acccbar')\n cm.register_cmap(cmap=thecmap)\n return\n\n\nregister_cccmacms()\n",
"step-4": "<mask token>\nimport matplotlib.colors as col\nimport matplotlib.cm as cm\nimport numpy as np\n\n\ndef register_cccmacms(cmap='all'):\n \"\"\"create my personal colormaps with discrete colors and register them.\n \n \n default is to register all of them. can also specify which one.\n \n \n (@@ input arg cmap not implemented yet 2/27/14)\n \n \n \"\"\"\n cpool = np.array([[153, 255, 255], [204, 255, 229], [240, 255, 240], [\n 204, 255, 153], [178, 255, 102], [216, 255, 76], [255, 255, 51], [\n 255, 220, 51], [255, 187, 51], [255, 153, 51], [255, 0, 0], [204, 0,\n 0], [153, 0, 0]], dtype=float)\n acccbar = cpool / 255.0\n thecmap = col.ListedColormap(acccbar, 'acccbar')\n cm.register_cmap(cmap=thecmap)\n return\n\n\nregister_cccmacms()\n",
"step-5": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 13:25:03 2020\n\n@author: Dr. Michael Sigmond, Canadian Centre for Climate Modelling and Analysis\n\"\"\"\n\n\nimport matplotlib.colors as col\n\n\nimport matplotlib.cm as cm\n\nimport numpy as np\n\n\ndef register_cccmacms(cmap='all'):\n \n \n \"\"\"create my personal colormaps with discrete colors and register them.\n \n \n default is to register all of them. can also specify which one.\n \n \n (@@ input arg cmap not implemented yet 2/27/14)\n \n \n \"\"\"\n \n \n #print 'registering cmaps'\n \n \n \n \n \n \n # define individual colors as RGB triples\n \n \n # from colorwheel.m\n \n \n # =============================================\n \n \n # kem_w20 (20) OR blue2red_w20\n \n \n # blueish at top, white in middle, reddish at bottom\n \n \n \n cpool = np.array([ [153,255,255], \\\n \n \n [204,255,229], \\\n \n \n [240,255,240],\\\n \n \n [204,255,153],\\\n \n \n [178,255,102],\\\n \n \n [216,255,76],\\\n \n \n [255,255,51],\\\n \n \n [255,220,51],\\\n \n \n [255,187,51],\\\n \n \n [255,153,51],\\\n \n \n [255,0,0],\\\n \n \n [204,0,0],\\\n \n \n [153,0,0]], \\\n \n \n dtype=float)\n \n \n \n acccbar = (cpool/255.)\n \n \n thecmap = col.ListedColormap(acccbar,'acccbar')\n \n \n cm.register_cmap(cmap=thecmap)\n\n return\n\nregister_cccmacms()\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
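A minimal usage sketch for the 'acccbar' colormap registered above, assuming the script has already been run (so the name is registered) and a matplotlib version where cm.register_cmap still exists (pre-3.9); the random field is illustrative.

import numpy as np
import matplotlib.pyplot as plt

field = np.random.randn(40, 40)                         # made-up data, just to show the ramp
plt.pcolormesh(field, cmap='acccbar', vmin=-3, vmax=3)  # string lookup resolves the registered cmap
plt.colorbar()
plt.show()
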
import urllib
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib import messages
from django.utils.translation import gettext as _
from .forms import CountryForm
from .models import Countries
from django.utils.timezone import datetime
from django.contrib.auth.decorators import login_required
# COUNTRIES LIST
@login_required(login_url='user_login')
def countries_list(request):
countries = Countries.objects.all()
context = {
'countries': countries,
}
return render(request, 'map_app/countries/list.html', context)
# CREATE COUNTRY
@login_required(login_url='user_login')
def countries_add(request):
if request.method == 'POST':
form = CountryForm(request.POST or None)
if form.is_valid():
form.save()
messages.success(
request, 'the country has been added successfuly :) ')
return redirect('countries_add')
else:
form = CountryForm()
context = {
'form': form,
}
return render(request, 'map_app/countries/add.html', context)
# DETAILS OF COUNTRY
@login_required(login_url='user_login')
def country_details(request, id):
country = get_object_or_404(Countries, id=id)
context = {
'country': country,
}
return render(request, 'map_app/countries/details.html', context)
# UPDATE COUNTRY
@login_required(login_url='user_login')
def country_edit(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
form = CountryForm(request.POST or None, instance=country)
if form.is_valid():
inst = form.save(commit=False)
inst.updated_at = datetime.utcnow()
inst.save()
messages.success(
request, 'the country has been updated successfuly :) ')
return redirect('countries_list')
else:
form = CountryForm(instance=country)
context = {
'country': country,
'form': form,
}
return render(request, 'map_app/countries/edit.html', context)
# DELETE COUNTRY
@login_required(login_url='user_login')
def country_delete(request, id):
country = get_object_or_404(Countries, id=id)
if request.method == 'POST':
country.delete()
messages.success(
request, 'the country has been deleted in successfuly')
return redirect('home')
context = {
'country': country,
}
return render(request, 'map_app/countries/delete.html', context)
|
normal
|
{
"blob_id": "8640de519ebf7f95588ac40b55662da85ffc926e",
"index": 5224,
"step-1": "<mask token>\n\n\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request,\n 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {'form': form}\n return render(request, 'map_app/countries/add.html', context)\n\n\n<mask token>\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(request,\n 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n context = {'country': country, 'form': form}\n return render(request, 'map_app/countries/edit.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request,\n 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {'form': form}\n return render(request, 'map_app/countries/add.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_details(request, id):\n country = get_object_or_404(Countries, id=id)\n context = {'country': country}\n return render(request, 'map_app/countries/details.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(request,\n 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n context = {'country': country, 'form': form}\n return render(request, 'map_app/countries/edit.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_delete(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n country.delete()\n messages.success(request, 'the country has been deleted in successfuly'\n )\n return redirect('home')\n context = {'country': country}\n return render(request, 'map_app/countries/delete.html', context)\n",
"step-3": "<mask token>\n\n\n@login_required(login_url='user_login')\ndef countries_list(request):\n countries = Countries.objects.all()\n context = {'countries': countries}\n return render(request, 'map_app/countries/list.html', context)\n\n\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request,\n 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {'form': form}\n return render(request, 'map_app/countries/add.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_details(request, id):\n country = get_object_or_404(Countries, id=id)\n context = {'country': country}\n return render(request, 'map_app/countries/details.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(request,\n 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n context = {'country': country, 'form': form}\n return render(request, 'map_app/countries/edit.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_delete(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n country.delete()\n messages.success(request, 'the country has been deleted in successfuly'\n )\n return redirect('home')\n context = {'country': country}\n return render(request, 'map_app/countries/delete.html', context)\n",
"step-4": "import urllib\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.utils.translation import gettext as _\nfrom .forms import CountryForm\nfrom .models import Countries\nfrom django.utils.timezone import datetime\nfrom django.contrib.auth.decorators import login_required\n\n\n@login_required(login_url='user_login')\ndef countries_list(request):\n countries = Countries.objects.all()\n context = {'countries': countries}\n return render(request, 'map_app/countries/list.html', context)\n\n\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(request,\n 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {'form': form}\n return render(request, 'map_app/countries/add.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_details(request, id):\n country = get_object_or_404(Countries, id=id)\n context = {'country': country}\n return render(request, 'map_app/countries/details.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(request,\n 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n context = {'country': country, 'form': form}\n return render(request, 'map_app/countries/edit.html', context)\n\n\n@login_required(login_url='user_login')\ndef country_delete(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n country.delete()\n messages.success(request, 'the country has been deleted in successfuly'\n )\n return redirect('home')\n context = {'country': country}\n return render(request, 'map_app/countries/delete.html', context)\n",
"step-5": "import urllib\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.utils.translation import gettext as _\nfrom .forms import CountryForm\nfrom .models import Countries\nfrom django.utils.timezone import datetime\nfrom django.contrib.auth.decorators import login_required\n\n\n# COUNTRIES LIST\n@login_required(login_url='user_login')\ndef countries_list(request):\n countries = Countries.objects.all()\n context = {\n 'countries': countries,\n }\n return render(request, 'map_app/countries/list.html', context)\n\n\n# CREATE COUNTRY\n@login_required(login_url='user_login')\ndef countries_add(request):\n if request.method == 'POST':\n form = CountryForm(request.POST or None)\n if form.is_valid():\n form.save()\n messages.success(\n request, 'the country has been added successfuly :) ')\n return redirect('countries_add')\n else:\n form = CountryForm()\n context = {\n 'form': form,\n }\n return render(request, 'map_app/countries/add.html', context)\n\n\n# DETAILS OF COUNTRY\n@login_required(login_url='user_login')\ndef country_details(request, id):\n country = get_object_or_404(Countries, id=id)\n\n context = {\n 'country': country,\n }\n return render(request, 'map_app/countries/details.html', context)\n\n# UPDATE COUNTRY\n\n\n@login_required(login_url='user_login')\ndef country_edit(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n form = CountryForm(request.POST or None, instance=country)\n if form.is_valid():\n inst = form.save(commit=False)\n inst.updated_at = datetime.utcnow()\n inst.save()\n messages.success(\n request, 'the country has been updated successfuly :) ')\n return redirect('countries_list')\n else:\n form = CountryForm(instance=country)\n\n context = {\n 'country': country,\n 'form': form,\n }\n return render(request, 'map_app/countries/edit.html', context)\n\n\n# DELETE COUNTRY\n@login_required(login_url='user_login')\ndef country_delete(request, id):\n country = get_object_or_404(Countries, id=id)\n if request.method == 'POST':\n country.delete()\n\n messages.success(\n request, 'the country has been deleted in successfuly')\n return redirect('home')\n\n context = {\n 'country': country,\n }\n return render(request, 'map_app/countries/delete.html', context)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
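One possible urls.py wiring for the country views above. The route names 'countries_list' and 'countries_add' match the redirect() targets in the views; the remaining names and all of the URL paths are assumptions for illustration.

from django.urls import path
from . import views

urlpatterns = [
    path('countries/', views.countries_list, name='countries_list'),
    path('countries/add/', views.countries_add, name='countries_add'),
    path('countries/<int:id>/', views.country_details, name='country_details'),
    path('countries/<int:id>/edit/', views.country_edit, name='country_edit'),
    path('countries/<int:id>/delete/', views.country_delete, name='country_delete'),
]
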
import nevergrad as ng
import numpy as np
import torch
from pix2latent.utils.image import binarize
class _BaseNevergradOptimizer():
"""
Base template for NeverGrad optimization. Should be used jointly with
BaseOptimizer.
For full list of available optimizers
> https://github.com/facebookresearch/nevergrad
or ...
> print(self.valid_methods)
Args:
method: nevergrad optimization method
NOTE:
nevergrad CMA have been observed to perform wrose than the original
codebase. use with warning. nevergrad has a perk of being optimized
in parallel, hence batch-size can be arbitrarily chosen.
"""
def __init__(self, method):
self.method = method
self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]
# this is not an exhaustive list
self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']
self.is_sequential = self.method in self.sequential_methods
if self.is_sequential:
seq_msg = '{} is a sequential method. batch size is set to 1'
cprint(seq_msg.format(self.method), 'y')
assert self.method in self.valid_methods, \
f'unknown nevergrad method: {self.method}'
self.ng_optimizers = {}
self._sampled = {}
return
@torch.no_grad()
def setup_ng(self, var_manager, budget):
"""
initializes NeverGrad optimizer.
Args
var_manager (VariableManger): instance of the variable manager
budget (int): number of optimization iteration.
"""
for var_name, var_dict in var_manager.variable_info.items():
if var_dict['grad_free'] is False:
continue
if type(var_dict['grad_free']) == tuple:
mu, sigma = var_dict['grad_free']
if mu is None:
mu = np.zeros(var_dict['shape'])
if sigma is None:
sigma = 1.
cma_opt = CMA(mu, sigma=sigma)
else:
mu = np.zeros(var_dict['shape'])
sigma = 1.0
opt_fn = ng.optimizers.registry[self.method]
p = ng.p.Array(init=mu)#.set_mutation(sigma=sigma)
ng_opt = opt_fn(parametrization=p, budget=budget)
self.ng_optimizers[(var_dict['var_type'], var_name)] = ng_opt
assert len(self.ng_optimizers.keys()) == 1, \
'currently only a single input variable can be optimized via '+\
'Nevergrad but got: {}'.format(self.ng_optimizers.keys())
return
@torch.no_grad()
def ng_init(self, var_manager, num_samples):
"""
Args
var_manager (VariableManger): instance of the variable manager
num_samples (int): number of samples for mini-batch optimization
"""
if self.is_sequential:
vars = var_manager.initialize(num_seeds=1)
num_samples = 1
else:
vars = var_manager.initialize(num_samples=num_samples)
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = [ng_opt.ask() for _ in range(num_samples)]
_ng_data = np.concatenate([x.args for x in ng_data])
for i, d in enumerate(_ng_data):
vars[var_type][var_name].data[i].data = \
torch.Tensor(d).data.type_as(
vars[var_type][var_name].data[i].data)
self._sampled[(var_type, var_name)] = ng_data
return vars
@torch.no_grad()
def ng_update(self, variables, loss=None, inverted_loss=False):
"""
Updates NG distribution either with the provided loss or loss that
is recomputed.
Args:
variables (dict): a dictionary instance generated from the
variable manager.
loss (array or list): a 1-dimensional array or list consisting of
losses corresponding to each sample. If the loss is not
provided, uses the variables to recompute the loss.
[Default: None]
inverted_loss (bool): if True, the loss is computed after inverting
the generated images back to the original target. For example
this is used to compute the loss on the original target.
[Default: False]
"""
for (var_type, var_name), ng_opt in self.ng_optimizers.items():
ng_data = self._sampled[(var_type, var_name)]
if loss is None:
out, loss, _ = self.step(variables, optimize=False)
if inverted_loss and hasattr(variables, 'transform'):
target_type = \
self.var_manager.variable_info['target']['var_type']
weight_type = \
self.var_manager.variable_info['weight']['var_type']
target = self.var_manager.variable_info['target']['default']
weight = self.var_manager.variable_info['weight']['default']
target = target.unsqueeze(0).type_as(out)
weight = weight.unsqueeze(0).type_as(out)
t_fn = self.transform_fns['target']['fn']
t_param = torch.stack(variables.transform.t.data)
out = t_fn(out, t_param, invert=True)
loss = self.loss_fn(out, target, binarize(weight))
loss = loss.cpu().detach().numpy()
for d, l in zip(ng_data, loss):
ng_opt.tell(d, l)
return
|
normal
|
{
"blob_id": "4a136a6284add3bcbd7f9546e18e79151cea685f",
"index": 623,
"step-1": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n <mask token>\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-3": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n <mask token>\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n\n @torch.no_grad()\n def ng_init(self, var_manager, num_samples):\n \"\"\"\n Args\n var_manager (VariableManger): instance of the variable manager\n num_samples (int): number of samples for mini-batch optimization\n \"\"\"\n if self.is_sequential:\n vars = var_manager.initialize(num_seeds=1)\n num_samples = 1\n else:\n vars = var_manager.initialize(num_samples=num_samples)\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\n _ng_data = np.concatenate([x.args for x in ng_data])\n for i, d in enumerate(_ng_data):\n vars[var_type][var_name].data[i].data = torch.Tensor(d\n ).data.type_as(vars[var_type][var_name].data[i].data)\n self._sampled[var_type, var_name] = ng_data\n return vars\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. 
For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-4": "<mask token>\n\n\nclass _BaseNevergradOptimizer:\n \"\"\"\n Base template for NeverGrad optimization. Should be used jointly with\n BaseOptimizer.\n\n For full list of available optimizers\n > https://github.com/facebookresearch/nevergrad\n\n or ...\n > print(self.valid_methods)\n\n Args:\n method: nevergrad optimization method\n\n NOTE:\n nevergrad CMA have been observed to perform wrose than the original\n codebase. use with warning. nevergrad has a perk of being optimized\n in parallel, hence batch-size can be arbitrarily chosen.\n \"\"\"\n\n def __init__(self, method):\n self.method = method\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\n self.is_sequential = self.method in self.sequential_methods\n if self.is_sequential:\n seq_msg = '{} is a sequential method. batch size is set to 1'\n cprint(seq_msg.format(self.method), 'y')\n assert self.method in self.valid_methods, f'unknown nevergrad method: {self.method}'\n self.ng_optimizers = {}\n self._sampled = {}\n return\n\n @torch.no_grad()\n def setup_ng(self, var_manager, budget):\n \"\"\"\n initializes NeverGrad optimizer.\n\n Args\n var_manager (VariableManger): instance of the variable manager\n budget (int): number of optimization iteration.\n \"\"\"\n for var_name, var_dict in var_manager.variable_info.items():\n if var_dict['grad_free'] is False:\n continue\n if type(var_dict['grad_free']) == tuple:\n mu, sigma = var_dict['grad_free']\n if mu is None:\n mu = np.zeros(var_dict['shape'])\n if sigma is None:\n sigma = 1.0\n cma_opt = CMA(mu, sigma=sigma)\n else:\n mu = np.zeros(var_dict['shape'])\n sigma = 1.0\n opt_fn = ng.optimizers.registry[self.method]\n p = ng.p.Array(init=mu)\n ng_opt = opt_fn(parametrization=p, budget=budget)\n self.ng_optimizers[var_dict['var_type'], var_name] = ng_opt\n assert len(self.ng_optimizers.keys()\n ) == 1, 'currently only a single input variable can be optimized via ' + 'Nevergrad but got: {}'.format(\n self.ng_optimizers.keys())\n return\n\n @torch.no_grad()\n def ng_init(self, var_manager, num_samples):\n \"\"\"\n Args\n var_manager (VariableManger): instance of the variable manager\n num_samples (int): number of samples for mini-batch optimization\n \"\"\"\n if self.is_sequential:\n vars = var_manager.initialize(num_seeds=1)\n num_samples = 1\n else:\n vars = var_manager.initialize(num_samples=num_samples)\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\n _ng_data = np.concatenate([x.args for x in ng_data])\n for i, d in enumerate(_ng_data):\n vars[var_type][var_name].data[i].data = torch.Tensor(d\n ).data.type_as(vars[var_type][var_name].data[i].data)\n self._sampled[var_type, var_name] = ng_data\n return vars\n\n @torch.no_grad()\n def ng_update(self, variables, loss=None, inverted_loss=False):\n \"\"\"\n Updates NG distribution either with the provided loss or loss that\n is recomputed.\n\n Args:\n variables (dict): a dictionary instance generated from the\n variable manager.\n loss (array or list): a 1-dimensional array or list consisting of\n losses corresponding to each sample. If the loss is not\n provided, uses the variables to recompute the loss.\n [Default: None]\n inverted_loss (bool): if True, the loss is computed after inverting\n the generated images back to the original target. 
For example\n this is used to compute the loss on the original target.\n [Default: False]\n \"\"\"\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\n ng_data = self._sampled[var_type, var_name]\n if loss is None:\n out, loss, _ = self.step(variables, optimize=False)\n if inverted_loss and hasattr(variables, 'transform'):\n target_type = self.var_manager.variable_info['target'][\n 'var_type']\n weight_type = self.var_manager.variable_info['weight'][\n 'var_type']\n target = self.var_manager.variable_info['target']['default']\n weight = self.var_manager.variable_info['weight']['default']\n target = target.unsqueeze(0).type_as(out)\n weight = weight.unsqueeze(0).type_as(out)\n t_fn = self.transform_fns['target']['fn']\n t_param = torch.stack(variables.transform.t.data)\n out = t_fn(out, t_param, invert=True)\n loss = self.loss_fn(out, target, binarize(weight))\n loss = loss.cpu().detach().numpy()\n for d, l in zip(ng_data, loss):\n ng_opt.tell(d, l)\n return\n",
"step-5": "import nevergrad as ng\r\n\r\nimport numpy as np\r\nimport torch\r\n\r\nfrom pix2latent.utils.image import binarize\r\n\r\n\r\n\r\nclass _BaseNevergradOptimizer():\r\n \"\"\"\r\n Base template for NeverGrad optimization. Should be used jointly with\r\n BaseOptimizer.\r\n\r\n For full list of available optimizers\r\n > https://github.com/facebookresearch/nevergrad\r\n\r\n or ...\r\n > print(self.valid_methods)\r\n\r\n Args:\r\n method: nevergrad optimization method\r\n\r\n NOTE:\r\n nevergrad CMA have been observed to perform wrose than the original\r\n codebase. use with warning. nevergrad has a perk of being optimized\r\n in parallel, hence batch-size can be arbitrarily chosen.\r\n \"\"\"\r\n\r\n def __init__(self, method):\r\n\r\n self.method = method\r\n self.valid_methods = [x[0] for x in ng.optimizers.registry.items()]\r\n\r\n # this is not an exhaustive list\r\n self.sequential_methods = ['SQPCMA', 'chainCMAPowell', 'Powell']\r\n self.is_sequential = self.method in self.sequential_methods\r\n\r\n if self.is_sequential:\r\n seq_msg = '{} is a sequential method. batch size is set to 1'\r\n cprint(seq_msg.format(self.method), 'y')\r\n\r\n assert self.method in self.valid_methods, \\\r\n f'unknown nevergrad method: {self.method}'\r\n\r\n self.ng_optimizers = {}\r\n self._sampled = {}\r\n return\r\n\r\n\r\n @torch.no_grad()\r\n def setup_ng(self, var_manager, budget):\r\n \"\"\"\r\n initializes NeverGrad optimizer.\r\n\r\n Args\r\n var_manager (VariableManger): instance of the variable manager\r\n budget (int): number of optimization iteration.\r\n \"\"\"\r\n\r\n for var_name, var_dict in var_manager.variable_info.items():\r\n\r\n if var_dict['grad_free'] is False:\r\n continue\r\n\r\n if type(var_dict['grad_free']) == tuple:\r\n mu, sigma = var_dict['grad_free']\r\n\r\n if mu is None:\r\n mu = np.zeros(var_dict['shape'])\r\n\r\n if sigma is None:\r\n sigma = 1.\r\n\r\n cma_opt = CMA(mu, sigma=sigma)\r\n\r\n else:\r\n mu = np.zeros(var_dict['shape'])\r\n sigma = 1.0\r\n\r\n opt_fn = ng.optimizers.registry[self.method]\r\n p = ng.p.Array(init=mu)#.set_mutation(sigma=sigma)\r\n ng_opt = opt_fn(parametrization=p, budget=budget)\r\n\r\n self.ng_optimizers[(var_dict['var_type'], var_name)] = ng_opt\r\n\r\n assert len(self.ng_optimizers.keys()) == 1, \\\r\n 'currently only a single input variable can be optimized via '+\\\r\n 'Nevergrad but got: {}'.format(self.ng_optimizers.keys())\r\n return\r\n\r\n\r\n @torch.no_grad()\r\n def ng_init(self, var_manager, num_samples):\r\n \"\"\"\r\n Args\r\n var_manager (VariableManger): instance of the variable manager\r\n num_samples (int): number of samples for mini-batch optimization\r\n \"\"\"\r\n if self.is_sequential:\r\n vars = var_manager.initialize(num_seeds=1)\r\n num_samples = 1\r\n else:\r\n vars = var_manager.initialize(num_samples=num_samples)\r\n\r\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\r\n ng_data = [ng_opt.ask() for _ in range(num_samples)]\r\n\r\n _ng_data = np.concatenate([x.args for x in ng_data])\r\n\r\n for i, d in enumerate(_ng_data):\r\n vars[var_type][var_name].data[i].data = \\\r\n torch.Tensor(d).data.type_as(\r\n vars[var_type][var_name].data[i].data)\r\n\r\n self._sampled[(var_type, var_name)] = ng_data\r\n\r\n return vars\r\n\r\n\r\n @torch.no_grad()\r\n def ng_update(self, variables, loss=None, inverted_loss=False):\r\n\r\n \"\"\"\r\n Updates NG distribution either with the provided loss or loss that\r\n is recomputed.\r\n\r\n Args:\r\n variables (dict): a dictionary instance generated from 
the\r\n variable manager.\r\n loss (array or list): a 1-dimensional array or list consisting of\r\n losses corresponding to each sample. If the loss is not\r\n provided, uses the variables to recompute the loss.\r\n [Default: None]\r\n inverted_loss (bool): if True, the loss is computed after inverting\r\n the generated images back to the original target. For example\r\n this is used to compute the loss on the original target.\r\n [Default: False]\r\n \"\"\"\r\n\r\n for (var_type, var_name), ng_opt in self.ng_optimizers.items():\r\n\r\n ng_data = self._sampled[(var_type, var_name)]\r\n\r\n if loss is None:\r\n out, loss, _ = self.step(variables, optimize=False)\r\n\r\n if inverted_loss and hasattr(variables, 'transform'):\r\n\r\n target_type = \\\r\n self.var_manager.variable_info['target']['var_type']\r\n weight_type = \\\r\n self.var_manager.variable_info['weight']['var_type']\r\n\r\n target = self.var_manager.variable_info['target']['default']\r\n weight = self.var_manager.variable_info['weight']['default']\r\n\r\n target = target.unsqueeze(0).type_as(out)\r\n weight = weight.unsqueeze(0).type_as(out)\r\n\r\n t_fn = self.transform_fns['target']['fn']\r\n t_param = torch.stack(variables.transform.t.data)\r\n out = t_fn(out, t_param, invert=True)\r\n\r\n loss = self.loss_fn(out, target, binarize(weight))\r\n loss = loss.cpu().detach().numpy()\r\n\r\n for d, l in zip(ng_data, loss):\r\n ng_opt.tell(d, l)\r\n\r\n return\r\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
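The mixin above is built around nevergrad's ask/tell interface: ng_init calls ask() once per batch sample and ng_update reports losses back with tell(). A stripped-down sketch of that loop on a toy quadratic, using only the public nevergrad API; the 'CMA' method name and the budget are illustrative choices, not taken from the row.

import numpy as np
import nevergrad as ng

param = ng.p.Array(init=np.zeros(4))
opt = ng.optimizers.registry['CMA'](parametrization=param, budget=100)
for _ in range(100):
    cand = opt.ask()                                      # ng_init: one ask() per sample
    loss = float(np.sum(np.asarray(cand.args[0]) ** 2))   # stand-in for the image loss
    opt.tell(cand, loss)                                  # ng_update: report loss for that candidate
print(opt.provide_recommendation().value)
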
# Patrick Vanegas - Final project
from tkinter import *
import frequency
import turtle
import math
import random
# intitalize a blank window
root = Tk()
# initialize turtle window
window = turtle.Screen()
# Create widgets to be viewed on the Tkinter window
label_1 = Label(root, text = "Enter a number less than 54 to get the Nth most frequent letters in Words.txt: ")
entry = Entry(root)
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
# reset turtle to redraw the piechart if the user enters a new value for N.
turtle.reset()
# set color mode to accept rgb values
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
# draw base circle and fill it with color
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
# draw arc sectors for each probability in the circle
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
# turn radians to degrees
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x = 0, y = 120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font = ("Arial", 10, "normal"))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x = 0, y = 120)
turtle.end_fill()
prev_angle += angle_counter
# draw the arc for the remaining probabilites.
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x = 0, y = 120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi) )
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(probability_of_rest, 3)), font = ("Arial", 10, "normal"))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x = 0, y = 120)
turtle.end_fill()
def calculateFrequencies(arg = None):
# get the text value from the entry field
# if the value is not a valid integer, simply return and do nothing.
try:
result = int(entry.get())
# return if the input is greater than 54
if (result >= 54):
return
# delete the text in the entry field
entry.delete(0, END)
# calculate the most frequent characters
most_frequent_characters = frequency.getNthMostFrequentCharacters(result)
# calculate the probability of all other letters not included in the top N.
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(most_frequent_characters)
# calculate the central angle of the rest of the letters.
angle_of_rest = probability_of_other_characters * 2 * math.pi
# calculate central angles of the most frequenct character's probabilities
central_angles = frequency.getCentralAngles(most_frequent_characters)
# draw pie chart
drawPieChart(central_angles, angle_of_rest, probability_of_other_characters)
except ValueError:
return
# When the user presses enter on the entry field, calculate frequencies
entry.bind('<Return>', calculateFrequencies)
# Position widgets on a grid layout
label_1.grid(row=0)
entry.grid(row=0, column=1)
# keep both the turtle and tkinter windows open until user presses the close button on either
root.mainloop()
window.exitonclick()
|
normal
|
{
"blob_id": "0ac99816248e3306ca6340f7bee8a518877bc3e9",
"index": 1186,
"step-1": "<mask token>\n\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n turtle.reset()\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n angle_counter = 0\n prev_angle = 0\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n angle_counter += angle * (360 / math.pi)\n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255\n ), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(\n 'Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n prev_angle += angle_counter\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi))\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(\n probability_of_rest, 3)), font=('Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n\n\ndef calculateFrequencies(arg=None):\n try:\n result = int(entry.get())\n if result >= 54:\n return\n entry.delete(0, END)\n most_frequent_characters = frequency.getNthMostFrequentCharacters(\n result)\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(\n most_frequent_characters)\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n drawPieChart(central_angles, angle_of_rest,\n probability_of_other_characters)\n except ValueError:\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n turtle.reset()\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n angle_counter = 0\n prev_angle = 0\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n angle_counter += angle * (360 / math.pi)\n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255\n ), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(\n 'Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n prev_angle += angle_counter\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi))\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(\n probability_of_rest, 3)), font=('Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n\n\ndef calculateFrequencies(arg=None):\n try:\n result = int(entry.get())\n if result >= 54:\n return\n entry.delete(0, END)\n most_frequent_characters = frequency.getNthMostFrequentCharacters(\n result)\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(\n most_frequent_characters)\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n drawPieChart(central_angles, angle_of_rest,\n probability_of_other_characters)\n except ValueError:\n return\n\n\nentry.bind('<Return>', calculateFrequencies)\nlabel_1.grid(row=0)\nentry.grid(row=0, column=1)\nroot.mainloop()\nwindow.exitonclick()\n",
"step-3": "<mask token>\nroot = Tk()\nwindow = turtle.Screen()\nlabel_1 = Label(root, text=\n 'Enter a number less than 54 to get the Nth most frequent letters in Words.txt: '\n )\nentry = Entry(root)\n\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n turtle.reset()\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n angle_counter = 0\n prev_angle = 0\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n angle_counter += angle * (360 / math.pi)\n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255\n ), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(\n 'Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n prev_angle += angle_counter\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi))\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(\n probability_of_rest, 3)), font=('Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n\n\ndef calculateFrequencies(arg=None):\n try:\n result = int(entry.get())\n if result >= 54:\n return\n entry.delete(0, END)\n most_frequent_characters = frequency.getNthMostFrequentCharacters(\n result)\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(\n most_frequent_characters)\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n drawPieChart(central_angles, angle_of_rest,\n probability_of_other_characters)\n except ValueError:\n return\n\n\nentry.bind('<Return>', calculateFrequencies)\nlabel_1.grid(row=0)\nentry.grid(row=0, column=1)\nroot.mainloop()\nwindow.exitonclick()\n",
"step-4": "from tkinter import *\nimport frequency\nimport turtle\nimport math\nimport random\nroot = Tk()\nwindow = turtle.Screen()\nlabel_1 = Label(root, text=\n 'Enter a number less than 54 to get the Nth most frequent letters in Words.txt: '\n )\nentry = Entry(root)\n\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n turtle.reset()\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n angle_counter = 0\n prev_angle = 0\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n angle_counter += angle * (360 / math.pi)\n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255\n ), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(\n 'Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n prev_angle += angle_counter\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x=0, y=120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi))\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(\n probability_of_rest, 3)), font=('Arial', 10, 'normal'))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x=0, y=120)\n turtle.end_fill()\n\n\ndef calculateFrequencies(arg=None):\n try:\n result = int(entry.get())\n if result >= 54:\n return\n entry.delete(0, END)\n most_frequent_characters = frequency.getNthMostFrequentCharacters(\n result)\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(\n most_frequent_characters)\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n drawPieChart(central_angles, angle_of_rest,\n probability_of_other_characters)\n except ValueError:\n return\n\n\nentry.bind('<Return>', calculateFrequencies)\nlabel_1.grid(row=0)\nentry.grid(row=0, column=1)\nroot.mainloop()\nwindow.exitonclick()\n",
"step-5": "# Patrick Vanegas - Final project\n\nfrom tkinter import *\nimport frequency\nimport turtle\nimport math\nimport random\n\n# intitalize a blank window\nroot = Tk() \n\n# initialize turtle window\nwindow = turtle.Screen() \n\n# Create widgets to be viewed on the Tkinter window\nlabel_1 = Label(root, text = \"Enter a number less than 54 to get the Nth most frequent letters in Words.txt: \")\nentry = Entry(root)\n\ndef drawPieChart(central_angles, angle_of_rest, probability_of_rest):\n # reset turtle to redraw the piechart if the user enters a new value for N.\n turtle.reset()\n\n # set color mode to accept rgb values\n window.colormode(255)\n turtle.fillcolor('gray')\n turtle.speed(10)\n\n # draw base circle and fill it with color\n turtle.begin_fill()\n turtle.circle(120)\n turtle.end_fill()\n turtle.up()\n\n angle_counter = 0\n prev_angle = 0\n\n # draw arc sectors for each probability in the circle\n for index, (letter, angle, probability) in enumerate(central_angles):\n if index == 0:\n # turn radians to degrees\n angle_counter += angle * (360 / math.pi) \n turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255)))\n turtle.begin_fill()\n turtle.goto(x = 0, y = 120)\n turtle.setheading(angle_counter)\n angle_counter += angle * (360 / math.pi)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle * (360 / math.pi))\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('{}, {}'.format(letter, round(probability, 3)), font = (\"Arial\", 10, \"normal\"))\n turtle.backward(50)\n turtle.setheading(angle * (360 / math.pi) + prev_angle)\n turtle.goto(x = 0, y = 120)\n turtle.end_fill()\n prev_angle += angle_counter\n\n # draw the arc for the remaining probabilites.\n if index == len(central_angles) - 1:\n turtle.fillcolor('gray')\n turtle.begin_fill()\n turtle.goto(x = 0, y = 120)\n turtle.setheading(angle_counter)\n turtle.forward(120)\n turtle.right(270)\n turtle.circle(120, angle_of_rest * (180 / math.pi) )\n angle_counter += angle_of_rest * (180 / math.pi)\n turtle.setheading(angle_counter)\n turtle.forward(50)\n turtle.write('All other letters, {}'.format(round(probability_of_rest, 3)), font = (\"Arial\", 10, \"normal\"))\n turtle.backward(50)\n turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)\n turtle.goto(x = 0, y = 120)\n turtle.end_fill()\n\ndef calculateFrequencies(arg = None):\n # get the text value from the entry field\n # if the value is not a valid integer, simply return and do nothing.\n try:\n result = int(entry.get())\n\n # return if the input is greater than 54\n if (result >= 54):\n return\n \n # delete the text in the entry field\n entry.delete(0, END)\n\n # calculate the most frequent characters\n most_frequent_characters = frequency.getNthMostFrequentCharacters(result)\n\n # calculate the probability of all other letters not included in the top N.\n probability_of_other_characters = frequency.sumOfAllOtherProbabilites(most_frequent_characters)\n\n # calculate the central angle of the rest of the letters.\n angle_of_rest = probability_of_other_characters * 2 * math.pi\n\n # calculate central angles of the most frequenct character's probabilities\n central_angles = frequency.getCentralAngles(most_frequent_characters)\n\n # draw pie chart\n drawPieChart(central_angles, angle_of_rest, probability_of_other_characters)\n except ValueError:\n return \n \n# When the user presses enter on the entry field, calculate frequencies\nentry.bind('<Return>', calculateFrequencies)\n\n# Position 
widgets on a grid layout\nlabel_1.grid(row=0)\nentry.grid(row=0, column=1)\n\n# keep both the turtle and tkinter windows open until user presses the close button on either\nroot.mainloop() \nwindow.exitonclick()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
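A small numeric check of the central-angle arithmetic used in calculateFrequencies above: a probability p corresponds to p * 2 * pi radians, i.e. p * 360 degrees of the pie. The probabilities below are made-up values, not taken from Words.txt.

import math

for p in (0.05, 0.127, 0.25):
    rad = p * 2 * math.pi
    deg = rad * (180 / math.pi)
    print(p, round(rad, 4), round(deg, 1))   # e.g. 0.25 -> 1.5708 rad -> 90.0 deg
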
import os, glob
import numpy as np
from ..algorithms.utils import get_file_manager
from ..algorithms.clustered_writes import *
from ..exp_utils import create_empty_dir
def test_get_entity_sizes():
# in C order
bytes_per_voxel = 1
R = (10,9,10)
cs = (5,3,2)
partition = (2,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
assert bs == 5*3*2
assert brs == 5*3*2*5
assert bss == 5*3*2*5*3
def test_get_strategy():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
test_case = {
5*2*3: 0, # 1 block
5*2*3*4: 0, # 4 blocks
5*2*3*5: 1, # 1 row
5*2*3*5*2: 1, # 2 rows
5*2*3*5*3: 2, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 2, # whole img
5*2*3*5*3*7: 2, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
assert strategy == expected
def test_compute_buffers():
# in C order
bytes_per_voxel = 1
R = (20,9,10)
cs = (5,3,2)
partition = (4,3,5)
bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)
origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel
test_case = {
5*2*3: 4*3*5, # 1 block
5*2*3*4: 4*3*2, # 4 blocks
5*2*3*5: 4*3, # 1 row
5*2*3*5*2: 4*2, # 2 rows
5*2*3*5*3: 4, # 1 slice
5*2*3*5*3*3: 2, # 3 slices
5*2*3*5*3*4: 1, # whole img
5*2*3*5*3*7: 1, # whole img (more mem than necessary)
}
for buffer_mem_size, expected in test_case.items():
strategy = get_strategy(buffer_mem_size, bs, brs, bss)
buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)
# test number of buffers
nb_buffers = len(buffers.values())
assert nb_buffers == expected
def test_clustered_writes():
bpv = 1
R = (20,9,10)
cs = (5,3,2)
ff = 'HDF5'
outdir_path = './outdir'
test_case = [
5*3*2, # 1 block
5*3*2*4, # 4 blocks
5*3*2*5, # 1 row
5*3*2*5*2, # 2 rows
5*3*2*5*3, # 1 slice
5*3*2*5*3*3, # 3 slices
5*3*2*5*3*4, # whole img
5*3*2*5*3*7, # whole img (more mem than necessary)
]
nb_chunks = 4*3*5
# create input array
origarr_filepath = './original_array.hdf5'
data = np.random.normal(size=R)
fm = get_file_manager(ff)
if os.path.isfile(origarr_filepath):
os.remove(origarr_filepath)
fm.write(origarr_filepath, data, R, _slices=None)
for m in test_case:
create_empty_dir(outdir_path)
clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)
workdir = os.getcwd()
os.chdir(outdir_path)
filenames = list()
for filename in glob.glob("*.hdf5"):
arr = fm.read_all(filename)
assert arr.shape == cs
filenames.append(filename)
assert len(filenames) == nb_chunks
os.chdir(workdir)
|
normal
|
{
"blob_id": "6dd11f71e514a46462bf0b97ddac9ea474e86ad0",
"index": 366,
"step-1": "<mask token>\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n",
"step-4": "import os, glob\nimport numpy as np\nfrom ..algorithms.utils import get_file_manager\nfrom ..algorithms.clustered_writes import *\nfrom ..exp_utils import create_empty_dir\n\n\ndef test_get_entity_sizes():\n bytes_per_voxel = 1\n R = 10, 9, 10\n cs = 5, 3, 2\n partition = 2, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n assert bs == 5 * 3 * 2\n assert brs == 5 * 3 * 2 * 5\n assert bss == 5 * 3 * 2 * 5 * 3\n\n\ndef test_get_strategy():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n test_case = {(5 * 2 * 3): 0, (5 * 2 * 3 * 4): 0, (5 * 2 * 3 * 5): 1, (5 *\n 2 * 3 * 5 * 2): 1, (5 * 2 * 3 * 5 * 3): 2, (5 * 2 * 3 * 5 * 3 * 3):\n 2, (5 * 2 * 3 * 5 * 3 * 4): 2, (5 * 2 * 3 * 5 * 3 * 7): 2}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n bytes_per_voxel = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n partition = 4, 3, 5\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0] * R[1] * R[2] * bytes_per_voxel\n test_case = {(5 * 2 * 3): 4 * 3 * 5, (5 * 2 * 3 * 4): 4 * 3 * 2, (5 * 2 *\n 3 * 5): 4 * 3, (5 * 2 * 3 * 5 * 2): 4 * 2, (5 * 2 * 3 * 5 * 3): 4,\n (5 * 2 * 3 * 5 * 3 * 3): 2, (5 * 2 * 3 * 5 * 3 * 4): 1, (5 * 2 * 3 *\n 5 * 3 * 7): 1}\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size,\n cs, bs, brs, bss, partition, R, bytes_per_voxel)\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = 20, 9, 10\n cs = 5, 3, 2\n ff = 'HDF5'\n outdir_path = './outdir'\n test_case = [5 * 3 * 2, 5 * 3 * 2 * 4, 5 * 3 * 2 * 5, 5 * 3 * 2 * 5 * 2,\n 5 * 3 * 2 * 5 * 3, 5 * 3 * 2 * 5 * 3 * 3, 5 * 3 * 2 * 5 * 3 * 4, 5 *\n 3 * 2 * 5 * 3 * 7]\n nb_chunks = 4 * 3 * 5\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob('*.hdf5'):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n",
"step-5": "import os, glob\nimport numpy as np\n\nfrom ..algorithms.utils import get_file_manager\nfrom ..algorithms.clustered_writes import *\nfrom ..exp_utils import create_empty_dir\n\n\ndef test_get_entity_sizes():\n # in C order\n bytes_per_voxel = 1\n R = (10,9,10)\n cs = (5,3,2)\n partition = (2,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n\n assert bs == 5*3*2\n assert brs == 5*3*2*5\n assert bss == 5*3*2*5*3\n\n\ndef test_get_strategy():\n # in C order\n bytes_per_voxel = 1\n R = (20,9,10)\n cs = (5,3,2)\n partition = (4,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n \n test_case = {\n 5*2*3: 0, # 1 block \n 5*2*3*4: 0, # 4 blocks \n 5*2*3*5: 1, # 1 row \n 5*2*3*5*2: 1, # 2 rows\n 5*2*3*5*3: 2, # 1 slice \n 5*2*3*5*3*3: 2, # 3 slices \n 5*2*3*5*3*4: 2, # whole img\n 5*2*3*5*3*7: 2, # whole img (more mem than necessary)\n }\n\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n assert strategy == expected\n\n\ndef test_compute_buffers():\n # in C order\n bytes_per_voxel = 1\n R = (20,9,10)\n cs = (5,3,2)\n partition = (4,3,5)\n bs, brs, bss = get_entity_sizes(cs, bytes_per_voxel, partition)\n origarr_size = R[0]*R[1]*R[2]*bytes_per_voxel\n \n test_case = {\n 5*2*3: 4*3*5, # 1 block \n 5*2*3*4: 4*3*2, # 4 blocks \n 5*2*3*5: 4*3, # 1 row \n 5*2*3*5*2: 4*2, # 2 rows\n 5*2*3*5*3: 4, # 1 slice \n 5*2*3*5*3*3: 2, # 3 slices \n 5*2*3*5*3*4: 1, # whole img\n 5*2*3*5*3*7: 1, # whole img (more mem than necessary)\n }\n\n for buffer_mem_size, expected in test_case.items():\n strategy = get_strategy(buffer_mem_size, bs, brs, bss)\n buffers = compute_buffers(buffer_mem_size, strategy, origarr_size, cs, bs, brs, bss, partition, R, bytes_per_voxel)\n\n # test number of buffers\n nb_buffers = len(buffers.values())\n assert nb_buffers == expected\n\n\ndef test_clustered_writes():\n bpv = 1\n R = (20,9,10)\n cs = (5,3,2)\n ff = 'HDF5'\n outdir_path = './outdir'\n\n test_case = [\n 5*3*2, # 1 block \n 5*3*2*4, # 4 blocks \n 5*3*2*5, # 1 row \n 5*3*2*5*2, # 2 rows\n 5*3*2*5*3, # 1 slice \n 5*3*2*5*3*3, # 3 slices \n 5*3*2*5*3*4, # whole img\n 5*3*2*5*3*7, # whole img (more mem than necessary)\n ]\n\n nb_chunks = 4*3*5\n\n # create input array\n origarr_filepath = './original_array.hdf5'\n data = np.random.normal(size=R)\n fm = get_file_manager(ff)\n if os.path.isfile(origarr_filepath):\n os.remove(origarr_filepath)\n fm.write(origarr_filepath, data, R, _slices=None)\n \n for m in test_case:\n create_empty_dir(outdir_path)\n clustered_writes(origarr_filepath, R, cs, bpv, m, ff, outdir_path)\n\n workdir = os.getcwd()\n os.chdir(outdir_path)\n filenames = list()\n for filename in glob.glob(\"*.hdf5\"):\n arr = fm.read_all(filename)\n assert arr.shape == cs\n filenames.append(filename)\n\n assert len(filenames) == nb_chunks\n os.chdir(workdir)\n\n \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import os
from app_web import sg
from sendgrid.helpers.mail import *
import pdfkit
from models.user import User
from models.expense import Expense
from models.statement import Statement
from models.category import Category
import tempfile
import subprocess
from .aws_uploader import upload_image_to_s3
import datetime
from peewee import fn
from flask import render_template
def create_statement(month=None):
def _get_pdfkit_config():
if os.getenv('FLASK_ENV') == 'production':
WKHTMLTOPDF_CMD = subprocess.Popen(
['which', os.environ.get(
'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')],
stdout=subprocess.PIPE).communicate()[0].strip()
return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)
else:
return pdfkit.configuration()
def create_pdf(pdf_content, filename):
options = {
'margin-top': '10mm',
'margin-bottom': '10mm',
'margin-left': '10mm',
'margin-right': '10mm',
'page-size': 'A4',
'page-width': '210mm',
'page-height': '296mm'
}
pdf = pdfkit.from_string(
pdf_content, False, configuration=_get_pdfkit_config(), options=options)
temp_file = tempfile.TemporaryFile()
temp_file.filename = filename
temp_file.content_type = "application/pdf"
temp_file.write(pdf)
temp_file.seek(0)
return temp_file
if month == None :
year = datetime.datetime.now().year
full_month = datetime.date.today().strftime("%B %Y") # current month
short_month = datetime.date.today().strftime("%b")
else:
# '2020-12' convert to 'December 2020'
year_month = month.split('-') # ['2020','12']
year = int(year_month[0])
short_month = datetime.datetime(year, int(year_month[1]), 1).strftime("%b")
full_month = datetime.datetime(year, int(year_month[1]), 1).strftime("%B %Y")
# select all user from database
users = User.select()
# get all expenses to render in template
for user in users:
record = Statement.get_or_none(Statement.user==user.id, Statement.month==full_month)
if not record:
expenses = Expense.select().where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year).order_by(Expense.created_at.asc())
# ttl = Expense.select(fn.SUM(Expense.amount).alias('total')).where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year)
total = 0
for exp in expenses:
total += exp.amount
html = render_template('expenses/statement.html', expenses=expenses, total=total, month=str(full_month))
pdf_name = (user.username).replace(" ", "-").lower() + "-" + str(full_month).replace(" ", "-")
temp_file = create_pdf(html, pdf_name)
statement_url = upload_image_to_s3(user.id ,temp_file)
print(statement_url)
statement = Statement(user=user.id, exp_url=statement_url, month=full_month)
statement.save()
'''
Send monthly statement email
'''
# message = Mail(
# from_email="leongjinqwen@gmail.com",
# to_emails=user.email,
# subject=f"{month} Expenses Statement",
# html_content=Content("text/html", f"<h1>Dear {user.username},</h1><br/>Here is your expenses statement PDF.<br/><a href={statement_url}>{month} Statement<a><br/><h1>Jw</h1>")
# )
# try:
# response = sg.send(message)
# print(response.body)
# except Exception as e:
# print(str(e))
else:
print('already exist!')
|
normal
|
{
"blob_id": "55df8d13ddf28f7b0477329bee743471a0780f24",
"index": 3253,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_statement(month=None):\n\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], stdout=\n subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {'margin-top': '10mm', 'margin-bottom': '10mm',\n 'margin-left': '10mm', 'margin-right': '10mm', 'page-size':\n 'A4', 'page-width': '210mm', 'page-height': '296mm'}\n pdf = pdfkit.from_string(pdf_content, False, configuration=\n _get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = 'application/pdf'\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n if month == None:\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime('%B %Y')\n short_month = datetime.date.today().strftime('%b')\n else:\n year_month = month.split('-')\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%b')\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%B %Y')\n users = User.select()\n for user in users:\n record = Statement.get_or_none(Statement.user == user.id, Statement\n .month == full_month)\n if not record:\n expenses = Expense.select().where(Expense.cat in user.\n categories, Expense.month == short_month, Expense.\n created_at.year == year).order_by(Expense.created_at.asc())\n total = 0\n for exp in expenses:\n total += exp.amount\n html = render_template('expenses/statement.html', expenses=\n expenses, total=total, month=str(full_month))\n pdf_name = user.username.replace(' ', '-').lower() + '-' + str(\n full_month).replace(' ', '-')\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id, temp_file)\n print(statement_url)\n statement = Statement(user=user.id, exp_url=statement_url,\n month=full_month)\n statement.save()\n \"\"\"\n Send monthly statement email\n \"\"\"\n else:\n print('already exist!')\n",
"step-3": "import os\nfrom app_web import sg\nfrom sendgrid.helpers.mail import *\nimport pdfkit\nfrom models.user import User\nfrom models.expense import Expense\nfrom models.statement import Statement\nfrom models.category import Category\nimport tempfile\nimport subprocess\nfrom .aws_uploader import upload_image_to_s3\nimport datetime\nfrom peewee import fn\nfrom flask import render_template\n\n\ndef create_statement(month=None):\n\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')], stdout=\n subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {'margin-top': '10mm', 'margin-bottom': '10mm',\n 'margin-left': '10mm', 'margin-right': '10mm', 'page-size':\n 'A4', 'page-width': '210mm', 'page-height': '296mm'}\n pdf = pdfkit.from_string(pdf_content, False, configuration=\n _get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = 'application/pdf'\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n if month == None:\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime('%B %Y')\n short_month = datetime.date.today().strftime('%b')\n else:\n year_month = month.split('-')\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%b')\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\n '%B %Y')\n users = User.select()\n for user in users:\n record = Statement.get_or_none(Statement.user == user.id, Statement\n .month == full_month)\n if not record:\n expenses = Expense.select().where(Expense.cat in user.\n categories, Expense.month == short_month, Expense.\n created_at.year == year).order_by(Expense.created_at.asc())\n total = 0\n for exp in expenses:\n total += exp.amount\n html = render_template('expenses/statement.html', expenses=\n expenses, total=total, month=str(full_month))\n pdf_name = user.username.replace(' ', '-').lower() + '-' + str(\n full_month).replace(' ', '-')\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id, temp_file)\n print(statement_url)\n statement = Statement(user=user.id, exp_url=statement_url,\n month=full_month)\n statement.save()\n \"\"\"\n Send monthly statement email\n \"\"\"\n else:\n print('already exist!')\n",
"step-4": "import os\nfrom app_web import sg\nfrom sendgrid.helpers.mail import *\nimport pdfkit\nfrom models.user import User\nfrom models.expense import Expense\nfrom models.statement import Statement\nfrom models.category import Category\nimport tempfile\nimport subprocess\nfrom .aws_uploader import upload_image_to_s3\nimport datetime\nfrom peewee import fn\nfrom flask import render_template\n\ndef create_statement(month=None):\n def _get_pdfkit_config():\n if os.getenv('FLASK_ENV') == 'production':\n WKHTMLTOPDF_CMD = subprocess.Popen(\n ['which', os.environ.get(\n 'WKHTMLTOPDF_BINARY', 'wkhtmltopdf-pack')],\n stdout=subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)\n else:\n return pdfkit.configuration()\n\n def create_pdf(pdf_content, filename):\n options = {\n 'margin-top': '10mm',\n 'margin-bottom': '10mm',\n 'margin-left': '10mm',\n 'margin-right': '10mm',\n 'page-size': 'A4',\n 'page-width': '210mm',\n 'page-height': '296mm'\n }\n pdf = pdfkit.from_string(\n pdf_content, False, configuration=_get_pdfkit_config(), options=options)\n temp_file = tempfile.TemporaryFile()\n temp_file.filename = filename\n temp_file.content_type = \"application/pdf\"\n temp_file.write(pdf)\n temp_file.seek(0)\n return temp_file\n\n if month == None :\n year = datetime.datetime.now().year\n full_month = datetime.date.today().strftime(\"%B %Y\") # current month\n short_month = datetime.date.today().strftime(\"%b\")\n else:\n # '2020-12' convert to 'December 2020'\n year_month = month.split('-') # ['2020','12']\n year = int(year_month[0])\n short_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\"%b\")\n full_month = datetime.datetime(year, int(year_month[1]), 1).strftime(\"%B %Y\")\n\n # select all user from database\n users = User.select()\n # get all expenses to render in template\n for user in users:\n record = Statement.get_or_none(Statement.user==user.id, Statement.month==full_month)\n if not record:\n expenses = Expense.select().where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year).order_by(Expense.created_at.asc())\n # ttl = Expense.select(fn.SUM(Expense.amount).alias('total')).where(Expense.cat in user.categories, Expense.month == short_month, Expense.created_at.year == year)\n total = 0\n for exp in expenses:\n total += exp.amount\n\n html = render_template('expenses/statement.html', expenses=expenses, total=total, month=str(full_month))\n pdf_name = (user.username).replace(\" \", \"-\").lower() + \"-\" + str(full_month).replace(\" \", \"-\")\n temp_file = create_pdf(html, pdf_name)\n statement_url = upload_image_to_s3(user.id ,temp_file)\n print(statement_url)\n\n statement = Statement(user=user.id, exp_url=statement_url, month=full_month)\n statement.save()\n '''\n Send monthly statement email\n '''\n # message = Mail(\n # from_email=\"leongjinqwen@gmail.com\",\n # to_emails=user.email,\n # subject=f\"{month} Expenses Statement\",\n # html_content=Content(\"text/html\", f\"<h1>Dear {user.username},</h1><br/>Here is your expenses statement PDF.<br/><a href={statement_url}>{month} Statement<a><br/><h1>Jw</h1>\")\n # )\n # try:\n # response = sg.send(message)\n # print(response.body)\n # except Exception as e:\n # print(str(e))\n else:\n print('already exist!')\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |