code (string, 13-1.2M chars) | order_type (string, 1 class) | original_example (dict) | step_ids (list, 1-5 items)
---|---|---|---|
"""Copied from http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt"""
STOP_WORDS = set(
"""
あそこ
あたり
あちら
あっち
あと
あな
あなた
あれ
いくつ
いつ
いま
いや
いろいろ
うち
おおまか
おまえ
おれ
がい
かく
かたち
かやの
から
がら
きた
くせ
ここ
こっち
こと
ごと
こちら
ごっちゃ
これ
これら
ごろ
さまざま
さらい
さん
しかた
しよう
すか
ずつ
すね
すべて
ぜんぶ
そう
そこ
そちら
そっち
そで
それ
それぞれ
それなり
たくさん
たち
たび
ため
だめ
ちゃ
ちゃん
てん
とおり
とき
どこ
どこか
ところ
どちら
どっか
どっち
どれ
なか
なかば
なに
など
なん
はじめ
はず
はるか
ひと
ひとつ
ふく
ぶり
べつ
へん
ぺん
ほう
ほか
まさ
まし
まとも
まま
みたい
みつ
みなさん
みんな
もと
もの
もん
やつ
よう
よそ
わけ
わたし
ハイ
上
中
下
字
年
月
日
時
分
秒
週
火
水
木
金
土
国
都
道
府
県
市
区
町
村
各
第
方
何
的
度
文
者
性
体
人
他
今
部
課
係
外
類
達
気
室
口
誰
用
界
会
首
男
女
別
話
私
屋
店
家
場
等
見
際
観
段
略
例
系
論
形
間
地
員
線
点
書
品
力
法
感
作
元
手
数
彼
彼女
子
内
楽
喜
怒
哀
輪
頃
化
境
俺
奴
高
校
婦
伸
紀
誌
レ
行
列
事
士
台
集
様
所
歴
器
名
情
連
毎
式
簿
回
匹
個
席
束
歳
目
通
面
円
玉
枚
前
後
左
右
次
先
春
夏
秋
冬
一
二
三
四
五
六
七
八
九
十
百
千
万
億
兆
下記
上記
時間
今回
前回
場合
一つ
年生
自分
ヶ所
ヵ所
カ所
箇所
ヶ月
ヵ月
カ月
箇月
名前
本当
確か
時点
全部
関係
近く
方法
我々
違い
多く
扱い
新た
その後
半ば
結局
様々
以前
以後
以降
未満
以上
以下
幾つ
毎日
自体
向こう
何人
手段
同じ
感じ
""".split()
)
|
normal
|
{
"blob_id": "254afebcc909c805d1e4972a0910eb4451d1e64e",
"index": 8704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSTOP_WORDS = set(\n \"\"\"\nあそこ\nあたり\nあちら\nあっち\nあと\nあな\nあなた\nあれ\nいくつ\nいつ\nいま\nいや\nいろいろ\nうち\nおおまか\nおまえ\nおれ\nがい\nかく\nかたち\nかやの\nから\nがら\nきた\nくせ\nここ\nこっち\nこと\nごと\nこちら\nごっちゃ\nこれ\nこれら\nごろ\nさまざま\nさらい\nさん\nしかた\nしよう\nすか\nずつ\nすね\nすべて\nぜんぶ\nそう\nそこ\nそちら\nそっち\nそで\nそれ\nそれぞれ\nそれなり\nたくさん\nたち\nたび\nため\nだめ\nちゃ\nちゃん\nてん\nとおり\nとき\nどこ\nどこか\nところ\nどちら\nどっか\nどっち\nどれ\nなか\nなかば\nなに\nなど\nなん\nはじめ\nはず\nはるか\nひと\nひとつ\nふく\nぶり\nべつ\nへん\nぺん\nほう\nほか\nまさ\nまし\nまとも\nまま\nみたい\nみつ\nみなさん\nみんな\nもと\nもの\nもん\nやつ\nよう\nよそ\nわけ\nわたし\nハイ\n上\n中\n下\n字\n年\n月\n日\n時\n分\n秒\n週\n火\n水\n木\n金\n土\n国\n都\n道\n府\n県\n市\n区\n町\n村\n\n\n各\n第\n方\n何\n的\n度\n文\n者\n性\n体\n人\n他\n今\n部\n課\n係\n外\n類\n達\n気\n室\n口\n誰\n用\n界\n会\n首\n男\n女\n別\n話\n私\n屋\n店\n家\n場\n等\n見\n際\n観\n段\n略\n例\n系\n論\n形\n間\n地\n員\n線\n点\n書\n品\n力\n法\n感\n作\n元\n手\n数\n彼\n彼女\n子\n内\n楽\n喜\n怒\n哀\n輪\n頃\n化\n境\n俺\n奴\n高\n校\n婦\n伸\n紀\n誌\nレ\n行\n列\n事\n士\n台\n集\n様\n所\n歴\n器\n名\n情\n連\n毎\n式\n簿\n\n\n\n\n回\n匹\n個\n席\n束\n歳\n目\n通\n面\n円\n玉\n枚\n\n前\n後\n左\n右\n次\n先\n\n春\n夏\n秋\n冬\n\n\n\n一\n二\n三\n四\n五\n六\n七\n八\n九\n十\n百\n千\n万\n億\n兆\n\n\n下記\n上記\n時間\n今回\n前回\n場合\n一つ\n年生\n自分\nヶ所\nヵ所\nカ所\n箇所\nヶ月\nヵ月\nカ月\n箇月\n名前\n本当\n確か\n時点\n全部\n関係\n近く\n方法\n我々\n違い\n多く\n扱い\n新た\nその後\n半ば\n結局\n様々\n以前\n以後\n以降\n未満\n以上\n以下\n幾つ\n毎日\n自体\n向こう\n何人\n手段\n同じ\n感じ\n\"\"\"\n .split())\n",
"step-3": "\"\"\"Copied from http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt\"\"\"\nSTOP_WORDS = set(\n \"\"\"\nあそこ\nあたり\nあちら\nあっち\nあと\nあな\nあなた\nあれ\nいくつ\nいつ\nいま\nいや\nいろいろ\nうち\nおおまか\nおまえ\nおれ\nがい\nかく\nかたち\nかやの\nから\nがら\nきた\nくせ\nここ\nこっち\nこと\nごと\nこちら\nごっちゃ\nこれ\nこれら\nごろ\nさまざま\nさらい\nさん\nしかた\nしよう\nすか\nずつ\nすね\nすべて\nぜんぶ\nそう\nそこ\nそちら\nそっち\nそで\nそれ\nそれぞれ\nそれなり\nたくさん\nたち\nたび\nため\nだめ\nちゃ\nちゃん\nてん\nとおり\nとき\nどこ\nどこか\nところ\nどちら\nどっか\nどっち\nどれ\nなか\nなかば\nなに\nなど\nなん\nはじめ\nはず\nはるか\nひと\nひとつ\nふく\nぶり\nべつ\nへん\nぺん\nほう\nほか\nまさ\nまし\nまとも\nまま\nみたい\nみつ\nみなさん\nみんな\nもと\nもの\nもん\nやつ\nよう\nよそ\nわけ\nわたし\nハイ\n上\n中\n下\n字\n年\n月\n日\n時\n分\n秒\n週\n火\n水\n木\n金\n土\n国\n都\n道\n府\n県\n市\n区\n町\n村\n\n\n各\n第\n方\n何\n的\n度\n文\n者\n性\n体\n人\n他\n今\n部\n課\n係\n外\n類\n達\n気\n室\n口\n誰\n用\n界\n会\n首\n男\n女\n別\n話\n私\n屋\n店\n家\n場\n等\n見\n際\n観\n段\n略\n例\n系\n論\n形\n間\n地\n員\n線\n点\n書\n品\n力\n法\n感\n作\n元\n手\n数\n彼\n彼女\n子\n内\n楽\n喜\n怒\n哀\n輪\n頃\n化\n境\n俺\n奴\n高\n校\n婦\n伸\n紀\n誌\nレ\n行\n列\n事\n士\n台\n集\n様\n所\n歴\n器\n名\n情\n連\n毎\n式\n簿\n\n\n\n\n回\n匹\n個\n席\n束\n歳\n目\n通\n面\n円\n玉\n枚\n\n前\n後\n左\n右\n次\n先\n\n春\n夏\n秋\n冬\n\n\n\n一\n二\n三\n四\n五\n六\n七\n八\n九\n十\n百\n千\n万\n億\n兆\n\n\n下記\n上記\n時間\n今回\n前回\n場合\n一つ\n年生\n自分\nヶ所\nヵ所\nカ所\n箇所\nヶ月\nヵ月\nカ月\n箇月\n名前\n本当\n確か\n時点\n全部\n関係\n近く\n方法\n我々\n違い\n多く\n扱い\n新た\nその後\n半ば\n結局\n様々\n以前\n以後\n以降\n未満\n以上\n以下\n幾つ\n毎日\n自体\n向こう\n何人\n手段\n同じ\n感じ\n\"\"\".split()\n)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
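A minimal usage sketch for the stopword row above (not part of the dataset row; the token list is hardcoded for illustration, where a real pipeline would take tokens from a morphological analyzer such as MeCab):

```python
# Filter a pre-tokenized Japanese sentence against STOP_WORDS.
tokens = ["これ", "は", "日本語", "の", "文章", "です", "ため"]
content_words = [t for t in tokens if t not in STOP_WORDS]
print(content_words)  # ['は', '日本語', 'の', '文章', 'です']; 'これ' and 'ため' are stopwords
```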
import datetime
import time
from datetime import timedelta  # only timedelta is used directly from the datetime namespace
time_one = datetime.time(1, 2, 3)
print("Time One :: ", time_one)
time_two = datetime.time(hour=23, minute=59, second=59, microsecond=99)
print("Time Two :: ", time_two)
date_one = datetime.date(month=3, year=2019, day=31)
print("Date One :: ", date_one)
today = datetime.date.today()
print("Today :: ", today, today.timetuple())
print("Difference Between Time :: ", timedelta(time_two.second) - timedelta(time_one.second))
print("Today :: ", datetime.date.today())
print("Time.asctime() :: ", time.asctime())
now = time.gmtime()
print("time.asctime(time.gmtime) :: ", time.asctime(now))
start = time.time()
time.sleep(3)
stop = time.time()
print(stop - start)
|
normal
|
{
"blob_id": "1ed7dba63db38e53a1dc5fac3c36f0dd98075c1f",
"index": 4305,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Time One :: ', time_one)\n<mask token>\nprint('Time Two :: ', time_two)\n<mask token>\nprint('Date One :: ', date_one)\n<mask token>\nprint('Today :: ', today, today.timetuple())\nprint('Difference Between Time :: ', timedelta(time_two.second) - timedelta\n (time_one.second))\nprint('Today :: ', datetime.date.today())\nprint('Time.asctime() :: ', time.asctime())\n<mask token>\nprint('time.asctime(time.gmtime) :: ', time.asctime(now))\n<mask token>\ntime.sleep(3)\n<mask token>\nprint(stop - start)\n",
"step-3": "<mask token>\ntime_one = datetime.time(1, 2, 3)\nprint('Time One :: ', time_one)\ntime_two = datetime.time(hour=23, minute=59, second=59, microsecond=99)\nprint('Time Two :: ', time_two)\ndate_one = datetime.date(month=3, year=2019, day=31)\nprint('Date One :: ', date_one)\ntoday = datetime.date.today()\nprint('Today :: ', today, today.timetuple())\nprint('Difference Between Time :: ', timedelta(time_two.second) - timedelta\n (time_one.second))\nprint('Today :: ', datetime.date.today())\nprint('Time.asctime() :: ', time.asctime())\nnow = time.gmtime()\nprint('time.asctime(time.gmtime) :: ', time.asctime(now))\nstart = time.time()\ntime.sleep(3)\nstop = time.time()\nprint(stop - start)\n",
"step-4": "from datetime import *\nimport datetime\nimport time\ntime_one = datetime.time(1, 2, 3)\nprint('Time One :: ', time_one)\ntime_two = datetime.time(hour=23, minute=59, second=59, microsecond=99)\nprint('Time Two :: ', time_two)\ndate_one = datetime.date(month=3, year=2019, day=31)\nprint('Date One :: ', date_one)\ntoday = datetime.date.today()\nprint('Today :: ', today, today.timetuple())\nprint('Difference Between Time :: ', timedelta(time_two.second) - timedelta\n (time_one.second))\nprint('Today :: ', datetime.date.today())\nprint('Time.asctime() :: ', time.asctime())\nnow = time.gmtime()\nprint('time.asctime(time.gmtime) :: ', time.asctime(now))\nstart = time.time()\ntime.sleep(3)\nstop = time.time()\nprint(stop - start)\n",
"step-5": "from datetime import *\nimport datetime\nimport time\ntime_one = datetime.time(1, 2, 3)\nprint(\"Time One :: \", time_one)\n\ntime_two = datetime.time(hour=23, minute=59, second=59, microsecond=99)\nprint(\"Time Two :: \", time_two)\n\ndate_one = datetime.date(month=3, year=2019, day=31)\nprint(\"Date One :: \", date_one)\n\ntoday = datetime.date.today()\nprint(\"Today :: \", today, today.timetuple())\n\nprint(\"Difference Between Time :: \", timedelta(time_two.second) - timedelta(time_one.second))\nprint(\"Today :: \", datetime.date.today())\n\nprint(\"Time.asctime() :: \", time.asctime())\nnow = time.gmtime()\nprint(\"time.asctime(time.gmtime) :: \", time.asctime(now))\n\nstart = time.time()\ntime.sleep(3)\nstop = time.time()\nprint(stop - start)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
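The difference printed in the row above compares only the seconds components of the two times. A conventional way to subtract two full `time` values (a sketch, not part of the dataset row) is to anchor both to the same date first:

```python
import datetime

t1 = datetime.time(1, 2, 3)
t2 = datetime.time(hour=23, minute=59, second=59)
anchor = datetime.date.today()
# Subtracting two datetimes yields a timedelta covering the full clock values.
diff = datetime.datetime.combine(anchor, t2) - datetime.datetime.combine(anchor, t1)
print(diff)  # 22:57:56
```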
"""
Carl Bunge
Washington State University
June 2018
Adapted from @author: Luka Denies from TU Delft.
Changelog:
11/2017 - Integration of CoolProp
06/2018 - Update to OpenFOAM-5.x (Mass-based thermodynamics (for example: cpMcv to CpMCv))
03/2019 - Update to include parahydrogen properties from Refprop
"""
import CoolProp.CoolProp as CP
import numpy as np
import matplotlib.pyplot as plt
#Fluid for thermodynamic properties (rho, Cp, CpMcv, H, S, c, E, thermal conductivity)
CP.set_reference_state('parahydrogen','NBP')
fluid_thermo ='parahydrogen'
#Fluid for transport model (viscosity)
CP.set_reference_state('hydrogen','NBP')
fluid_transport = 'hydrogen'
#****************************************************************************************
#Temperature limits (set within subcritical region for saturation tables)
T0 = 15 #Temperature start (K)
TMax = 32 #Temperature end (K)
#Pressure limits
p0 = 0.1e5 #Pa
pMax = 5.5e5 #Pa
#****************************************************************************************
Tcrit = CP.PropsSI("Tcrit",fluid_thermo)
Ts = []
ps = []
pRange = []
rho = []
mu = []
mu_l = []
mu_v = []
kappa = []
kappa_l = []
kappa_v = []
Cp = []
Cp_l = []
Cp_v = []
H = []
H_l = []
H_v = []
CpMCv = []
CpMCv_l = []
CpMCv_v = []
E = []
E_l = []
E_v = []
S = []
S_l = []
S_v = []
c = []
c_l = []
c_v = []
pSat = []
i = 0
j = 0
p = p0
T = T0
#Build (p, T) tables
while p<pMax:
pRange.append(p)
TRange = []
T = T0
rho.append([0])
Cp.append([0])
Cp_l.append([0])
Cp_v.append([0])
mu.append([0])
mu_l.append([0])
mu_v.append([0])
    kappa.append([0])
kappa_l.append([0])
kappa_v.append([0])
CpMCv.append([0])
CpMCv_l.append([0])
CpMCv_v.append([0])
H.append([0])
H_l.append([0])
H_v.append([0])
E.append([0])
E_l.append([0])
E_v.append([0])
S.append([0])
S_l.append([0])
S_v.append([0])
c.append([0])
c_l.append([0])
c_v.append([0])
pSat.append([0])
rho[i][0] = rhoCur = CP.PropsSI('D','T',T,'P',p,fluid_thermo)
CpCur = CP.PropsSI('C','D',rhoCur,'T',T,fluid_thermo)
Cp[i][0] = CpCur
Cp_l[i][0] = CP.PropsSI('C','T',T,'Q',0,fluid_thermo)
Cp_v[i][0] = CP.PropsSI('C','T',T,'Q',1,fluid_thermo)
mu_l[i][0] = CP.PropsSI('V','T',T,'Q',0,fluid_transport)
mu_v[i][0] = CP.PropsSI('V','T',T,'Q',1,fluid_transport)
mu[i][0] = CP.PropsSI('V','D',rhoCur,'T',T,fluid_transport)
kappa_l[i][0] = CP.PropsSI('L','T',T,'Q',0,'REFPROP::parahydrogen')
kappa_v[i][0] = CP.PropsSI('L','T',T,'Q',1,'REFPROP::parahydrogen')
kappa[i][0] = CP.PropsSI('L','D',rhoCur,'T',T,'REFPROP::parahydrogen')
CpMCv_l[i][0] = CP.PropsSI('O','T',T,'Q',0,fluid_thermo)
CpMCv_v[i][0] = CP.PropsSI('O','T',T,'Q',1,fluid_thermo)
CpMCv[i][0] = CpCur-CP.PropsSI('O','D',rhoCur,'T',T,fluid_thermo)
H_l[i][0] = CP.PropsSI('H','T',T,'Q',0,fluid_thermo)
H_v[i][0] = CP.PropsSI('H','T',T,'Q',1,fluid_thermo)
H[i][0] = CP.PropsSI('H','D',rhoCur,'T',T,fluid_thermo)
E_l[i][0] = CP.PropsSI('U','T',T,'Q',0,fluid_thermo)
E_v[i][0] = CP.PropsSI('U','T',T,'Q',1,fluid_thermo)
E[i][0] = CP.PropsSI('U','D',rhoCur,'T',T,fluid_thermo)
S_l[i][0] = CP.PropsSI('S','T',T,'Q',0,fluid_thermo)
S_v[i][0] = CP.PropsSI('S','T',T,'Q',1,fluid_thermo)
S[i][0] = CP.PropsSI('S','D',rhoCur,'T',T,fluid_thermo)
c_l[i][0] = CP.PropsSI('A','T',T,'Q',0,fluid_thermo)
c_v[i][0] = CP.PropsSI('A','T',T,'Q',1,fluid_thermo)
c[i][0] = CP.PropsSI('A','D',rhoCur,'T',T,fluid_thermo)
pSat[i][0] = CP.PropsSI('P','T',T,'Q',0,fluid_thermo)
TRange.append(T)
while T<TMax:
j += 1
dT = 1 # Tstep [K] **************************************************************
T += dT
rhoCur = CP.PropsSI('D','T',T,'P',p,fluid_thermo)
rho[i].append(rhoCur)
CpCur = CP.PropsSI('C','D',rhoCur,'T',T,fluid_thermo)
CpCur_l = CP.PropsSI('C','T',T,'Q',0,fluid_thermo)
        CpCur_v = CP.PropsSI('C','T',T,'Q',1,fluid_thermo)
Cp_l[i].append(CP.PropsSI('C','T',T,'Q',0,fluid_thermo))
Cp_v[i].append(CP.PropsSI('C','T',T,'Q',1,fluid_thermo))
Cp[i].append(CpCur)
mu_l[i].append(CP.PropsSI('V','T',T,'Q',0,fluid_transport))
mu_v[i].append(CP.PropsSI('V','T',T,'Q',1,fluid_transport))
mu[i].append(CP.PropsSI('V','D',rhoCur,'T',T,fluid_transport))
kappa_l[i].append(CP.PropsSI('L','T',T,'Q',0,'REFPROP::parahydrogen'))
kappa_v[i].append(CP.PropsSI('L','T',T,'Q',1,'REFPROP::parahydrogen'))
kappa[i].append(CP.PropsSI('L','D',rhoCur,'T',T,'REFPROP::parahydrogen'))
CpMCv_l[i].append((CP.PropsSI('C','T',T,'Q',0,fluid_thermo))-(CP.PropsSI('O','T',T,'Q',0,fluid_thermo)))
CpMCv_v[i].append((CP.PropsSI('C','T',T,'Q',1,fluid_thermo))-(CP.PropsSI('O','T',T,'Q',1,fluid_thermo)))
CpMCv[i].append((CpCur-CP.PropsSI('O','D',rhoCur,'T',T,fluid_thermo)))
H_l[i].append(CP.PropsSI('H','T',T,'Q',0,fluid_thermo))
H_v[i].append(CP.PropsSI('H','T',T,'Q',1,fluid_thermo))
H[i].append(CP.PropsSI('H','D',rhoCur,'T',T,fluid_thermo))
E_l[i].append(CP.PropsSI('U','T',T,'Q',0,fluid_thermo))
E_v[i].append(CP.PropsSI('U','T',T,'Q',1,fluid_thermo))
E[i].append(CP.PropsSI('U','D',rhoCur,'T',T,fluid_thermo))
S_l[i].append(CP.PropsSI('S','T',T,'Q',0,fluid_thermo))
S_v[i].append(CP.PropsSI('S','T',T,'Q',1,fluid_thermo))
S[i].append(CP.PropsSI('S','D',rhoCur,'T',T,fluid_thermo))
c_l[i].append(CP.PropsSI('A','T',T,'Q',0,fluid_thermo))
c_v[i].append(CP.PropsSI('A','T',T,'Q',1,fluid_thermo))
c[i].append(CP.PropsSI('A','D',rhoCur,'T',T,fluid_thermo))
pSat[i].append(CP.PropsSI('P','T',T,'Q',0,fluid_thermo))
TRange.append(T)
i += 1
ps.append([p]*len(TRange))
rhoPseudoCrit = CP.PropsSI('D','T',Tcrit,'P',p,fluid_thermo)
dp = 0.5e5 # Pstep [Pa] ****************************************************************
p += dp
    print(p)
Ts.append(TRange)
print("Calculations done, now writing")
pSatFile = open("pSat","w")
for i,p in enumerate(pRange):
sList = ["\t" + str(pSat[i][j]) + " " + str(Ts[i][j]) + "\n" for j in range(len(Ts[i]))]
pSatFile.write("".join(sList))
pSatFile.write("")
pSatFile.close()
mu_lFile = open("mu_l","w")
for i,p in enumerate(pRange):
sList = ["\t" + str(mu_l[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
mu_lFile.write("".join(sList))
mu_lFile.write("")
mu_lFile.close()
mu_vFile = open("mu_v","w")
for i,p in enumerate(pRange):
sList = ["\t" + str(mu_v[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
mu_vFile.write("".join(sList))
mu_vFile.write("")
mu_vFile.close()
muFile = open("mu","w")
for i,p in enumerate(pRange):
sList = ["\t" + str(mu[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
muFile.write("".join(sList))
muFile.write("")
muFile.close()
rhoFile = open("rho","w")
rhoFile.write("\n")
for i,p in enumerate(pRange):
rhoFile.write("")
sList = ["\t" + str(rho[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
rhoFile.write("".join(sList))
rhoFile.write("")
rhoFile.close()
Cp_lFile = open("Cp_l","w")
Cp_lFile.write("\n")
for i,p in enumerate(pRange):
Cp_lFile.write("")
sList = ["\t" + str(Cp_l[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
Cp_lFile.write("".join(sList))
Cp_lFile.write("")
Cp_lFile.close()
Cp_vFile = open("Cp_v","w")
Cp_vFile.write("\n")
for i,p in enumerate(pRange):
Cp_vFile.write("")
sList = ["\t" + str(Cp_v[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
Cp_vFile.write("".join(sList))
Cp_vFile.write("")
Cp_vFile.close()
CpFile = open("Cp","w")
CpFile.write("\n")
for i,p in enumerate(pRange):
CpFile.write("")
sList = ["\t" + str(Cp[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
CpFile.write("".join(sList))
CpFile.write("")
CpFile.close()
kappa_lFile = open("kappa_l","w")
kappa_lFile.write("\n")
for i,p in enumerate(pRange):
kappa_lFile.write("")
sList = ["\t" + str(kappa_l[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
kappa_lFile.write("".join(sList))
kappa_lFile.write("")
kappa_lFile.close()
kappa_vFile = open("kappa_v","w")
kappa_vFile.write("\n")
for i,p in enumerate(pRange):
kappa_vFile.write("")
sList = ["\t" + str(kappa_v[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
kappa_vFile.write("".join(sList))
kappa_vFile.write("")
kappa_vFile.close()
kappaFile = open("kappa","w")
kappaFile.write("\n")
for i,p in enumerate(pRange):
kappaFile.write("")
sList = ["\t" + str(kappa[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
kappaFile.write("".join(sList))
kappaFile.write("")
kappaFile.close()
CpMCv_lFile = open("CpMCv_l","w")
CpMCv_lFile.write("\n")
for i,p in enumerate(pRange):
CpMCv_lFile.write("")
sList = ["\t" + str(CpMCv_l[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
CpMCv_lFile.write("".join(sList))
CpMCv_lFile.write("")
CpMCv_lFile.close()
CpMCv_vFile = open("CpMCv_v","w")
CpMCv_vFile.write("\n")
for i,p in enumerate(pRange):
CpMCv_vFile.write("")
sList = ["\t" + str(CpMCv_v[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
CpMCv_vFile.write("".join(sList))
CpMCv_vFile.write("")
CpMCv_vFile.close()
CpMCvFile = open("CpMCv","w")
CpMCvFile.write("\n")
for i,p in enumerate(pRange):
CpMCvFile.write("")
sList = ["\t" + str(CpMCv[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
CpMCvFile.write("".join(sList))
CpMCvFile.write("")
CpMCvFile.close()
H_lFile = open("H_l","w")
H_lFile.write("\n")
for i,p in enumerate(pRange):
H_lFile.write("")
sList = ["\t" + str(H_l[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
H_lFile.write("".join(sList))
H_lFile.write("")
H_lFile.close()
H_vFile = open("H_v","w")
H_vFile.write("\n")
for i,p in enumerate(pRange):
H_vFile.write("")
sList = ["\t" + str(H_v[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
H_vFile.write("".join(sList))
H_vFile.write("")
H_vFile.close()
HFile = open("H","w")
HFile.write("\n")
for i,p in enumerate(pRange):
HFile.write("")
sList = ["\t" + str(H[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
HFile.write("".join(sList))
HFile.write("")
HFile.close()
E_lFile = open("E_l","w")
E_lFile.write("\n")
for i,p in enumerate(pRange):
E_lFile.write("")
sList = ["\t" + str(E_l[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
E_lFile.write("".join(sList))
E_lFile.write("")
E_lFile.close()
E_vFile = open("E_v","w")
E_vFile.write("\n")
for i,p in enumerate(pRange):
E_vFile.write("")
sList = ["\t" + str(E_v[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
E_vFile.write("".join(sList))
E_vFile.write("")
E_vFile.close()
EFile = open("E","w")
EFile.write("\n")
for i,p in enumerate(pRange):
EFile.write("")
sList = ["\t" + str(E[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
EFile.write("".join(sList))
EFile.write("")
EFile.close()
S_lFile = open("S_l","w")
S_lFile.write("\n")
for i,p in enumerate(pRange):
S_lFile.write("")
sList = ["\t" + str(S_l[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
S_lFile.write("".join(sList))
S_lFile.write("")
S_lFile.close()
S_vFile = open("S_v","w")
S_vFile.write("\n")
for i,p in enumerate(pRange):
S_vFile.write("")
sList = ["\t" + str(S_v[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
S_vFile.write("".join(sList))
S_vFile.write("")
S_vFile.close()
SFile = open("S","w")
SFile.write("\n")
for i,p in enumerate(pRange):
SFile.write("")
sList = ["\t" + str(S[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
SFile.write("".join(sList))
SFile.write("")
SFile.close()
c_lFile = open("c_l","w")
c_lFile.write("\n")
for i,p in enumerate(pRange):
c_lFile.write("")
sList = ["\t" + str(c_l[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
c_lFile.write("".join(sList))
c_lFile.write("")
c_lFile.close()
c_vFile = open("c_v","w")
c_vFile.write("\n")
for i,p in enumerate(pRange):
c_vFile.write("")
sList = ["\t" + str(c_v[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
c_vFile.write("".join(sList))
c_vFile.write("")
c_vFile.close()
cFile = open("c","w")
cFile.write("\n")
for i,p in enumerate(pRange):
cFile.write("")
sList = ["\t" + str(c[i][j]) + " " + str(Ts[i][j]) + " " + str(p) + "\n" for j in range(len(Ts[i]))]
cFile.write("".join(sList))
cFile.write("")
cFile.close()
#Previous dT method to save computational time:
#dT = drho/CP.PropsSI('d(D)/d(P)|T','D',rhoCur,'T',T,fluid_thermo)*CP.PropsSI('d(P)/d(T)|D','D',rhoCur,'T',T,fluid_thermo)
#Previous dP method to save computational time:
#drho/CP.PropsSI('d(D)/d(P)|T','D',rhoPseudoCrit,'T',Tcrit,fluid_thermo)
|
normal
|
{
"blob_id": "7ac15f422ca2cd0d30e936b7dd17c96e1f3abff0",
"index": 8429,
"step-1": "\"\"\"\nCarl Bunge\nWashington State University\nJune 2018\n\nAdapted from @author: Luka Denies from TU Delft.\n\nChangelog:\n11/2017 - Integration of CoolProp\n06/2018 - Update to OpenFOAM-5.x (Mass-based thermodynamics (for example: cpMcv to CpMCv))\n03/2019 - Update to include parahydrogen properties from Refprop\n\n\"\"\"\n\nimport CoolProp.CoolProp as CP\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Fluid for thermodynamic properties (rho, Cp, CpMcv, H, S, c, E, thermal conductivity)\nCP.set_reference_state('parahydrogen','NBP')\nfluid_thermo ='parahydrogen'\n\n#Fluid for transport model (viscosity)\nCP.set_reference_state('hydrogen','NBP')\nfluid_transport = 'hydrogen'\n\n#****************************************************************************************\n\n#Temperature limits (set within subcritical region for saturation tables)\nT0 = 15 #Temperature start (K)\nTMax = 32 #Temperature end (K)\n\n#Pressure limits\np0 = 0.1e5 #Pa\npMax = 5.5e5 #Pa\n\n#****************************************************************************************\n\nTcrit = CP.PropsSI(\"Tcrit\",fluid_thermo)\n\nTs = []\nps = []\npRange = []\n\nrho = []\nmu = []\nmu_l = []\nmu_v = []\nkappa = []\nkappa_l = []\nkappa_v = []\nCp = []\nCp_l = []\nCp_v = []\nH = []\nH_l = []\nH_v = []\nCpMCv = []\nCpMCv_l = []\nCpMCv_v = []\nE = []\nE_l = []\nE_v = []\nS = []\nS_l = []\nS_v = []\nc = []\nc_l = []\nc_v = []\npSat = []\n\ni = 0\nj = 0\n\np = p0\nT = T0\n\n#Build (p, T) tables\nwhile p<pMax:\n pRange.append(p)\n TRange = []\n T = T0\n rho.append([0])\n Cp.append([0])\n Cp_l.append([0])\n Cp_v.append([0])\n mu.append([0])\n mu_l.append([0])\n mu_v.append([0])\n kappa.append[0])\n kappa_l.append([0])\n kappa_v.append([0])\n CpMCv.append([0])\n CpMCv_l.append([0])\n CpMCv_v.append([0])\n H.append([0])\n H_l.append([0])\n H_v.append([0])\n E.append([0])\n E_l.append([0])\n E_v.append([0])\n S.append([0])\n S_l.append([0])\n S_v.append([0])\n c.append([0])\n c_l.append([0])\n c_v.append([0])\n pSat.append([0])\n rho[i][0] = rhoCur = CP.PropsSI('D','T',T,'P',p,fluid_thermo)\n CpCur = CP.PropsSI('C','D',rhoCur,'T',T,fluid_thermo) \n Cp[i][0] = CpCur\n Cp_l[i][0] = CP.PropsSI('C','T',T,'Q',0,fluid_thermo) \n Cp_v[i][0] = CP.PropsSI('C','T',T,'Q',1,fluid_thermo)\n mu_l[i][0] = CP.PropsSI('V','T',T,'Q',0,fluid_transport) \n mu_v[i][0] = CP.PropsSI('V','T',T,'Q',1,fluid_transport)\n mu[i][0] = CP.PropsSI('V','D',rhoCur,'T',T,fluid_transport)\n kappa_l[i][0] = CP.PropsSI('L','T',T,'Q',0,'REFPROP::parahydrogen') \n kappa_v[i][0] = CP.PropsSI('L','T',T,'Q',1,'REFPROP::parahydrogen')\n kappa[i][0] = CP.PropsSI('L','D',rhoCur,'T',T,'REFPROP::parahydrogen') \n CpMCv_l[i][0] = CP.PropsSI('O','T',T,'Q',0,fluid_thermo) \n CpMCv_v[i][0] = CP.PropsSI('O','T',T,'Q',1,fluid_thermo)\n CpMCv[i][0] = CpCur-CP.PropsSI('O','D',rhoCur,'T',T,fluid_thermo) \n H_l[i][0] = CP.PropsSI('H','T',T,'Q',0,fluid_thermo) \n H_v[i][0] = CP.PropsSI('H','T',T,'Q',1,fluid_thermo)\n H[i][0] = CP.PropsSI('H','D',rhoCur,'T',T,fluid_thermo)\n E_l[i][0] = CP.PropsSI('U','T',T,'Q',0,fluid_thermo) \n E_v[i][0] = CP.PropsSI('U','T',T,'Q',1,fluid_thermo)\n E[i][0] = CP.PropsSI('U','D',rhoCur,'T',T,fluid_thermo) \n S_l[i][0] = CP.PropsSI('S','T',T,'Q',0,fluid_thermo) \n S_v[i][0] = CP.PropsSI('S','T',T,'Q',1,fluid_thermo)\n S[i][0] = CP.PropsSI('S','D',rhoCur,'T',T,fluid_thermo)\n c_l[i][0] = CP.PropsSI('A','T',T,'Q',0,fluid_thermo) \n c_v[i][0] = CP.PropsSI('A','T',T,'Q',1,fluid_thermo) \n c[i][0] = 
CP.PropsSI('A','D',rhoCur,'T',T,fluid_thermo)\n pSat[i][0] = CP.PropsSI('P','T',T,'Q',0,fluid_thermo)\n TRange.append(T)\n while T<TMax:\n j += 1\n dT = 1 # Tstep [K] **************************************************************\n T += dT\n rhoCur = CP.PropsSI('D','T',T,'P',p,fluid_thermo)\n rho[i].append(rhoCur)\n CpCur = CP.PropsSI('C','D',rhoCur,'T',T,fluid_thermo) \n CpCur_l = CP.PropsSI('C','T',T,'Q',0,fluid_thermo)\n CpCur_v = CP.PropsSI('C','T',T,'Q',1,fluid_thermo)) \n Cp_l[i].append(CP.PropsSI('C','T',T,'Q',0,fluid_thermo))\n Cp_v[i].append(CP.PropsSI('C','T',T,'Q',1,fluid_thermo))\n Cp[i].append(CpCur)\n mu_l[i].append(CP.PropsSI('V','T',T,'Q',0,fluid_transport))\n mu_v[i].append(CP.PropsSI('V','T',T,'Q',1,fluid_transport))\n mu[i].append(CP.PropsSI('V','D',rhoCur,'T',T,fluid_transport))\n kappa_l[i].append(CP.PropsSI('L','T',T,'Q',0,'REFPROP::parahydrogen'))\n kappa_v[i].append(CP.PropsSI('L','T',T,'Q',1,'REFPROP::parahydrogen'))\n kappa[i].append(CP.PropsSI('L','D',rhoCur,'T',T,'REFPROP::parahydrogen'))\n CpMCv_l[i].append((CP.PropsSI('C','T',T,'Q',0,fluid_thermo))-(CP.PropsSI('O','T',T,'Q',0,fluid_thermo)))\n CpMCv_v[i].append((CP.PropsSI('C','T',T,'Q',1,fluid_thermo))-(CP.PropsSI('O','T',T,'Q',1,fluid_thermo)))\n CpMCv[i].append((CpCur-CP.PropsSI('O','D',rhoCur,'T',T,fluid_thermo)))\n H_l[i].append(CP.PropsSI('H','T',T,'Q',0,fluid_thermo))\n H_v[i].append(CP.PropsSI('H','T',T,'Q',1,fluid_thermo))\n H[i].append(CP.PropsSI('H','D',rhoCur,'T',T,fluid_thermo))\n E_l[i].append(CP.PropsSI('U','T',T,'Q',0,fluid_thermo))\n E_v[i].append(CP.PropsSI('U','T',T,'Q',1,fluid_thermo))\n E[i].append(CP.PropsSI('U','D',rhoCur,'T',T,fluid_thermo))\n S_l[i].append(CP.PropsSI('S','T',T,'Q',0,fluid_thermo))\n S_v[i].append(CP.PropsSI('S','T',T,'Q',1,fluid_thermo))\n S[i].append(CP.PropsSI('S','D',rhoCur,'T',T,fluid_thermo))\n c_l[i].append(CP.PropsSI('A','T',T,'Q',0,fluid_thermo))\n c_v[i].append(CP.PropsSI('A','T',T,'Q',1,fluid_thermo))\n c[i].append(CP.PropsSI('A','D',rhoCur,'T',T,fluid_thermo))\n pSat[i].append(CP.PropsSI('P','T',T,'Q',0,fluid_thermo))\n TRange.append(T)\n i += 1\n ps.append([p]*len(TRange)) \n rhoPseudoCrit = CP.PropsSI('D','T',Tcrit,'P',p,fluid_thermo)\n dp = 0.5e5 # Pstep [Pa] ****************************************************************\n p += dp\n print p\n Ts.append(TRange)\nprint \"Calculations done, now writing\"\n\npSatFile = open(\"pSat\",\"w\")\n\nfor i,p in enumerate(pRange):\n sList = [\"\\t\" + str(pSat[i][j]) + \" \" + str(Ts[i][j]) + \"\\n\" for j in range(len(Ts[i]))]\n pSatFile.write(\"\".join(sList)) \npSatFile.write(\"\")\npSatFile.close()\n\nmu_lFile = open(\"mu_l\",\"w\")\n\nfor i,p in enumerate(pRange):\n sList = [\"\\t\" + str(mu_l[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n mu_lFile.write(\"\".join(sList)) \nmu_lFile.write(\"\")\nmu_lFile.close()\n\nmu_vFile = open(\"mu_v\",\"w\")\n\nfor i,p in enumerate(pRange):\n sList = [\"\\t\" + str(mu_v[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n mu_vFile.write(\"\".join(sList)) \nmu_vFile.write(\"\")\nmu_vFile.close()\n\nmuFile = open(\"mu\",\"w\")\n\nfor i,p in enumerate(pRange):\n sList = [\"\\t\" + str(mu[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n muFile.write(\"\".join(sList)) \nmuFile.write(\"\")\nmuFile.close()\n\nrhoFile = open(\"rho\",\"w\")\nrhoFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n rhoFile.write(\"\")\n sList = [\"\\t\" + str(rho[i][j]) + \" \" + 
str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n rhoFile.write(\"\".join(sList))\nrhoFile.write(\"\")\nrhoFile.close()\n\nCp_lFile = open(\"Cp_l\",\"w\")\nCpFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n Cp_lFile.write(\"\")\n sList = [\"\\t\" + str(Cp_l[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n Cp_lFile.write(\"\".join(sList))\nCp_lFile.write(\"\")\nCp_lFile.close()\n\nCp_vFile = open(\"Cp_v\",\"w\")\nCp_vFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n Cp_vFile.write(\"\")\n sList = [\"\\t\" + str(Cp_v[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n Cp_vFile.write(\"\".join(sList))\nCp_vFile.write(\"\")\nCp_vFile.close()\n\nCpFile = open(\"Cp\",\"w\")\nCpFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n CpFile.write(\"\")\n sList = [\"\\t\" + str(Cp[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n CpFile.write(\"\".join(sList))\nCpFile.write(\"\")\nCpFile.close()\n\nkappa_lFile = open(\"kappa_l\",\"w\")\nkappa_lFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n kappa_lFile.write(\"\")\n sList = [\"\\t\" + str(kappa_l[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n kappa_lFile.write(\"\".join(sList))\nkappa_lFile.write(\"\")\nkappa_lFile.close()\n\nkappa_vFile = open(\"kappa_v\",\"w\")\nkappa_vFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n kappa_vFile.write(\"\")\n sList = [\"\\t\" + str(kappa_v[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n kappa_vFile.write(\"\".join(sList))\nkappa_vFile.write(\"\")\nkappa_vFile.close()\n\nkappaFile = open(\"kappa\",\"w\")\nkappaFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n kappaFile.write(\"\")\n sList = [\"\\t\" + str(kappa[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n kappaFile.write(\"\".join(sList))\nkappaFile.write(\"\")\nkappaFile.close()\n\nCpMCv_lFile = open(\"CpMCv_l\",\"w\")\nCpMCv_lFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n CpMCv_lFile.write(\"\")\n sList = [\"\\t\" + str(CpMCv_l[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n CpMCv_lFile.write(\"\".join(sList))\nCpMCv_lFile.write(\"\")\nCpMCv_lFile.close()\n\nCpMCv_vFile = open(\"CpMCv_v\",\"w\")\nCpMCv_vFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n CpMCv_vFile.write(\"\")\n sList = [\"\\t\" + str(CpMCv_v[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n CpMCv_vFile.write(\"\".join(sList))\nCpMCv_vFile.write(\"\")\nCpMCv_vFile.close()\n\nCpMCvFile = open(\"CpMCv\",\"w\")\nCpMCvFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n CpMCvFile.write(\"\")\n sList = [\"\\t\" + str(CpMCv[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n CpMCvFile.write(\"\".join(sList))\nCpMCvFile.write(\"\")\nCpMCvFile.close()\n\nH_lFile = open(\"H_l\",\"w\")\nH_lFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n H_lFile.write(\"\")\n sList = [\"\\t\" + str(H_l[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n H_lFile.write(\"\".join(sList))\nH_lFile.write(\"\")\nH_lFile.close()\n\nH_vFile = open(\"H_v\",\"w\")\nH_vFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n H_vFile.write(\"\")\n sList = [\"\\t\" + str(H_v[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n 
H_vFile.write(\"\".join(sList))\nH_vFile.write(\"\")\nH_vFile.close()\n\nHFile = open(\"H\",\"w\")\nHFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n HFile.write(\"\")\n sList = [\"\\t\" + str(H[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n HFile.write(\"\".join(sList))\nHFile.write(\"\")\nHFile.close()\n\nE_lFile = open(\"E_l\",\"w\")\nE_lFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n E_lFile.write(\"\")\n sList = [\"\\t\" + str(E_l[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n E_lFile.write(\"\".join(sList))\nE_lFile.write(\"\")\nE_lFile.close()\n\nE_vFile = open(\"E_v\",\"w\")\nE_vFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n E_vFile.write(\"\")\n sList = [\"\\t\" + str(E_v[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n E_vFile.write(\"\".join(sList))\nE_vFile.write(\"\")\nE_vFile.close()\n\nEFile = open(\"E\",\"w\")\nEFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n EFile.write(\"\")\n sList = [\"\\t\" + str(E[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n EFile.write(\"\".join(sList))\nEFile.write(\"\")\nEFile.close()\n\nS_lFile = open(\"S_l\",\"w\")\nS_lFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n S_lFile.write(\"\")\n sList = [\"\\t\" + str(S_l[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n S_lFile.write(\"\".join(sList))\nS_lFile.write(\"\")\nS_lFile.close()\n\nS_vFile = open(\"S_v\",\"w\")\nS_vFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n S_vFile.write(\"\")\n sList = [\"\\t\" + str(S_v[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n S_vFile.write(\"\".join(sList))\nS_vFile.write(\"\")\nS_vFile.close()\n\nSFile = open(\"S\",\"w\")\nSFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n SFile.write(\"\")\n sList = [\"\\t\" + str(S[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n SFile.write(\"\".join(sList))\nSFile.write(\"\")\nSFile.close()\n\nc_lFile = open(\"c_l\",\"w\")\nc_lFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n c_lFile.write(\"\")\n sList = [\"\\t\" + str(c_l[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n c_lFile.write(\"\".join(sList))\nc_lFile.write(\"\")\nc_lFile.close()\n\nc_vFile = open(\"c_v\",\"w\")\nc_vFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n c_vFile.write(\"\")\n sList = [\"\\t\" + str(c_v[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n c_vFile.write(\"\".join(sList))\nc_vFile.write(\"\")\nc_vFile.close()\n\ncFile = open(\"c\",\"w\")\ncFile.write(\"\\n\")\n\nfor i,p in enumerate(pRange):\n cFile.write(\"\")\n sList = [\"\\t\" + str(c[i][j]) + \" \" + str(Ts[i][j]) + \" \" + str(p) + \"\\n\" for j in range(len(Ts[i]))]\n cFile.write(\"\".join(sList))\ncFile.write(\"\")\ncFile.close()\n\n#Previous dT method to save computational time:\n#dT = drho/CP.PropsSI('d(D)/d(P)|T','D',rhoCur,'T',T,fluid_thermo)*CP.PropsSI('d(P)/d(T)|D','D',rhoCur,'T',T,fluid_thermo)\n\n#Previous dP method to save computational time:\n#drho/CP.PropsSI('d(D)/d(P)|T','D',rhoPseudoCrit,'T',Tcrit,fluid_thermo)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
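The writing section of the row above repeats one near-identical block per property file. A compact loop-based alternative (an illustrative sketch, not the author's code; it assumes the `mu`, `kappa`, `Cp`, `Ts`, and `pRange` tables already built by the script) would be:

```python
# Collapse the per-property writing blocks into one loop over a
# name -> table mapping. Extend the dict with H, E, S, c, CpMCv, etc.
tables = {'mu': mu, 'mu_l': mu_l, 'mu_v': mu_v,
          'kappa': kappa, 'kappa_l': kappa_l, 'kappa_v': kappa_v,
          'Cp': Cp, 'Cp_l': Cp_l, 'Cp_v': Cp_v}
for name, table in tables.items():
    with open(name, 'w') as f:
        for i, p in enumerate(pRange):
            # Same "\t<value> <T> <p>\n" layout as the original blocks.
            f.write(''.join('\t%s %s %s\n' % (table[i][j], Ts[i][j], p)
                            for j in range(len(Ts[i]))))
```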
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 6 12:20:45 2017
@author: 7
"""
from os import listdir
from PIL import Image as PImage
from scipy import misc
import numpy as np
from Image_loader import LoadImages
"""
def LoadImages(path):
# return array of images
imagesList = listdir(path)
loadedImages = []
for image in imagesList:
img = misc.imread(path + image)
loadedImages.append(img)
return loadedImages
"""
def ModifyImages(path,path1):
# modify images to same scale
imagesList = listdir(path)
for image in imagesList:
old_img = PImage.open(path + image)
old_size = old_img.size
new_size = (540,420)
new_img = PImage.new("L", new_size)
new_img.paste(old_img,((new_size[0]-old_size[0])//2,(new_size[1]-old_size[1])//2))
new_img.save(path1 + image)
"""
path = "train\\"
path1 = "train_modified\\"
ModifyImages(path,path1)
imgs = LoadImages(path1)
a = np.array( imgs )
print (a.shape)
print("finished")
path = "test\\"
path1 = "test_modified\\"
ModifyImages(path,path1)
imgs = LoadImages(path1)
a = np.array( imgs )
print (a.shape)
print("finished")
path = "train_cleaned\\"
path1 = "train_cleaned_modified\\"
ModifyImages(path,path1)
imgs = LoadImages(path1)
a = np.array( imgs )
print (a.shape)
print("finished")
"""
|
normal
|
{
"blob_id": "9cad36de6231f310ef9022f16f6ed0da83a003b3",
"index": 9757,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef ModifyImages(path, path1):\n imagesList = listdir(path)\n for image in imagesList:\n old_img = PImage.open(path + image)\n old_size = old_img.size\n new_size = 540, 420\n new_img = PImage.new('L', new_size)\n new_img.paste(old_img, ((new_size[0] - old_size[0]) // 2, (new_size\n [1] - old_size[1]) // 2))\n new_img.save(path1 + image)\n\n\n<mask token>\n",
"step-3": "<mask token>\nfrom os import listdir\nfrom PIL import Image as PImage\nfrom scipy import misc\nimport numpy as np\nfrom Image_loader import LoadImages\n<mask token>\n\n\ndef ModifyImages(path, path1):\n imagesList = listdir(path)\n for image in imagesList:\n old_img = PImage.open(path + image)\n old_size = old_img.size\n new_size = 540, 420\n new_img = PImage.new('L', new_size)\n new_img.paste(old_img, ((new_size[0] - old_size[0]) // 2, (new_size\n [1] - old_size[1]) // 2))\n new_img.save(path1 + image)\n\n\n<mask token>\n",
"step-4": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 6 12:20:45 2017\r\n\r\n@author: 7\r\n\"\"\"\r\n\r\nfrom os import listdir\r\nfrom PIL import Image as PImage\r\nfrom scipy import misc\r\nimport numpy as np\r\nfrom Image_loader import LoadImages\r\n\"\"\"\r\ndef LoadImages(path):\r\n # return array of images\r\n imagesList = listdir(path)\r\n loadedImages = []\r\n for image in imagesList:\r\n img = misc.imread(path + image)\r\n loadedImages.append(img)\r\n return loadedImages\r\n\"\"\"\r\n\r\n\r\ndef ModifyImages(path,path1):\r\n # modify images to same scale\r\n\r\n imagesList = listdir(path)\r\n for image in imagesList:\r\n old_img = PImage.open(path + image)\r\n old_size = old_img.size\r\n new_size = (540,420)\r\n new_img = PImage.new(\"L\", new_size) \r\n new_img.paste(old_img,((new_size[0]-old_size[0])//2,(new_size[1]-old_size[1])//2))\r\n new_img.save(path1 + image)\r\n\r\n\"\"\"\r\npath = \"train\\\\\"\r\npath1 = \"train_modified\\\\\"\r\nModifyImages(path,path1)\r\nimgs = LoadImages(path1)\r\na = np.array( imgs )\r\nprint (a.shape)\r\nprint(\"finished\")\r\n\r\n\r\npath = \"test\\\\\"\r\npath1 = \"test_modified\\\\\"\r\n\r\nModifyImages(path,path1)\r\nimgs = LoadImages(path1)\r\na = np.array( imgs )\r\nprint (a.shape)\r\nprint(\"finished\")\r\n\r\npath = \"train_cleaned\\\\\"\r\npath1 = \"train_cleaned_modified\\\\\"\r\n\r\nModifyImages(path,path1)\r\nimgs = LoadImages(path1)\r\na = np.array( imgs )\r\nprint (a.shape)\r\nprint(\"finished\")\r\n\"\"\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
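A one-off sketch of the padding step that ModifyImages above performs per directory entry, applied to a single file (the paths here are hypothetical):

```python
from PIL import Image as PImage

old_img = PImage.open('sample.png')   # hypothetical input path
new_img = PImage.new('L', (540, 420))  # black 540x420 grayscale canvas
# Paste the original centered on the canvas, as in ModifyImages.
new_img.paste(old_img, ((540 - old_img.size[0]) // 2,
                        (420 - old_img.size[1]) // 2))
new_img.save('sample_padded.png')
```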
"""
Python shell for Diofant.
This is just a normal Python shell (IPython shell if you have the
IPython package installed), that adds default imports and run
some initialization code.
"""
import argparse
import ast
import atexit
import code
import os
import readline
import rlcompleter
from diofant.interactive.session import (AutomaticSymbols,
IntegerDivisionWrapper,
unicode_identifiers)
__all__ = ()
parser = argparse.ArgumentParser(description=__doc__,
prog='python -m diofant')
parser.add_argument('--no-wrap-division',
help="Don't wrap integer divisions with Fraction",
action='store_true')
parser.add_argument('-a', '--auto-symbols',
help="Automatically create missing Symbol's",
action='store_true')
parser.add_argument('--no-ipython', help="Don't use IPython",
action='store_true')
parser.add_argument('--unicode-identifiers',
help='Allow any unicode identifiers',
action='store_true')
def main():
args, ipython_args = parser.parse_known_args()
lines = ['from diofant import *',
'init_printing()',
"a, b, c, d, t, x, y, z = symbols('a:d t x:z')",
"k, m, n = symbols('k m n', integer=True)",
"f, g, h = symbols('f g h', cls=Function)",
'init_printing(pretty_print=True, use_unicode=True)']
try:
import IPython
import traitlets
except ImportError:
args.no_ipython = True
if not args.no_ipython:
config = traitlets.config.loader.Config()
shell = config.InteractiveShell
ast_transformers = shell.ast_transformers
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
shell.confirm_exit = False
config.TerminalIPythonApp.display_banner = False
config.TerminalInteractiveShell.autoformatter = None
app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)
app.initialize(ipython_args)
shell = app.shell
for l in lines:
shell.run_cell(l, silent=True)
if args.auto_symbols:
shell.run_cell('from diofant.interactive.session import AutomaticSymbols')
shell.run_cell('ip = get_ipython()')
shell.run_cell('ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')
shell.run_cell('del ip')
if args.unicode_identifiers:
shell.run_cell('from diofant.interactive.session import unicode_identifiers')
shell.run_cell('ip = get_ipython()')
shell.run_cell('ip.input_transformers_cleanup.append(unicode_identifiers)')
shell.run_cell('del ip')
app.start()
else:
ast_transformers = []
source_transformers = []
ns = {}
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
if args.auto_symbols:
ast_transformers.append(AutomaticSymbols(ns))
if args.unicode_identifiers:
source_transformers.append(unicode_identifiers)
class DiofantConsole(code.InteractiveConsole):
"""An interactive console with readline support."""
def __init__(self, ast_transformers=[],
source_transformers=[], **kwargs):
super().__init__(**kwargs)
readline.set_completer(rlcompleter.Completer(ns).complete)
readline.parse_and_bind('tab: complete')
history = os.path.expanduser('~/.python_history')
readline.read_history_file(history)
atexit.register(readline.write_history_file, history)
self.ast_transformers = ast_transformers
self.source_transformers = source_transformers
def runsource(self, source, filename='<input>', symbol='single'):
for t in self.source_transformers:
source = '\n'.join(t(source.splitlines()))
try:
tree = ast.parse(source)
except SyntaxError:
return True
for t in self.ast_transformers:
tree = t.visit(tree)
ast.fix_missing_locations(tree)
source = ast.unparse(tree)
source = source.split('\n')
source = ';'.join(source)
return super().runsource(source, filename=filename, symbol=symbol)
c = DiofantConsole(ast_transformers=ast_transformers,
source_transformers=source_transformers, locals=ns)
for l in lines:
c.push(l)
c.interact('', '')
if __name__ == '__main__': # pragma: no branch
main()
|
normal
|
{
"blob_id": "80e395715d3ae216beb17e7caed1d8d03c5c56de",
"index": 9943,
"step-1": "<mask token>\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n lines = ['from diofant import *', 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell(\n 'from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell(\n 'from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[], source_transformers=[],\n **kwargs):\n super().__init__(**kwargs)\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=\n symbol)\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--no-wrap-division', help=\n \"Don't wrap integer divisions with Fraction\", action='store_true')\nparser.add_argument('-a', '--auto-symbols', help=\n \"Automatically create missing Symbol's\", action='store_true')\nparser.add_argument('--no-ipython', help=\"Don't use IPython\", action=\n 'store_true')\nparser.add_argument('--unicode-identifiers', help=\n 'Allow any unicode identifiers', action='store_true')\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n lines = ['from diofant import *', 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell(\n 'from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell(\n 'from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[], source_transformers=[],\n **kwargs):\n super().__init__(**kwargs)\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=\n symbol)\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n__all__ = ()\nparser = argparse.ArgumentParser(description=__doc__, prog='python -m diofant')\nparser.add_argument('--no-wrap-division', help=\n \"Don't wrap integer divisions with Fraction\", action='store_true')\nparser.add_argument('-a', '--auto-symbols', help=\n \"Automatically create missing Symbol's\", action='store_true')\nparser.add_argument('--no-ipython', help=\"Don't use IPython\", action=\n 'store_true')\nparser.add_argument('--unicode-identifiers', help=\n 'Allow any unicode identifiers', action='store_true')\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n lines = ['from diofant import *', 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell(\n 'from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell(\n 'from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[], source_transformers=[],\n **kwargs):\n super().__init__(**kwargs)\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=\n symbol)\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\nif 
__name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport argparse\nimport ast\nimport atexit\nimport code\nimport os\nimport readline\nimport rlcompleter\nfrom diofant.interactive.session import AutomaticSymbols, IntegerDivisionWrapper, unicode_identifiers\n__all__ = ()\nparser = argparse.ArgumentParser(description=__doc__, prog='python -m diofant')\nparser.add_argument('--no-wrap-division', help=\n \"Don't wrap integer divisions with Fraction\", action='store_true')\nparser.add_argument('-a', '--auto-symbols', help=\n \"Automatically create missing Symbol's\", action='store_true')\nparser.add_argument('--no-ipython', help=\"Don't use IPython\", action=\n 'store_true')\nparser.add_argument('--unicode-identifiers', help=\n 'Allow any unicode identifiers', action='store_true')\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n lines = ['from diofant import *', 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell(\n 'from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell(\n 'from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell(\n 'ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[], source_transformers=[],\n **kwargs):\n super().__init__(**kwargs)\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return True\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, 
filename=filename, symbol=\n symbol)\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nPython shell for Diofant.\n\nThis is just a normal Python shell (IPython shell if you have the\nIPython package installed), that adds default imports and run\nsome initialization code.\n\"\"\"\n\nimport argparse\nimport ast\nimport atexit\nimport code\nimport os\nimport readline\nimport rlcompleter\n\nfrom diofant.interactive.session import (AutomaticSymbols,\n IntegerDivisionWrapper,\n unicode_identifiers)\n\n\n__all__ = ()\n\n\nparser = argparse.ArgumentParser(description=__doc__,\n prog='python -m diofant')\nparser.add_argument('--no-wrap-division',\n help=\"Don't wrap integer divisions with Fraction\",\n action='store_true')\nparser.add_argument('-a', '--auto-symbols',\n help=\"Automatically create missing Symbol's\",\n action='store_true')\nparser.add_argument('--no-ipython', help=\"Don't use IPython\",\n action='store_true')\nparser.add_argument('--unicode-identifiers',\n help='Allow any unicode identifiers',\n action='store_true')\n\n\ndef main():\n args, ipython_args = parser.parse_known_args()\n\n lines = ['from diofant import *',\n 'init_printing()',\n \"a, b, c, d, t, x, y, z = symbols('a:d t x:z')\",\n \"k, m, n = symbols('k m n', integer=True)\",\n \"f, g, h = symbols('f g h', cls=Function)\",\n 'init_printing(pretty_print=True, use_unicode=True)']\n\n try:\n import IPython\n import traitlets\n except ImportError:\n args.no_ipython = True\n\n if not args.no_ipython:\n config = traitlets.config.loader.Config()\n shell = config.InteractiveShell\n ast_transformers = shell.ast_transformers\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n shell.confirm_exit = False\n config.TerminalIPythonApp.display_banner = False\n config.TerminalInteractiveShell.autoformatter = None\n\n app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)\n app.initialize(ipython_args)\n shell = app.shell\n for l in lines:\n shell.run_cell(l, silent=True)\n if args.auto_symbols:\n shell.run_cell('from diofant.interactive.session import AutomaticSymbols')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell('ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')\n shell.run_cell('del ip')\n if args.unicode_identifiers:\n shell.run_cell('from diofant.interactive.session import unicode_identifiers')\n shell.run_cell('ip = get_ipython()')\n shell.run_cell('ip.input_transformers_cleanup.append(unicode_identifiers)')\n shell.run_cell('del ip')\n app.start()\n else:\n ast_transformers = []\n source_transformers = []\n ns = {}\n\n if not args.no_wrap_division:\n ast_transformers.append(IntegerDivisionWrapper())\n if args.auto_symbols:\n ast_transformers.append(AutomaticSymbols(ns))\n if args.unicode_identifiers:\n source_transformers.append(unicode_identifiers)\n\n class DiofantConsole(code.InteractiveConsole):\n \"\"\"An interactive console with readline support.\"\"\"\n\n def __init__(self, ast_transformers=[],\n source_transformers=[], **kwargs):\n super().__init__(**kwargs)\n\n readline.set_completer(rlcompleter.Completer(ns).complete)\n readline.parse_and_bind('tab: complete')\n\n history = os.path.expanduser('~/.python_history')\n readline.read_history_file(history)\n atexit.register(readline.write_history_file, history)\n self.ast_transformers = ast_transformers\n self.source_transformers = source_transformers\n\n def runsource(self, source, filename='<input>', symbol='single'):\n for t in self.source_transformers:\n source = '\\n'.join(t(source.splitlines()))\n\n try:\n tree = ast.parse(source)\n except SyntaxError:\n return 
True\n\n for t in self.ast_transformers:\n tree = t.visit(tree)\n ast.fix_missing_locations(tree)\n\n source = ast.unparse(tree)\n source = source.split('\\n')\n source = ';'.join(source)\n return super().runsource(source, filename=filename, symbol=symbol)\n\n c = DiofantConsole(ast_transformers=ast_transformers,\n source_transformers=source_transformers, locals=ns)\n\n for l in lines:\n c.push(l)\n c.interact('', '')\n\n\nif __name__ == '__main__': # pragma: no branch\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
from flask import Flask, request, render_template
import pickle
from werkzeug.serving import run_simple

# Templates live in ./template (a non-default folder name).
app = Flask(__name__, template_folder='template')
# Load the model serialized at training time.
model = pickle.load(open('model.pkl', 'rb'))

@app.route('/')
def home():
    return render_template('index.html')

@app.route('/predict', methods=['POST'])
def predict():
    # Collect the posted form fields as integers and shape them as one sample.
    arr = [int(x) for x in request.form.values()]
    arr2 = [np.array(arr)]
    output = model.predict(arr2)
    return render_template('index.html', prediction_text=output)

if __name__ == '__main__':
    run_simple('localhost', 8001, app, use_reloader=False)
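
# --- Hedged usage sketch (not part of the original app) ---
# A minimal client request, assuming index.html posts integer feature fields;
# the field names and values below are illustrative only.
#
#   import requests
#   resp = requests.post('http://localhost:8001/predict',
#                        data={'f1': 1, 'f2': 2, 'f3': 3})
#   print(resp.status_code)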
|
normal
|
{
"blob_id": "02b760b16cdcd42f8d8d7222b439da87fb8076a3",
"index": 4959,
"step-1": "<mask token>\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n arr = [int(x) for x in request.form.values()]\n arr2 = [np.array(arr)]\n output = model.predict(arr2)\n return render_template('index.html', prediction_text=output)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n arr = [int(x) for x in request.form.values()]\n arr2 = [np.array(arr)]\n output = model.predict(arr2)\n return render_template('index.html', prediction_text=output)\n\n\nif __name__ == '__main__':\n run_simple('localhost', 8001, app, use_reloader=False)\n",
"step-3": "<mask token>\napp = Flask(__name__, template_folder='template')\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n arr = [int(x) for x in request.form.values()]\n arr2 = [np.array(arr)]\n output = model.predict(arr2)\n return render_template('index.html', prediction_text=output)\n\n\nif __name__ == '__main__':\n run_simple('localhost', 8001, app, use_reloader=False)\n",
"step-4": "import numpy as np\nfrom flask import Flask, request, render_template\nimport pickle\nfrom werkzeug.serving import run_simple\napp = Flask(__name__, template_folder='template')\nmodel = pickle.load(open('model.pkl', 'rb'))\n\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n arr = [int(x) for x in request.form.values()]\n arr2 = [np.array(arr)]\n output = model.predict(arr2)\n return render_template('index.html', prediction_text=output)\n\n\nif __name__ == '__main__':\n run_simple('localhost', 8001, app, use_reloader=False)\n",
"step-5": "import numpy as np\r\nfrom flask import Flask,request,render_template\r\nimport pickle\r\nfrom werkzeug.serving import run_simple\r\n\r\napp=Flask(__name__,template_folder='template')\r\nmodel=pickle.load(open(\"model.pkl\",'rb'))\r\n\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef predict():\r\n arr=[int(x) for x in request.form.values()]\r\n arr2=[np.array(arr)]\r\n output=model.predict(arr2)\r\n # o2=round(output)\r\n return render_template('index.html',prediction_text=output)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run_simple('localhost',8001,app,use_reloader=False)",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import numpy as np
import os
import sys

file_path = sys.argv[1]

triplets = np.loadtxt(os.path.join(file_path, "kaggle_visible_evaluation_triplets.txt"),
                      delimiter="\t", dtype="str")

# Map each unique user string to a 1-based integer id. The original left
# user_nr/user_id undefined; the loop below restores the evident intent of
# iterating the ndenumerate pairs (index tuple, unique user value).
enum_users = np.ndenumerate(np.unique(triplets[:, 0]))
for user_nr, user_id in enum_users:
    triplets[triplets[:, 0] == user_id, 0] = user_nr[0] + 1

print(triplets)
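
# --- Hedged alternative sketch (same remapping intent assumed) ---
# np.unique with return_inverse=True yields the integer codes directly,
# avoiding the per-user Python loop:
#
#   users, codes = np.unique(triplets[:, 0], return_inverse=True)
#   triplets[:, 0] = codes + 1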
|
normal
|
{
"blob_id": "f3d9e783491916e684cda659afa73ce5a6a5894a",
"index": 4063,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(enum_users)\n<mask token>\nprint(triplets)\n",
"step-3": "<mask token>\nfile_path = sys.argv[1]\ntriplets = np.loadtxt(os.path.join(file_path,\n 'kaggle_visible_evaluation_triplets.txt'), delimiter='\\t', dtype='str')\nenum_users = np.ndenumerate(np.unique(triplets[:, 0]))\nprint(enum_users)\ntriplets[triplets[:, 0] == user_id[user_nr[0]], 0] = user_nr + 1\nprint(triplets)\n",
"step-4": "import numpy as np\nimport os\nimport sys\nfile_path = sys.argv[1]\ntriplets = np.loadtxt(os.path.join(file_path,\n 'kaggle_visible_evaluation_triplets.txt'), delimiter='\\t', dtype='str')\nenum_users = np.ndenumerate(np.unique(triplets[:, 0]))\nprint(enum_users)\ntriplets[triplets[:, 0] == user_id[user_nr[0]], 0] = user_nr + 1\nprint(triplets)\n",
"step-5": "import numpy as np\n\nimport os\nimport sys\n\nfile_path = sys.argv[1]\n\ntriplets = np.loadtxt(os.path.join(file_path, \"kaggle_visible_evaluation_triplets.txt\"),\n delimiter=\"\\t\", dtype=\"str\")\n\nenum_users = np.ndenumerate(np.unique(triplets[:, 0]))\n\nprint(enum_users)\n\ntriplets[triplets[:, 0] == user_id[user_nr[0]], 0] = user_nr + 1\n\nprint(triplets)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# The original snippet is an indented method; a minimal LeetCode-style
# Solution class is assumed here so it runs standalone.
class Solution:
    def maxProduct(self, A):
        size = len(A)
        if size == 1:
            return A[0]
        # Max[i]/Min[i]: largest/smallest product of a subarray ending at i;
        # tracking the minimum handles sign flips from negative numbers.
        Max = [A[0]]
        Min = [A[0]]
        for i in range(1, size):
            Max.append(max(Max[i - 1] * A[i], Min[i - 1] * A[i], A[i]))
            Min.append(min(Max[i - 1] * A[i], Min[i - 1] * A[i], A[i]))
        return max(Max)
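
# Quick check (hedged example): the answer for [2, 3, -2, 4] is 6
# (the subarray [2, 3]).
print(Solution().maxProduct([2, 3, -2, 4]))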
|
normal
|
{
"blob_id": "1fafbc1e415b5089afcd2976d4f0dc2aa1c5a144",
"index": 1077,
"step-1": " def maxProduct(self, A):\n size= len(A)\n if size==1:\n return A[0]\n Max=[A[0]]\n Min=[A[0]]\n for i in range(1,size):\n Max.append(max(max(Max[i-1]*A[i],Min[i-1]*A[i]),A[i]))\n Min.append(min(min(Max[i-1]*A[i],Min[i-1]*A[i]),A[i]))\n tmax=Max[0]\n for i in range(0,size):\n if Max[i]>tmax:\n tmax=Max[i]\n return tmax\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Aleksei Golovlev, group BSBO-07-19
def lucky(ticket):
    def sum_(number):
        # Zero-pad to six digits, then compare the digit sums of the halves.
        number = str(number)
        while len(number) != 6:
            number = '0' + number
        x = list(map(int, number))
        return sum(x[:3]) == sum(x[3:])

    # 'Счастливый' means "Lucky", 'Несчастливый' means "Unlucky": a ticket is
    # lucky when it and lastTicket agree on whether their halves balance.
    return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'

lastTicket = 123456
print(lucky(100001))

lastTicket = 123321
print(lucky(100001))
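
# Worked example (hedged): 100001 balances (1+0+0 == 0+0+1) and so does
# 123321 (1+2+3 == 3+2+1), so the second call prints 'Счастливый';
# 123456 does not balance (6 != 15), so the first call prints 'Несчастливый'.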
|
normal
|
{
"blob_id": "85ac851e28dba3816f18fefb727001b8e396cc2b",
"index": 5278,
"step-1": "<mask token>\n",
"step-2": "def lucky(ticket):\n\n def sum_(number):\n number = str(number)\n while len(number) != 6:\n number = '0' + number\n x = list(map(int, number))\n return sum(x[:3]) == sum(x[3:])\n return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'\n\n\n<mask token>\n",
"step-3": "def lucky(ticket):\n\n def sum_(number):\n number = str(number)\n while len(number) != 6:\n number = '0' + number\n x = list(map(int, number))\n return sum(x[:3]) == sum(x[3:])\n return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'\n\n\n<mask token>\nprint(lucky(100001))\n<mask token>\nprint(lucky(100001))\n",
"step-4": "def lucky(ticket):\n\n def sum_(number):\n number = str(number)\n while len(number) != 6:\n number = '0' + number\n x = list(map(int, number))\n return sum(x[:3]) == sum(x[3:])\n return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'\n\n\nlastTicket = 123456\nprint(lucky(100001))\nlastTicket = 123321\nprint(lucky(100001))\n",
"step-5": "# Алексей Головлев, группа БСБО-07-19\n\ndef lucky(ticket):\n def sum_(number):\n number = str(number)\n while len(number) != 6:\n number = '0' + number\n x = list(map(int, number))\n return sum(x[:3]) == sum(x[3:])\n\n return 'Счастливый' if sum_(ticket) == sum_(lastTicket) else 'Несчастливый'\n\n\nlastTicket = 123456\nprint(lucky(100001))\n\nlastTicket = 123321\nprint(lucky(100001))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
def main():
# String to format output
format_string = "%s %s %s %s %s %s %s %s %s\n"
while True:
# Read 14 lines at a time from stdin for wikipedia dataset
edit = [sys.stdin.readline() for i in range(14)]
# Break if we've reached the end of stdin
if edit[13] == "":
break
# Parse data from revision line
revision = edit[0].split(' ')
article_id,rev_id,title,timestamp,username,user_id = 'a'+revision[1],'e'+revision[2],revision[3],revision[4],revision[5],'u'+revision[6].strip()
# Ignore anonymous edits
if user_id.startswith('uip'):
continue
# Parse article category
category_line = edit[1].split(' ')
if len(category_line) != 1:
category = category_line[1].strip()
else:
category = ""
# Parse whether edit is minor and number of words edited
minor = edit[11].split(' ')[1].strip()
word_count = edit[12].split(' ')[1].strip()
# Create output line and write to stdout
outline = format_string % (article_id,rev_id,user_id,username,title,timestamp,category,minor,word_count)
sys.stdout.write(outline)
if __name__ == '__main__':
main()
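
# Hedged usage sketch: stream a 14-line-per-revision dump through the parser
# (the file and script names below are illustrative):
#
#   zcat enwiki-edits.txt.gz | python parse_edits.py > edits.txt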
|
normal
|
{
"blob_id": "f6b2169a4644f4f39bbdebd9bb9c7cc637b54f8b",
"index": 9920,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n format_string = '%s %s %s %s %s %s %s %s %s\\n'\n while True:\n edit = [sys.stdin.readline() for i in range(14)]\n if edit[13] == '':\n break\n revision = edit[0].split(' ')\n article_id, rev_id, title, timestamp, username, user_id = ('a' +\n revision[1], 'e' + revision[2], revision[3], revision[4],\n revision[5], 'u' + revision[6].strip())\n if user_id.startswith('uip'):\n continue\n category_line = edit[1].split(' ')\n if len(category_line) != 1:\n category = category_line[1].strip()\n else:\n category = ''\n minor = edit[11].split(' ')[1].strip()\n word_count = edit[12].split(' ')[1].strip()\n outline = format_string % (article_id, rev_id, user_id, username,\n title, timestamp, category, minor, word_count)\n sys.stdout.write(outline)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n format_string = '%s %s %s %s %s %s %s %s %s\\n'\n while True:\n edit = [sys.stdin.readline() for i in range(14)]\n if edit[13] == '':\n break\n revision = edit[0].split(' ')\n article_id, rev_id, title, timestamp, username, user_id = ('a' +\n revision[1], 'e' + revision[2], revision[3], revision[4],\n revision[5], 'u' + revision[6].strip())\n if user_id.startswith('uip'):\n continue\n category_line = edit[1].split(' ')\n if len(category_line) != 1:\n category = category_line[1].strip()\n else:\n category = ''\n minor = edit[11].split(' ')[1].strip()\n word_count = edit[12].split(' ')[1].strip()\n outline = format_string % (article_id, rev_id, user_id, username,\n title, timestamp, category, minor, word_count)\n sys.stdout.write(outline)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\n\n\ndef main():\n format_string = '%s %s %s %s %s %s %s %s %s\\n'\n while True:\n edit = [sys.stdin.readline() for i in range(14)]\n if edit[13] == '':\n break\n revision = edit[0].split(' ')\n article_id, rev_id, title, timestamp, username, user_id = ('a' +\n revision[1], 'e' + revision[2], revision[3], revision[4],\n revision[5], 'u' + revision[6].strip())\n if user_id.startswith('uip'):\n continue\n category_line = edit[1].split(' ')\n if len(category_line) != 1:\n category = category_line[1].strip()\n else:\n category = ''\n minor = edit[11].split(' ')[1].strip()\n word_count = edit[12].split(' ')[1].strip()\n outline = format_string % (article_id, rev_id, user_id, username,\n title, timestamp, category, minor, word_count)\n sys.stdout.write(outline)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import sys\n\ndef main():\n\t# String to format output\n\tformat_string = \"%s %s %s %s %s %s %s %s %s\\n\"\n\twhile True:\n\t\t# Read 14 lines at a time from stdin for wikipedia dataset\n\t\tedit = [sys.stdin.readline() for i in range(14)]\n\t\t# Break if we've reached the end of stdin\n\t\tif edit[13] == \"\":\n\t\t\tbreak\n\t\t# Parse data from revision line\n\t\trevision = edit[0].split(' ')\n\t\tarticle_id,rev_id,title,timestamp,username,user_id = 'a'+revision[1],'e'+revision[2],revision[3],revision[4],revision[5],'u'+revision[6].strip()\n\t\t# Ignore anonymous edits\n\t\tif user_id.startswith('uip'):\n\t\t\tcontinue\n\t\t# Parse article category\n\t\tcategory_line = edit[1].split(' ')\n\t\tif len(category_line) != 1:\n\t\t\tcategory = category_line[1].strip()\n\t\telse:\n\t\t\tcategory = \"\"\n\t\t# Parse whether edit is minor and number of words edited\n\t\tminor = edit[11].split(' ')[1].strip()\n\t\tword_count = edit[12].split(' ')[1].strip()\n\t\t# Create output line and write to stdout\n\t\toutline = format_string % (article_id,rev_id,user_id,username,title,timestamp,category,minor,word_count)\n\t\tsys.stdout.write(outline)\n\nif __name__ == '__main__':\n\tmain()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models

class TamLicense(models.Model):
    # Verbose name is Italian: "Insert your license code here."
    license = models.TextField("Inserisci qui il tuo codice licenza.")
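
# Hedged usage sketch (assumes migrations for this app have been applied):
#
#   TamLicense.objects.create(license='<license key>')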
|
normal
|
{
"blob_id": "1daecce86769e36a17fe2935f89b9266a0197cf0",
"index": 3942,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TamLicense(models.Model):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TamLicense(models.Model):\n license = models.TextField('Inserisci qui il tuo codice licenza.')\n",
"step-4": "from django.db import models\n\n\nclass TamLicense(models.Model):\n license = models.TextField('Inserisci qui il tuo codice licenza.')\n",
"step-5": "from django.db import models\n\n\nclass TamLicense(models.Model):\n license = models.TextField(\"Inserisci qui il tuo codice licenza.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# 3x3 button-grid fragment (tic-tac-toe style); the original showed only buttons 6-9.
# The import, Tk root, and checker() stub are assumed scaffolding so the fragment
# runs standalone; the invalid 'heigh' option is corrected to 'height'.
from tkinter import Tk, Button, S, N, E, W

tk = Tk()

def checker(button):
    button.config(text='X')  # placeholder for the original (unshown) handler

button6 = Button(tk, text=' ', font='Times 26 bold', height=4, width=8, command=lambda: checker(button6))
button6.grid(row=2, column=2, sticky=S + N + E + W)
button7 = Button(tk, text=' ', font='Times 26 bold', height=4, width=8, command=lambda: checker(button7))
button7.grid(row=3, column=0, sticky=S + N + E + W)
button8 = Button(tk, text=' ', font='Times 26 bold', height=4, width=8, command=lambda: checker(button8))
button8.grid(row=3, column=1, sticky=S + N + E + W)
button9 = Button(tk, text=' ', font='Times 26 bold', height=4, width=8, command=lambda: checker(button9))
button9.grid(row=3, column=2, sticky=S + N + E + W)
tk.mainloop()
|
normal
|
{
"blob_id": "e543c7f7f1b249e53b8ebf82641ec398abf557af",
"index": 477,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbutton6.grid(row=2, column=2, sticky=S + N + E + W)\n<mask token>\nbutton7.grid(row=3, column=0, sticky=S + N + E + W)\n<mask token>\nbutton8.grid(row=3, column=1, sticky=S + N + E + W)\n<mask token>\nbutton9.grid(row=3, column=2, sticky=S + N + E + W)\ntk.mainloop()\n",
"step-3": "button6 = Button(tk, text=' ', font='Times 26 bold', heigh=4, width=8,\n command=lambda : checker(button6))\nbutton6.grid(row=2, column=2, sticky=S + N + E + W)\nbutton7 = Button(tk, text=' ', font='Times 26 bold', heigh=4, width=8,\n command=lambda : checker(button7))\nbutton7.grid(row=3, column=0, sticky=S + N + E + W)\nbutton8 = Button(tk, text=' ', font='Times 26 bold', heigh=4, width=8,\n command=lambda : checker(button8))\nbutton8.grid(row=3, column=1, sticky=S + N + E + W)\nbutton9 = Button(tk, text=' ', font='Times 26 bold', heigh=4, width=8,\n command=lambda : checker(button9))\nbutton9.grid(row=3, column=2, sticky=S + N + E + W)\ntk.mainloop()\n",
"step-4": "button6 = Button(tk,text=\" \",font=('Times 26 bold'), heigh = 4, width = 8, command=lambda:checker(button6))\nbutton6.grid(row=2, column=2,sticky = S+N+E+W)\nbutton7 = Button(tk,text=\" \",font=('Times 26 bold'), heigh = 4, width = 8, command=lambda:checker(button7))\nbutton7.grid(row=3, column=0,sticky = S+N+E+W)\nbutton8 = Button(tk,text=\" \",font=('Times 26 bold'), heigh = 4, width = 8, command=lambda:checker(button8))\nbutton8.grid(row=3, column=1,sticky = S+N+E+W)\nbutton9 = Button(tk,text=\" \",font=('Times 26 bold'), heigh = 4, width = 8, command=lambda:checker(button9))\nbutton9.grid(row=3, column=2,sticky = S+N+E+W)\ntk.mainloop()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Decimal size multipliers: giga, mega, kilo.
G = 1000000000
M = 1000000
K = 1000
|
normal
|
{
"blob_id": "f765f54a89a98a5f61c70a37379860f170444c0a",
"index": 4069,
"step-1": "<mask token>\n",
"step-2": "G = 1000000000\nM = 1000000\nK = 1000\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
no = int(input("enter no:"))
rev = 0
while no != 0:
    r = no % 10         # peel off the last digit
    no = no // 10       # drop it from the number
    rev = rev * 10 + r  # append it to the reversed value
print("reverse no is:", rev)
|
normal
|
{
"blob_id": "b2371f9c774c605a52ff1a4fae2dd44a856076aa",
"index": 5522,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile no != 0:\n r = no % 10\n no = no // 10\n rev = rev * 10 + r\nprint('reverse no is:', rev)\n",
"step-3": "no = int(input('enter no:'))\nrev = 0\nwhile no != 0:\n r = no % 10\n no = no // 10\n rev = rev * 10 + r\nprint('reverse no is:', rev)\n",
"step-4": "no=int(input(\"enter no:\"))\nrev=0\nwhile no!=0:\n r=no%10\n no=no//10\n rev=rev*10+r\nprint(\"reverse no is:\",rev)\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Function names cannot contain spaces; the original 'minutes to hours'
# was a SyntaxError and is renamed with underscores.
def minutes_to_hours(minutes):
    hours = minutes / 60
    return hours

print(minutes_to_hours(70))  # 70 minutes ~= 1.17 hours
|
normal
|
{
"blob_id": "a1b33d0a8a074bc7a2a3e2085b1ff01267e00d3b",
"index": 8815,
"step-1": "def minutes to hours(minutes) :\r\n hours = minutes/60\r\n return hours\r\n\r\nprint(minutes to hours(70))\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import random
import datetime
import time
import json

#
l_target_path = "E:/code/PYTHON_TRAINING/Training/Apr2020/BillingSystem/bills/"

while True:

    l_store_id = random.randint(1, 4)
    now = datetime.datetime.now()
    l_bill_id = now.strftime("%Y%m%d%H%M%S")

    # Generate a random bill date between 2000-01-01 and 2020-01-01.
    start_date = datetime.date(2000, 1, 1)
    end_date = datetime.date(2020, 1, 1)
    days_between_dates = (end_date - start_date).days
    random_number_of_days = random.randrange(days_between_dates)
    l_date = start_date + datetime.timedelta(days=random_number_of_days)

    # Random basket: product id -> quantity.
    l_bill_details = {}
    for i in range(random.randint(1, 25)):
        l_prod_id = random.randint(1, 25)
        l_qty = random.randint(1, 20)
        l_bill_details[l_prod_id] = l_qty

    l_data = {"bill_id": l_bill_id,
              "store_id": l_store_id,
              "bill_date": l_date,
              "bill_details": l_bill_details}

    print(l_data)

    # str(l_data) is not valid JSON (single quotes, raw date object);
    # json.dumps with default=str produces a parseable .json file.
    with open(l_target_path + l_bill_id + ".json", "w") as new_file:
        new_file.write(json.dumps(l_data, default=str))

    time.sleep(3)
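
# Hedged round-trip sketch: files written above can be read back with
# json.load; note that product-id keys come back as strings per JSON rules.
#
#   with open(l_target_path + '20200101120000.json') as f:  # illustrative name
#       bill = json.load(f)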
|
normal
|
{
"blob_id": "fad2ad89e4d0f04fad61e27048397a5702870ca9",
"index": 6177,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n l_store_id = random.randint(1, 4)\n now = datetime.datetime.now()\n l_bill_id = now.strftime('%Y%m%d%H%M%S')\n start_date = datetime.date(2000, 1, 1)\n end_date = datetime.date(2020, 1, 1)\n time_between_dates = end_date - start_date\n days_between_dates = time_between_dates.days\n random_number_of_days = random.randrange(days_between_dates)\n l_date = start_date + datetime.timedelta(days=random_number_of_days)\n l_bill_details = {}\n for i in range(random.randint(1, 25)):\n l_prod_id = random.randint(1, 25)\n l_qty = random.randint(1, 20)\n l_bill_details[l_prod_id] = l_qty\n l_data = {'bill_id': l_bill_id, 'store_id': l_store_id, 'bill_date':\n l_date, 'bill_details': l_bill_details}\n print(l_data)\n new_file = open(l_target_path + l_bill_id + '.json', 'w')\n new_file.write(str(l_data))\n new_file.close()\n time.sleep(3)\n",
"step-3": "<mask token>\nl_target_path = 'E:/code/PYTHON_TRAINING/Training/Apr2020/BillingSystem/bills/'\nwhile True:\n l_store_id = random.randint(1, 4)\n now = datetime.datetime.now()\n l_bill_id = now.strftime('%Y%m%d%H%M%S')\n start_date = datetime.date(2000, 1, 1)\n end_date = datetime.date(2020, 1, 1)\n time_between_dates = end_date - start_date\n days_between_dates = time_between_dates.days\n random_number_of_days = random.randrange(days_between_dates)\n l_date = start_date + datetime.timedelta(days=random_number_of_days)\n l_bill_details = {}\n for i in range(random.randint(1, 25)):\n l_prod_id = random.randint(1, 25)\n l_qty = random.randint(1, 20)\n l_bill_details[l_prod_id] = l_qty\n l_data = {'bill_id': l_bill_id, 'store_id': l_store_id, 'bill_date':\n l_date, 'bill_details': l_bill_details}\n print(l_data)\n new_file = open(l_target_path + l_bill_id + '.json', 'w')\n new_file.write(str(l_data))\n new_file.close()\n time.sleep(3)\n",
"step-4": "import random\nimport datetime\nimport os\nimport time\nimport json\nl_target_path = 'E:/code/PYTHON_TRAINING/Training/Apr2020/BillingSystem/bills/'\nwhile True:\n l_store_id = random.randint(1, 4)\n now = datetime.datetime.now()\n l_bill_id = now.strftime('%Y%m%d%H%M%S')\n start_date = datetime.date(2000, 1, 1)\n end_date = datetime.date(2020, 1, 1)\n time_between_dates = end_date - start_date\n days_between_dates = time_between_dates.days\n random_number_of_days = random.randrange(days_between_dates)\n l_date = start_date + datetime.timedelta(days=random_number_of_days)\n l_bill_details = {}\n for i in range(random.randint(1, 25)):\n l_prod_id = random.randint(1, 25)\n l_qty = random.randint(1, 20)\n l_bill_details[l_prod_id] = l_qty\n l_data = {'bill_id': l_bill_id, 'store_id': l_store_id, 'bill_date':\n l_date, 'bill_details': l_bill_details}\n print(l_data)\n new_file = open(l_target_path + l_bill_id + '.json', 'w')\n new_file.write(str(l_data))\n new_file.close()\n time.sleep(3)\n",
"step-5": "import random\nimport datetime\nimport os\nimport time\nimport json\n\n#\nl_target_path = \"E:/code/PYTHON_TRAINING/Training/Apr2020/BillingSystem/bills/\"\n\n\nwhile True:\n\n l_store_id = random.randint(1, 4)\n now = datetime.datetime.now()\n l_bill_id = now.strftime(\"%Y%m%d%H%M%S\")\n\n\n # Generate Random Date\n start_date = datetime.date(2000, 1, 1)\n end_date = datetime.date(2020, 1, 1)\n time_between_dates = end_date - start_date\n days_between_dates = time_between_dates.days\n random_number_of_days = random.randrange(days_between_dates)\n\n l_date = start_date + datetime.timedelta(days=random_number_of_days)\n\n l_bill_details = {}\n\n for i in range(random.randint(1, 25)):\n\n l_prod_id = random.randint(1,25)\n l_qty = random.randint(1,20)\n l_bill_details[l_prod_id] = l_qty\n\n l_data = { \"bill_id\":l_bill_id\n ,\"store_id\":l_store_id\n ,\"bill_date\":l_date\n ,\"bill_details\":l_bill_details}\n \n print(l_data) #json.dumps(l_data)\n\n new_file = open(l_target_path + l_bill_id + \".json\", \"w\")\n new_file.write(str(l_data))\n new_file.close()\n\n\n time.sleep(3)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.apps import AppConfig
class GerenciaLedsConfig(AppConfig):
name = 'gerencia_leds'
|
normal
|
{
"blob_id": "0754103c2d8cef0fd23b03a8f64ade8f049bce48",
"index": 4890,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GerenciaLedsConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GerenciaLedsConfig(AppConfig):\n name = 'gerencia_leds'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass GerenciaLedsConfig(AppConfig):\n name = 'gerencia_leds'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
from datetime import datetime

class Message(models.Model):
    text = models.CharField(max_length=200)
    votes = models.IntegerField()
    date_added = models.DateTimeField(default=datetime.now)
    score = models.BigIntegerField()
    next_vote = models.IntegerField(default=3600)  # one hour; 86400 would be a full day

    def __unicode__(self):
        # Python 2 style repr; on Python 3 / modern Django this would be __str__.
        return self.text + ' : ' + str(self.votes) + ' : ' + str(self.date_added) + ' : ' + str(self.score) + ' : ' + str(self.next_vote) + '\n'
|
normal
|
{
"blob_id": "7159b447ed6fcb2005f63c7b7359970defbc9d43",
"index": 1496,
"step-1": "<mask token>\n\n\nclass Message(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Message(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return self.text + ' : ' + str(self.votes) + ' : ' + str(self.\n date_added) + ' : ' + str(self.score) + ' : ' + str(self.next_vote\n ) + '\\n'\n",
"step-3": "<mask token>\n\n\nclass Message(models.Model):\n text = models.CharField(max_length=200)\n votes = models.IntegerField()\n date_added = models.DateTimeField(default=datetime.now)\n score = models.BigIntegerField()\n next_vote = models.IntegerField(default=3600)\n\n def __unicode__(self):\n return self.text + ' : ' + str(self.votes) + ' : ' + str(self.\n date_added) + ' : ' + str(self.score) + ' : ' + str(self.next_vote\n ) + '\\n'\n",
"step-4": "from django.db import models\nfrom datetime import datetime\n\n\nclass Message(models.Model):\n text = models.CharField(max_length=200)\n votes = models.IntegerField()\n date_added = models.DateTimeField(default=datetime.now)\n score = models.BigIntegerField()\n next_vote = models.IntegerField(default=3600)\n\n def __unicode__(self):\n return self.text + ' : ' + str(self.votes) + ' : ' + str(self.\n date_added) + ' : ' + str(self.score) + ' : ' + str(self.next_vote\n ) + '\\n'\n",
"step-5": "from django.db import models\nfrom datetime import datetime\n\nclass Message(models.Model):\n text = models.CharField(max_length=200)\n votes = models.IntegerField()\n date_added = models.DateTimeField(default=datetime.now)\n score = models.BigIntegerField()\n next_vote = models.IntegerField(default=3600) # 86400 seconds in a day\n\n def __unicode__(self):\n return self.text + ' : '+ str(self.votes) + ' : '+str(self.date_added) + ' : ' + str(self.score) + ' : '+str(self.next_vote) + '\\n'\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
# Author:sen
# Date:2020/4/2 14:15

class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

def find(root, val):
    if not root:
        return None
    if val < root.val:
        return find(root.left, val)
    elif val > root.val:
        return find(root.right, val)
    else:
        return root

def find_min(root):
    if root:
        while root.left:
            root = root.left
    return root

def find_max(root):
    if root:
        while root.right:
            root = root.right
    return root

def insert(root, val):
    if not root:
        root = TreeNode(val)
    elif val < root.val:
        root.left = insert(root.left, val)
    elif val > root.val:
        root.right = insert(root.right, val)
    else:
        pass  # val == root.val: value already in the tree, do nothing
    return root

def delete(root, val):
    if not root:
        return None
    elif val < root.val:
        root.left = delete(root.left, val)  # returns the new root of the left subtree
    elif val > root.val:
        root.right = delete(root.right, val)
    else:  # perform the deletion
        if root.left and root.right:  # node has two children
            tmp = find_min(root.right)
            root.val = tmp.val
            root.right = delete(root.right, tmp.val)
        else:  # zero or one child
            root = root.left if root.left else root.right
    return root

def height(root):
    if root is None:
        return -1
    else:
        return 1 + max(height(root.left), height(root.right))

if __name__ == '__main__':
    vals = [1, 2, 3, 4, 5, 6, 7, 8]
    root = None
    from DataStructure.tree import in_order
    for v in vals:
        root = insert(root, v)
    tree_in_order = in_order(root)
    assert vals == tree_in_order, "tree construction error"
    # vals.append(9)
    # root = insert(root, 9)
    # tree_in_order = in_order(root)
    # assert vals == tree_in_order, "insert error"
    #
    # vals.remove(6)
    # root = delete(root, 6)
    # tree_in_order = in_order(root)
    # assert vals == tree_in_order, "delete error"

    # Inserting already-sorted values degenerates the BST into a chain,
    # so this prints len(vals) - 1 = 7.
    print(height(root))
|
normal
|
{
"blob_id": "9e525eccbf10a710d6f37c903370cc10f7d2c62b",
"index": 8475,
"step-1": "class TreeNode:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class TreeNode:\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef find(root, val):\n if not root:\n return None\n if val < root.val:\n return find(root.left, val)\n elif val > root.val:\n return find(root.right, val)\n else:\n return root\n\n\ndef find_min(root):\n if root:\n while root.left:\n root = root.left\n return root\n\n\n<mask token>\n\n\ndef insert(root, val):\n if not root:\n root = TreeNode(val)\n elif val < root.val:\n root.left = insert(root.left, val)\n elif val > root.val:\n root.right = insert(root.right, val)\n else:\n pass\n return root\n\n\n<mask token>\n\n\ndef height(root):\n if root is None:\n return -1\n else:\n return 1 + max(height(root.left), height(root.right))\n\n\n<mask token>\n",
"step-3": "class TreeNode:\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef find(root, val):\n if not root:\n return None\n if val < root.val:\n return find(root.left, val)\n elif val > root.val:\n return find(root.right, val)\n else:\n return root\n\n\ndef find_min(root):\n if root:\n while root.left:\n root = root.left\n return root\n\n\ndef find_max(root):\n if root:\n while root.right:\n root = root.right\n return root\n\n\ndef insert(root, val):\n if not root:\n root = TreeNode(val)\n elif val < root.val:\n root.left = insert(root.left, val)\n elif val > root.val:\n root.right = insert(root.right, val)\n else:\n pass\n return root\n\n\n<mask token>\n\n\ndef height(root):\n if root is None:\n return -1\n else:\n return 1 + max(height(root.left), height(root.right))\n\n\n<mask token>\n",
"step-4": "class TreeNode:\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef find(root, val):\n if not root:\n return None\n if val < root.val:\n return find(root.left, val)\n elif val > root.val:\n return find(root.right, val)\n else:\n return root\n\n\ndef find_min(root):\n if root:\n while root.left:\n root = root.left\n return root\n\n\ndef find_max(root):\n if root:\n while root.right:\n root = root.right\n return root\n\n\ndef insert(root, val):\n if not root:\n root = TreeNode(val)\n elif val < root.val:\n root.left = insert(root.left, val)\n elif val > root.val:\n root.right = insert(root.right, val)\n else:\n pass\n return root\n\n\ndef delete(root, val):\n if not root:\n return None\n elif val < root.val:\n root.left = delete(root.left, val)\n elif val > root.val:\n root.right = delete(root.right, val)\n elif root.left and root.right:\n tmp = find_min(root.right)\n root.val = tmp.val\n root.right = delete(root.right, tmp.val)\n else:\n root = root.left if root.left else root.right\n return root\n\n\ndef height(root):\n if root is None:\n return -1\n else:\n return 1 + max(height(root.left), height(root.right))\n\n\nif __name__ == '__main__':\n vals = [1, 2, 3, 4, 5, 6, 7, 8]\n root = None\n from DataStructure.tree import in_order\n for v in vals:\n root = insert(root, v)\n tree_in_order = in_order(root)\n assert vals == tree_in_order, '构建树出错'\n print(height(root))\n",
"step-5": "# -*- coding: utf-8 -*-\n# Author:sen\n# Date:2020/4/2 14:15\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val \n self.left = None\n self.right = None\n\ndef find(root, val):\n if not root:\n return None\n if val < root.val:\n return find(root.left, val)\n elif val > root.val:\n return find(root.right, val)\n else:\n return root\n \n\ndef find_min(root):\n if root:\n while root.left:\n root = root.left\n return root\n \n\ndef find_max(root):\n if root:\n while root.right:\n root = root.right\n return root\n\ndef insert(root, val):\n if not root:\n root = TreeNode(val)\n elif val < root.val:\n root.left = insert(root.left, val)\n elif val > root.val:\n root.right = insert(root.right, val)\n else:\n pass # val==root.val val已经在树中,什么都不做\n return root\n\n\ndef delete(root, val):\n if not root:\n return None\n elif val < root.val:\n root.left = delete(root.left, val) # 返回左子树的根\n elif val > root.val:\n root.right = delete(root.right, val)\n else: # 执行删除操作\n if root.left and root.right: # 两个孩子节点的情况\n tmp = find_min(root.right)\n root.val = tmp.val\n root.right = delete(root.right, tmp.val)\n else: # 0个或1个\n root = root.left if root.left else root.right\n return root\n\ndef height(root):\n if root is None:\n return -1\n else:\n return 1 + max(height(root.left), height(root.right))\n\nif __name__ == '__main__':\n vals = [1, 2, 3, 4, 5, 6, 7, 8]\n root = None\n from DataStructure.tree import in_order\n for v in vals:\n root = insert(root, v)\n tree_in_order = in_order(root)\n assert vals == tree_in_order, \"构建树出错\"\n # vals.append(9)\n # root = insert(root, 9)\n # tree_in_order = in_order(root)\n # assert vals == tree_in_order, \"插入出错\"\n # \n # vals.remove(6)\n # root = delete(root, 6)\n # tree_in_order = in_order(root)\n # assert vals == tree_in_order, \"删除出错\"\n \n print(height(root))\n ",
"step-ids": [
1,
6,
7,
9,
10
]
}
|
[
1,
6,
7,
9,
10
] |
'''
A linear regression learning algorithm example using TensorFlow library.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf  # uses the TF 1.x graph API (placeholders/Sessions)
import argparse
import numpy
rng = numpy.random
#"python tf_cnn_benchmarks.py --device=cpu --data_format=NHWC --num_warmup_batches=0 --model=lenet --batch_size=32 --num_intra_threads=19 --num_batches=3750"
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', help='batch_size', required=False, default=32)
parser.add_argument('--data_size', help='data_size', required=False, default=1700)
parser.add_argument('--num_intra_threads', help='num_intra_threads', required=False, default=19)
parser.add_argument('--num_batches', help='num_batches', required=False, default=5000000)
parser.add_argument('--device', help='device', required=False, default='gpu')
args = vars(parser.parse_args())
batch_size = int(args['batch_size'])
data_size = int(args['data_size'])
num_intra_threads = int(args['num_intra_threads'])
num_batches = int(args['num_batches'])
device = args['device']
# Parameters
learning_rate = 0.01
training_epochs = num_batches
display_step = 50
# Training Data
#train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 7.042,10.791,5.313,7.997,5.654,9.27,3.1])
#train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 2.827,3.465,1.65,2.904,2.42,2.94,1.3])
#n_samples = train_X.shape[0]
n_samples=data_size
train_X=rng.rand(1,n_samples)
train_Y=rng.rand(1,n_samples)
with tf.device('/'+device+':0'):
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(X, W), b)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
# gpu share
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
# Launch the graph
newConfig = tf.ConfigProto()
newConfig.intra_op_parallelism_threads = num_intra_threads
with tf.Session(config=newConfig) as sess:
# with tf.Session() as sess:
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
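
# Hedged run example (script name and flag values are illustrative):
#
#   python linear_regression_bench.py --device=cpu --num_intra_threads=8 \
#       --data_size=1700 --num_batches=1000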
|
normal
|
{
"blob_id": "2e8d39d6d72672de8e4eac8295b90d68b1dff938",
"index": 9007,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--batch_size', help='batch_size', required=False,\n default=32)\nparser.add_argument('--data_size', help='data_size', required=False,\n default=1700)\nparser.add_argument('--num_intra_threads', help='num_intra_threads',\n required=False, default=19)\nparser.add_argument('--num_batches', help='num_batches', required=False,\n default=5000000)\nparser.add_argument('--device', help='device', required=False, default='gpu')\n<mask token>\nwith tf.device('/' + device + ':0'):\n X = tf.placeholder('float')\n Y = tf.placeholder('float')\n W = tf.Variable(rng.randn(), name='weight')\n b = tf.Variable(rng.randn(), name='bias')\n pred = tf.add(tf.multiply(X, W), b)\n cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n init = tf.global_variables_initializer()\n<mask token>\nwith tf.Session(config=newConfig) as sess:\n sess.run(init)\n for epoch in range(training_epochs):\n for x, y in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n",
"step-3": "<mask token>\nrng = numpy.random\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', help='batch_size', required=False,\n default=32)\nparser.add_argument('--data_size', help='data_size', required=False,\n default=1700)\nparser.add_argument('--num_intra_threads', help='num_intra_threads',\n required=False, default=19)\nparser.add_argument('--num_batches', help='num_batches', required=False,\n default=5000000)\nparser.add_argument('--device', help='device', required=False, default='gpu')\nargs = vars(parser.parse_args())\nbatch_size = int(args['batch_size'])\ndata_size = int(args['data_size'])\nnum_intra_threads = int(args['num_intra_threads'])\nnum_batches = int(args['num_batches'])\ndevice = args['device']\nlearning_rate = 0.01\ntraining_epochs = num_batches\ndisplay_step = 50\nn_samples = data_size\ntrain_X = rng.rand(1, n_samples)\ntrain_Y = rng.rand(1, n_samples)\nwith tf.device('/' + device + ':0'):\n X = tf.placeholder('float')\n Y = tf.placeholder('float')\n W = tf.Variable(rng.randn(), name='weight')\n b = tf.Variable(rng.randn(), name='bias')\n pred = tf.add(tf.multiply(X, W), b)\n cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n init = tf.global_variables_initializer()\nnewConfig = tf.ConfigProto()\nnewConfig.intra_op_parallelism_threads = num_intra_threads\nwith tf.Session(config=newConfig) as sess:\n sess.run(init)\n for epoch in range(training_epochs):\n for x, y in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport tensorflow as tf\nimport argparse\nimport numpy\nrng = numpy.random\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', help='batch_size', required=False,\n default=32)\nparser.add_argument('--data_size', help='data_size', required=False,\n default=1700)\nparser.add_argument('--num_intra_threads', help='num_intra_threads',\n required=False, default=19)\nparser.add_argument('--num_batches', help='num_batches', required=False,\n default=5000000)\nparser.add_argument('--device', help='device', required=False, default='gpu')\nargs = vars(parser.parse_args())\nbatch_size = int(args['batch_size'])\ndata_size = int(args['data_size'])\nnum_intra_threads = int(args['num_intra_threads'])\nnum_batches = int(args['num_batches'])\ndevice = args['device']\nlearning_rate = 0.01\ntraining_epochs = num_batches\ndisplay_step = 50\nn_samples = data_size\ntrain_X = rng.rand(1, n_samples)\ntrain_Y = rng.rand(1, n_samples)\nwith tf.device('/' + device + ':0'):\n X = tf.placeholder('float')\n Y = tf.placeholder('float')\n W = tf.Variable(rng.randn(), name='weight')\n b = tf.Variable(rng.randn(), name='bias')\n pred = tf.add(tf.multiply(X, W), b)\n cost = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n init = tf.global_variables_initializer()\nnewConfig = tf.ConfigProto()\nnewConfig.intra_op_parallelism_threads = num_intra_threads\nwith tf.Session(config=newConfig) as sess:\n sess.run(init)\n for epoch in range(training_epochs):\n for x, y in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n",
"step-5": "'''\nA linear regression learning algorithm example using TensorFlow library.\n\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport argparse\n\nimport numpy\nrng = numpy.random\n\n#\"python tf_cnn_benchmarks.py --device=cpu --data_format=NHWC --num_warmup_batches=0 --model=lenet --batch_size=32 --num_intra_threads=19 --num_batches=3750\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', help='batch_size', required=False, default=32)\nparser.add_argument('--data_size', help='data_size', required=False, default=1700)\nparser.add_argument('--num_intra_threads', help='num_intra_threads', required=False, default=19)\nparser.add_argument('--num_batches', help='num_batches', required=False, default=5000000)\nparser.add_argument('--device', help='device', required=False, default='gpu')\n\nargs = vars(parser.parse_args())\n\nbatch_size = int(args['batch_size'])\ndata_size = int(args['data_size'])\nnum_intra_threads =int(args['num_intra_threads'])\nnum_batches =int(args['num_batches'])\ndevice =args['device']\n\n# Parameters\nlearning_rate = 0.01\ntraining_epochs = num_batches\ndisplay_step = 50\n\n# Training Data\n#train_X = numpy.asarray([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 7.042,10.791,5.313,7.997,5.654,9.27,3.1]) \n#train_Y = numpy.asarray([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 2.827,3.465,1.65,2.904,2.42,2.94,1.3])\n#n_samples = train_X.shape[0]\n\nn_samples=data_size\ntrain_X=rng.rand(1,n_samples)\ntrain_Y=rng.rand(1,n_samples)\n\n\nwith tf.device('/'+device+':0'):\n # tf Graph Input\n X = tf.placeholder(\"float\")\n Y = tf.placeholder(\"float\")\n\n # Set model weights\n W = tf.Variable(rng.randn(), name=\"weight\")\n b = tf.Variable(rng.randn(), name=\"bias\")\n\n # Construct a linear model\n pred = tf.add(tf.multiply(X, W), b)\n\n # Mean squared error\n cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)\n # Gradient descent\n # Note, minimize() knows to modify W and b because Variable objects are trainable=True by default\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n\n # Initializing the variables\n init = tf.global_variables_initializer()\n\n # gpu share\n#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)\n\n# Launch the graph\nnewConfig = tf.ConfigProto()\nnewConfig.intra_op_parallelism_threads = num_intra_threads\nwith tf.Session(config=newConfig) as sess:\n# with tf.Session() as sess:\n sess.run(init)\n # Fit all training data\n for epoch in range(training_epochs):\n for (x, y) in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The GIFT module provides basic functions for interfacing with some of the GIFT tools.
In order to use the standalone MCR version of GIFT, you need to ensure that
the following commands are executed at the beginning of your script::
from nipype.interfaces import gift
matlab_cmd = '/path/to/run_groupica.sh /path/to/compiler_runtime/v901/ '
gift.GICACommand.set_mlab_paths(matlab_cmd=matlab_cmd,use_mcr=True)
"""
__docformat__ = 'restructuredtext'
# Standard library imports
import os
# Local imports
from ..base import (BaseInterface, traits, isdefined, InputMultiPath,
BaseInterfaceInputSpec, Directory, Undefined)
from ..matlab import MatlabCommand
class GIFTCommandInputSpec(BaseInterfaceInputSpec):
matlab_cmd = traits.Str(desc='matlab command to use')
paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')
mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)
use_mcr = traits.Bool(desc='Run m-code using GIFT MCR')
class GIFTCommandOutputSpec(BaseInterfaceInputSpec):
    matlab_output = traits.Str()
class GIFTCommand(BaseInterface):
"""Extends `BaseInterface` class to implement GIFT specific interfaces.
WARNING: Pseudo prototype class, meant to be subclassed
"""
input_spec = GIFTCommandInputSpec
output_spec = GIFTCommandOutputSpec
_matlab_cmd = None
_paths = None
_use_mcr = None
def __init__(self, **inputs):
super(GIFTCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd','mfile','paths','use_mcr'])
self._find_mlab_cmd_defaults()
self._check_mlab_inputs()
self._matlab_cmd_update()
@classmethod
def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):
cls._matlab_cmd = matlab_cmd
cls._paths = paths
cls._use_mcr = use_mcr
    def _find_mlab_cmd_defaults(self):
        # fall back to the class-level default set via set_mlab_paths()
        # to enforce the standalone (MCR) version of GIFT
if self._use_mcr:
self._use_mcr = True
    def _matlab_cmd_update(self):
        # MatlabCommand has to be created here,
        # because matlab_cmd is not a proper input
        # and can be set only during init
matlab_cmd_str = self.inputs.matlab_cmd
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
if not matlab_cmd_str[-1] == " ":
matlab_cmd_str = matlab_cmd_str + " "
self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str,
mfile=self.inputs.mfile,
paths=self.inputs.paths)
self.mlab.inputs.script_file = 'pyscript_%s.m' % self.__class__.__name__.split('.')[-1].lower()
if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:
self.mlab.inputs.nodesktop = Undefined
self.mlab.inputs.nosplash = Undefined
self.mlab.inputs.single_comp_thread = Undefined
self.mlab.inputs.uses_mcr = True
self.mlab.inputs.mfile = True
def _check_mlab_inputs(self):
if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:
self.inputs.matlab_cmd = self._matlab_cmd
if not isdefined(self.inputs.paths) and self._paths:
self.inputs.paths = self._paths
if not isdefined(self.inputs.use_mcr) and self._use_mcr:
self.inputs.use_mcr = self._use_mcr
def _run_interface(self, runtime):
"""Executes the GIFT function using MATLAB."""
self.mlab.inputs.script = self._make_matlab_command()
results = self.mlab.run()
runtime.returncode = results.runtime.returncode
if self.mlab.inputs.uses_mcr:
if 'Skipped' in results.runtime.stdout:
self.raise_exception(runtime)
runtime.stdout = results.runtime.stdout
runtime.stderr = results.runtime.stderr
runtime.merged = results.runtime.merged
return runtime
def _list_outputs(self):
"""Determine the expected outputs based on inputs."""
outputs = self._outputs().get()
return outputs
def _make_matlab_command(self):
"""Generates a mfile to build job structure
Returns
-------
mscript : string
contents of a script called by matlab
"""
raise NotImplementedError
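
# --- Hedged subclass sketch (illustrative; not part of this module) ---
# A concrete interface only needs to supply the MATLAB script; the class
# name and the GIFT batch call below are assumptions for demonstration.
#
# class GICACommand(GIFTCommand):
#     def _make_matlab_command(self):
#         return "icatb_batch_file_run('input_batch.m');"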
|
normal
|
{
"blob_id": "fef1cf75de8358807f29cd06d2338e087d6f2d23",
"index": 9162,
"step-1": "<mask token>\n\n\nclass GIFTCommand(BaseInterface):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',\n 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n if self._use_mcr:\n self._use_mcr = True\n\n def _matlab_cmd_update(self):\n matlab_cmd_str = self.inputs.matlab_cmd\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == ' ':\n matlab_cmd_str = matlab_cmd_str + ' '\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.\n inputs.mfile, paths=self.inputs.paths)\n self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.\n __name__.split('.')[-1].lower())\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n\n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command()\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n outputs = self._outputs().get()\n return outputs\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GIFTCommand(BaseInterface):\n <mask token>\n input_spec = GIFTCommandInputSpec\n output_spec = GIFTCommandOutputSpec\n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',\n 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n if self._use_mcr:\n self._use_mcr = True\n\n def _matlab_cmd_update(self):\n matlab_cmd_str = self.inputs.matlab_cmd\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == ' ':\n matlab_cmd_str = matlab_cmd_str + ' '\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.\n inputs.mfile, paths=self.inputs.paths)\n self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.\n __name__.split('.')[-1].lower())\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n\n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command()\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n outputs = self._outputs().get()\n return outputs\n\n def _make_matlab_command(self):\n \"\"\"Generates a mfile to build job structure\n \n Returns\n -------\n mscript : string\n contents of a script called by matlab\n\n \"\"\"\n raise NotImplementedError\n",
"step-3": "<mask token>\n\n\nclass GIFTCommandInputSpec(BaseInterfaceInputSpec):\n matlab_cmd = traits.Str(desc='matlab command to use')\n paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')\n mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)\n use_mcr = traits.Bool(desc='Run m-code using GIFT MCR')\n\n\nclass GIFTCommandOutputSpec(BaseInterfaceInputSpec):\n matlab_output = traits.Str()\n\n\nclass GIFTCommand(BaseInterface):\n \"\"\"Extends `BaseInterface` class to implement GIFT specific interfaces.\n\n WARNING: Pseudo prototype class, meant to be subclassed\n \"\"\"\n input_spec = GIFTCommandInputSpec\n output_spec = GIFTCommandOutputSpec\n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',\n 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n if self._use_mcr:\n self._use_mcr = True\n\n def _matlab_cmd_update(self):\n matlab_cmd_str = self.inputs.matlab_cmd\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == ' ':\n matlab_cmd_str = matlab_cmd_str + ' '\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.\n inputs.mfile, paths=self.inputs.paths)\n self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.\n __name__.split('.')[-1].lower())\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n\n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command()\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n outputs = self._outputs().get()\n return outputs\n\n def _make_matlab_command(self):\n \"\"\"Generates a mfile to build job structure\n \n Returns\n -------\n mscript : string\n contents of a script called by matlab\n\n \"\"\"\n raise NotImplementedError\n",
"step-4": "<mask token>\n__docformat__ = 'restructuredtext'\n<mask token>\n\n\nclass GIFTCommandInputSpec(BaseInterfaceInputSpec):\n matlab_cmd = traits.Str(desc='matlab command to use')\n paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')\n mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)\n use_mcr = traits.Bool(desc='Run m-code using GIFT MCR')\n\n\nclass GIFTCommandOutputSpec(BaseInterfaceInputSpec):\n matlab_output = traits.Str()\n\n\nclass GIFTCommand(BaseInterface):\n \"\"\"Extends `BaseInterface` class to implement GIFT specific interfaces.\n\n WARNING: Pseudo prototype class, meant to be subclassed\n \"\"\"\n input_spec = GIFTCommandInputSpec\n output_spec = GIFTCommandOutputSpec\n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd',\n 'mfile', 'paths', 'use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n if self._use_mcr:\n self._use_mcr = True\n\n def _matlab_cmd_update(self):\n matlab_cmd_str = self.inputs.matlab_cmd\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == ' ':\n matlab_cmd_str = matlab_cmd_str + ' '\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str, mfile=self.\n inputs.mfile, paths=self.inputs.paths)\n self.mlab.inputs.script_file = ('pyscript_%s.m' % self.__class__.\n __name__.split('.')[-1].lower())\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n\n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command()\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if self.mlab.inputs.uses_mcr:\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n outputs = self._outputs().get()\n return outputs\n\n def _make_matlab_command(self):\n \"\"\"Generates a mfile to build job structure\n \n Returns\n -------\n mscript : string\n contents of a script called by matlab\n\n \"\"\"\n raise NotImplementedError\n",
"step-5": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"The GIFT module provides basic functions for interfacing with some of the GIFT tools.\n\nIn order to use the standalone MCR version of GIFT, you need to ensure that\nthe following commands are executed at the beginning of your script::\n\n from nipype.interfaces import gift \n matlab_cmd = '/path/to/run_groupica.sh /path/to/compiler_runtime/v901/ '\n gift.GICACommand.set_mlab_paths(matlab_cmd=matlab_cmd,use_mcr=True)\n\"\"\"\n\n__docformat__ = 'restructuredtext'\n\n# Standard library imports\nimport os\n\n# Local imports\nfrom ..base import (BaseInterface, traits, isdefined, InputMultiPath,\n BaseInterfaceInputSpec, Directory, Undefined)\nfrom ..matlab import MatlabCommand\n\nclass GIFTCommandInputSpec(BaseInterfaceInputSpec):\n matlab_cmd = traits.Str(desc='matlab command to use')\n paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')\n mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True)\n use_mcr = traits.Bool(desc='Run m-code using GIFT MCR') \n\t\nclass GIFTCommandOutputSpec( BaseInterfaceInputSpec):\n matlab_output = traits.Str( )\t\n\nclass GIFTCommand(BaseInterface):\n \"\"\"Extends `BaseInterface` class to implement GIFT specific interfaces.\n\n WARNING: Pseudo prototype class, meant to be subclassed\n \"\"\"\n input_spec = GIFTCommandInputSpec\n output_spec = GIFTCommandOutputSpec\n \n _matlab_cmd = None\n _paths = None\n _use_mcr = None\n\n def __init__(self, **inputs):\n super(GIFTCommand, self).__init__(**inputs)\n self.inputs.on_trait_change(self._matlab_cmd_update, ['matlab_cmd','mfile','paths','use_mcr'])\n self._find_mlab_cmd_defaults()\n self._check_mlab_inputs()\n self._matlab_cmd_update()\n\n @classmethod\n def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None):\n cls._matlab_cmd = matlab_cmd\n cls._paths = paths\n cls._use_mcr = use_mcr\n\n def _find_mlab_cmd_defaults(self):\n # check if the user has set environment variables to enforce\n # the standalone (MCR) version of GIFT \n if self._use_mcr:\n self._use_mcr = True\n \n\n def _matlab_cmd_update(self):\n # MatlabCommand has to be created here,\n # because matlab_cmb is not a proper input\n # and can be set only during init\t\n matlab_cmd_str = self.inputs.matlab_cmd\t\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n if not matlab_cmd_str[-1] == \" \":\n matlab_cmd_str = matlab_cmd_str + \" \"\n self.mlab = MatlabCommand(matlab_cmd=matlab_cmd_str,\n mfile=self.inputs.mfile,\n paths=self.inputs.paths) \n self.mlab.inputs.script_file = 'pyscript_%s.m' % self.__class__.__name__.split('.')[-1].lower()\n if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr:\n self.mlab.inputs.nodesktop = Undefined\n self.mlab.inputs.nosplash = Undefined\n self.mlab.inputs.single_comp_thread = Undefined\n self.mlab.inputs.uses_mcr = True\n self.mlab.inputs.mfile = True\n \n def _check_mlab_inputs(self):\n if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd:\n self.inputs.matlab_cmd = self._matlab_cmd\n if not isdefined(self.inputs.paths) and self._paths:\n self.inputs.paths = self._paths\n if not isdefined(self.inputs.use_mcr) and self._use_mcr:\n self.inputs.use_mcr = self._use_mcr\n\n def _run_interface(self, runtime):\n \"\"\"Executes the GIFT function using MATLAB.\"\"\"\n self.mlab.inputs.script = self._make_matlab_command() \t\n results = self.mlab.run()\n runtime.returncode = results.runtime.returncode\n if 
self.mlab.inputs.uses_mcr:\t\t\n if 'Skipped' in results.runtime.stdout:\n self.raise_exception(runtime)\n runtime.stdout = results.runtime.stdout\n runtime.stderr = results.runtime.stderr\n runtime.merged = results.runtime.merged\n return runtime\n\n def _list_outputs(self):\n \"\"\"Determine the expected outputs based on inputs.\"\"\"\n \n outputs = self._outputs().get()\n return outputs\n\n \n def _make_matlab_command(self):\n \"\"\"Generates a mfile to build job structure\n \n Returns\n -------\n mscript : string\n contents of a script called by matlab\n\n \"\"\"\n \n raise NotImplementedError\n\n",
"step-ids": [
8,
10,
15,
16,
18
]
}
|
[
8,
10,
15,
16,
18
] |
import os
import sys
import glob
import shutil
import json
import codecs
from collections import OrderedDict
def getRegionClass(image_path, data_id, imgName):
region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect', 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']
label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp', 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']
select_class = None
for class_id in range(len(region_class)):
cur_class = region_class[class_id]
cur_label_class = label_class[class_id]
check_file_name = os.path.join(image_path, data_id, cur_class, imgName)
if os.path.isfile(check_file_name):
select_class = cur_label_class
#print check_file_name
break
return select_class
def add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):
if not os.path.exists(dst_json_dir):
os.makedirs(dst_json_dir)
smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0
nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num = 0, 0, 0, 0
for json_file_name in glob.glob(org_json_dir + '/*.json'):
json_file = open(json_file_name, 'r')
base_file_id = os.path.basename(json_file_name)[:-5]
print(base_file_id + '.json')
json_lines = json_file.read().splitlines()
dst_json_lines = []
new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', "w", "utf-8")
new_json_file.close()
new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', "a+", 'utf-8')
for line in json_lines:
if line[0] == '#':
new_json_file.write(line + '\n')
continue
js = json.loads(line, object_pairs_hook=OrderedDict)
#new_js_line = json.dumps(js) + "\n"
#new_json_file.write(new_js_line)
#continue
imgName = js["image_key"]
select_class = getRegionClass(done_root_dir, base_file_id, imgName)
if select_class == None:
new_json_file.write(line + '\n') #
#print('Not Found: ', done_root_dir, base_file_id, imgName)
continue
#print select_class
new_common_box = {}
new_attrs = {}
new_attrs['ignore'] = 'no'
new_attrs['type'] = 'smoke_region'
new_attrs['class'] = select_class
new_common_box['attrs'] = new_attrs
if select_class == 'smoke_hard':
new_attrs['ignore'] = 'yes'
# statistic
if select_class == 'smoke_hand':
smoke_hand_num += 1
elif select_class == 'smoke_nohand':
smoke_nohand_num += 1
elif select_class == 'smoke_hard':
smoke_hard_num += 1
elif select_class == 'nosmoke_bg':
nosmoke_bg_num += 1
elif select_class == 'nosmoke_face':
nosmoke_face_num += 1
elif select_class == 'nosmoke_susp':
nosmoke_susp_num += 1
elif select_class == 'nosmoke_cover':
nosmoke_cover_num += 1
else:
print('Invalid smoke class.', select_class)
# common box, like phone, hand
if 'common_box' in js:
js['common_box'].append(new_common_box)
else:
js['common_box'] = [new_common_box]
new_js_line = json.dumps(js) + "\n"
new_json_file.write(new_js_line)
new_json_file.close()
print('write ' + base_file_id + '.json')
print('add_common_box_smoke_region done.')
print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d'%(smoke_hand_num, smoke_nohand_num, smoke_hard_num))
print('nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d'%(nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num))
if __name__ == '__main__':
if len(sys.argv) < 2:
print('useage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir')
exit()
org_json_dir = sys.argv[1]
dst_json_dir = sys.argv[2]
done_root_dir = sys.argv[3]
add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)
|
normal
|
{
"blob_id": "75833617996549167fa157ff78cc1a11f870784f",
"index": 8639,
"step-1": "<mask token>\n\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n break\n return select_class\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n break\n return select_class\n\n\ndef add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):\n if not os.path.exists(dst_json_dir):\n os.makedirs(dst_json_dir)\n smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num\n ) = 0, 0, 0, 0\n for json_file_name in glob.glob(org_json_dir + '/*.json'):\n json_file = open(json_file_name, 'r')\n base_file_id = os.path.basename(json_file_name)[:-5]\n print(base_file_id + '.json')\n json_lines = json_file.read().splitlines()\n dst_json_lines = []\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'w', 'utf-8')\n new_json_file.close()\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'a+', 'utf-8')\n for line in json_lines:\n if line[0] == '#':\n new_json_file.write(line + '\\n')\n continue\n js = json.loads(line, object_pairs_hook=OrderedDict)\n imgName = js['image_key']\n select_class = getRegionClass(done_root_dir, base_file_id, imgName)\n if select_class == None:\n new_json_file.write(line + '\\n')\n continue\n new_common_box = {}\n new_attrs = {}\n new_attrs['ignore'] = 'no'\n new_attrs['type'] = 'smoke_region'\n new_attrs['class'] = select_class\n new_common_box['attrs'] = new_attrs\n if select_class == 'smoke_hard':\n new_attrs['ignore'] = 'yes'\n if select_class == 'smoke_hand':\n smoke_hand_num += 1\n elif select_class == 'smoke_nohand':\n smoke_nohand_num += 1\n elif select_class == 'smoke_hard':\n smoke_hard_num += 1\n elif select_class == 'nosmoke_bg':\n nosmoke_bg_num += 1\n elif select_class == 'nosmoke_face':\n nosmoke_face_num += 1\n elif select_class == 'nosmoke_susp':\n nosmoke_susp_num += 1\n elif select_class == 'nosmoke_cover':\n nosmoke_cover_num += 1\n else:\n print('Invalid smoke class.', select_class)\n if 'common_box' in js:\n js['common_box'].append(new_common_box)\n else:\n js['common_box'] = [new_common_box]\n new_js_line = json.dumps(js) + '\\n'\n new_json_file.write(new_js_line)\n new_json_file.close()\n print('write ' + base_file_id + '.json')\n print('add_common_box_smoke_region done.')\n print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d' % (smoke_hand_num,\n smoke_nohand_num, smoke_hard_num))\n print(\n 'nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d' %\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num)\n )\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n break\n return select_class\n\n\ndef add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):\n if not os.path.exists(dst_json_dir):\n os.makedirs(dst_json_dir)\n smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num\n ) = 0, 0, 0, 0\n for json_file_name in glob.glob(org_json_dir + '/*.json'):\n json_file = open(json_file_name, 'r')\n base_file_id = os.path.basename(json_file_name)[:-5]\n print(base_file_id + '.json')\n json_lines = json_file.read().splitlines()\n dst_json_lines = []\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'w', 'utf-8')\n new_json_file.close()\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'a+', 'utf-8')\n for line in json_lines:\n if line[0] == '#':\n new_json_file.write(line + '\\n')\n continue\n js = json.loads(line, object_pairs_hook=OrderedDict)\n imgName = js['image_key']\n select_class = getRegionClass(done_root_dir, base_file_id, imgName)\n if select_class == None:\n new_json_file.write(line + '\\n')\n continue\n new_common_box = {}\n new_attrs = {}\n new_attrs['ignore'] = 'no'\n new_attrs['type'] = 'smoke_region'\n new_attrs['class'] = select_class\n new_common_box['attrs'] = new_attrs\n if select_class == 'smoke_hard':\n new_attrs['ignore'] = 'yes'\n if select_class == 'smoke_hand':\n smoke_hand_num += 1\n elif select_class == 'smoke_nohand':\n smoke_nohand_num += 1\n elif select_class == 'smoke_hard':\n smoke_hard_num += 1\n elif select_class == 'nosmoke_bg':\n nosmoke_bg_num += 1\n elif select_class == 'nosmoke_face':\n nosmoke_face_num += 1\n elif select_class == 'nosmoke_susp':\n nosmoke_susp_num += 1\n elif select_class == 'nosmoke_cover':\n nosmoke_cover_num += 1\n else:\n print('Invalid smoke class.', select_class)\n if 'common_box' in js:\n js['common_box'].append(new_common_box)\n else:\n js['common_box'] = [new_common_box]\n new_js_line = json.dumps(js) + '\\n'\n new_json_file.write(new_js_line)\n new_json_file.close()\n print('write ' + base_file_id + '.json')\n print('add_common_box_smoke_region done.')\n print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d' % (smoke_hand_num,\n smoke_nohand_num, smoke_hard_num))\n print(\n 'nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d' %\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num)\n )\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\n 'useage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir'\n )\n exit()\n org_json_dir = sys.argv[1]\n dst_json_dir = sys.argv[2]\n done_root_dir = sys.argv[3]\n add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)\n",
"step-4": "import os\nimport sys\nimport glob\nimport shutil\nimport json\nimport codecs\nfrom collections import OrderedDict\n\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp',\n 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n break\n return select_class\n\n\ndef add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):\n if not os.path.exists(dst_json_dir):\n os.makedirs(dst_json_dir)\n smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num\n ) = 0, 0, 0, 0\n for json_file_name in glob.glob(org_json_dir + '/*.json'):\n json_file = open(json_file_name, 'r')\n base_file_id = os.path.basename(json_file_name)[:-5]\n print(base_file_id + '.json')\n json_lines = json_file.read().splitlines()\n dst_json_lines = []\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'w', 'utf-8')\n new_json_file.close()\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id +\n '.json', 'a+', 'utf-8')\n for line in json_lines:\n if line[0] == '#':\n new_json_file.write(line + '\\n')\n continue\n js = json.loads(line, object_pairs_hook=OrderedDict)\n imgName = js['image_key']\n select_class = getRegionClass(done_root_dir, base_file_id, imgName)\n if select_class == None:\n new_json_file.write(line + '\\n')\n continue\n new_common_box = {}\n new_attrs = {}\n new_attrs['ignore'] = 'no'\n new_attrs['type'] = 'smoke_region'\n new_attrs['class'] = select_class\n new_common_box['attrs'] = new_attrs\n if select_class == 'smoke_hard':\n new_attrs['ignore'] = 'yes'\n if select_class == 'smoke_hand':\n smoke_hand_num += 1\n elif select_class == 'smoke_nohand':\n smoke_nohand_num += 1\n elif select_class == 'smoke_hard':\n smoke_hard_num += 1\n elif select_class == 'nosmoke_bg':\n nosmoke_bg_num += 1\n elif select_class == 'nosmoke_face':\n nosmoke_face_num += 1\n elif select_class == 'nosmoke_susp':\n nosmoke_susp_num += 1\n elif select_class == 'nosmoke_cover':\n nosmoke_cover_num += 1\n else:\n print('Invalid smoke class.', select_class)\n if 'common_box' in js:\n js['common_box'].append(new_common_box)\n else:\n js['common_box'] = [new_common_box]\n new_js_line = json.dumps(js) + '\\n'\n new_json_file.write(new_js_line)\n new_json_file.close()\n print('write ' + base_file_id + '.json')\n print('add_common_box_smoke_region done.')\n print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d' % (smoke_hand_num,\n smoke_nohand_num, smoke_hard_num))\n print(\n 'nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d' %\n (nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num)\n )\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\n 'useage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir'\n )\n exit()\n org_json_dir = sys.argv[1]\n dst_json_dir = sys.argv[2]\n done_root_dir = sys.argv[3]\n add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)\n",
"step-5": "import os\nimport sys\nimport glob\nimport shutil\nimport json\nimport codecs\nfrom collections import OrderedDict\n\ndef getRegionClass(image_path, data_id, imgName):\n region_class = ['nosmoke_background', 'nosmoke_face', 'nosmoke_suspect', 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n label_class = ['nosmoke_bg', 'nosmoke_face', 'nosmoke_susp', 'nosmoke_cover', 'smoke_hand', 'smoke_nohand', 'smoke_hard']\n select_class = None\n for class_id in range(len(region_class)):\n cur_class = region_class[class_id]\n cur_label_class = label_class[class_id]\n check_file_name = os.path.join(image_path, data_id, cur_class, imgName)\n if os.path.isfile(check_file_name):\n select_class = cur_label_class\n #print check_file_name\n break\n return select_class\n\ndef add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir):\n if not os.path.exists(dst_json_dir):\n os.makedirs(dst_json_dir)\n \n smoke_hand_num, smoke_nohand_num, smoke_hard_num = 0, 0, 0\n nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num = 0, 0, 0, 0\n for json_file_name in glob.glob(org_json_dir + '/*.json'):\n json_file = open(json_file_name, 'r')\n base_file_id = os.path.basename(json_file_name)[:-5]\n print(base_file_id + '.json')\n \n json_lines = json_file.read().splitlines()\n dst_json_lines = []\n \n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', \"w\", \"utf-8\")\n new_json_file.close()\n new_json_file = codecs.open(dst_json_dir + '/' + base_file_id + '.json', \"a+\", 'utf-8')\n for line in json_lines:\n if line[0] == '#':\n new_json_file.write(line + '\\n')\n continue\n js = json.loads(line, object_pairs_hook=OrderedDict)\n \n #new_js_line = json.dumps(js) + \"\\n\"\n #new_json_file.write(new_js_line)\n #continue\n \n imgName = js[\"image_key\"]\n select_class = getRegionClass(done_root_dir, base_file_id, imgName)\n if select_class == None:\n new_json_file.write(line + '\\n') #\n #print('Not Found: ', done_root_dir, base_file_id, imgName)\n continue\n #print select_class\n new_common_box = {}\n new_attrs = {}\n new_attrs['ignore'] = 'no'\n new_attrs['type'] = 'smoke_region'\n new_attrs['class'] = select_class\n new_common_box['attrs'] = new_attrs\n if select_class == 'smoke_hard':\n new_attrs['ignore'] = 'yes'\n \n # statistic\n if select_class == 'smoke_hand':\n smoke_hand_num += 1\n elif select_class == 'smoke_nohand':\n smoke_nohand_num += 1\n elif select_class == 'smoke_hard':\n smoke_hard_num += 1\n elif select_class == 'nosmoke_bg':\n nosmoke_bg_num += 1\n elif select_class == 'nosmoke_face':\n nosmoke_face_num += 1\n elif select_class == 'nosmoke_susp':\n nosmoke_susp_num += 1\n elif select_class == 'nosmoke_cover':\n nosmoke_cover_num += 1\n else:\n print('Invalid smoke class.', select_class)\n \n # common box, like phone, hand\n if 'common_box' in js:\n js['common_box'].append(new_common_box)\n else:\n js['common_box'] = [new_common_box]\n new_js_line = json.dumps(js) + \"\\n\"\n new_json_file.write(new_js_line)\n new_json_file.close()\n print('write ' + base_file_id + '.json')\n print('add_common_box_smoke_region done.')\n print('smoke_hand:%d, smoke_nohand:%d, smoke_hard:%d'%(smoke_hand_num, smoke_nohand_num, smoke_hard_num))\n print('nosmoke_bg:%d, nosmoke_face:%d, nosmoke_susp:%d, nosmoke_cover:%d'%(nosmoke_bg_num, nosmoke_face_num, nosmoke_susp_num, nosmoke_cover_num))\n \nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('useage: add_common_box_smoke_region.py org_json_dir dst_json_dir done_root_dir')\n exit()\n 
org_json_dir = sys.argv[1]\n dst_json_dir = sys.argv[2]\n done_root_dir = sys.argv[3]\n add_common_box_smoke_region(org_json_dir, dst_json_dir, done_root_dir)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
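A minimal sketch of the on-disk layout that getRegionClass in the annotation script above assumes; the root path, data-set id, and image names here are hypothetical placeholders:

# getRegionClass(done_root_dir, data_id, imgName) resolves a label by probing
#   <done_root_dir>/<data_id>/<region_class>/<imgName>
# e.g. /data/done/batch_001/smoke_hand/frame_0001.jpg -> label 'smoke_hand'
import os

done_root_dir = '/data/done'  # assumption: manually sorted images live here
for data_id in sorted(os.listdir(done_root_dir)):
    for region_class in sorted(os.listdir(os.path.join(done_root_dir, data_id))):
        n = len(os.listdir(os.path.join(done_root_dir, data_id, region_class)))
        print('%s/%s: %d images' % (data_id, region_class, n))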
""" Soil and water decomposition rates """
import math
from water_balance import WaterBalance
from utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip
__author__ = "Martin De Kauwe"
__version__ = "1.0 (25.02.2011)"
__email__ = "mdekauwe@gmail.com"
class DecompFactors(object):
""" Calculate C and N litter production rates """
def __init__(self, control, params, state, fluxes, met_data):
"""
Parameters
----------
control : integers, structure
model control flags
params: floats, structure
model parameters
state: floats, structure
model state
fluxes : floats, structure
model fluxes
met_data : floats, dictionary
meteorological forcing data
"""
self.params = params
self.fluxes = fluxes
self.control = control
self.state = state
self.met_data = met_data
self.wb = WaterBalance(self.control, self.params, self.state,
self.fluxes, self.met_data)
def decay_rates(self, project_day):
""" Model decay rates - temperature dependency (i.e. increase with temp)
[See section A8 in Comins and McMurtrie 1993].
Parameters:
-----------
project_day : int
current simulation day (index)
"""
# temperature and water factors for decomposition
tempact = self.soil_temp_factor(project_day)
wtfac = self.wb.calculate_soil_water_fac(topsoil=True)
# decay rate of surface structural pool
self.params.decayrate[0] = (self.params.kdec1 *
math.exp(-3. * self.params.ligshoot) *
tempact * wtfac)
# decay rate of surface metabolic pool
self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac
# decay rate of soil structural pool
self.params.decayrate[2] = (self.params.kdec3 *
math.exp(-3. * self.params.ligroot) *
tempact * wtfac)
# decay rate of soil metabolic pool
self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac
# decay rate of active pool
self.params.decayrate[4] = (self.params.kdec5 *
(1.0 - 0.75 * self.params.finesoil) *
tempact * wtfac)
# decay rate of slow pool
self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac
# decay rate of passive pool
self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac
def soil_temp_factor(self, project_day):
"""Soil-temperature activity factor (A9).
Parameters:
-----------
project_day : int
current simulation day (index)
Returns:
--------
tfac : float
soil temperature factor [degC]
"""
tsoil = self.met_data['tsoil'][project_day]
if float_gt(tsoil, 0.0):
tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)
if float_lt(tfac, 0.0):
tfac = 0.0
else:
# negative number cannot be raised to a fractional power
# number would need to be complex
tfac = 0.0
return tfac
|
normal
|
{
"blob_id": "74f3b4001a0520a25a314ff537719b679ba0fca4",
"index": 2578,
"step-1": "<mask token>\n\n\nclass DecompFactors(object):\n <mask token>\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-3": "<mask token>\n__author__ = 'Martin De Kauwe'\n__version__ = '1.0 (25.02.2011)'\n__email__ = 'mdekauwe@gmail.com'\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-4": "<mask token>\nimport math\nfrom water_balance import WaterBalance\nfrom utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip\n__author__ = 'Martin De Kauwe'\n__version__ = '1.0 (25.02.2011)'\n__email__ = 'mdekauwe@gmail.com'\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n self.wb = WaterBalance(self.control, self.params, self.state, self.\n fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n self.params.decayrate[0] = self.params.kdec1 * math.exp(-3.0 * self\n .params.ligshoot) * tempact * wtfac\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n self.params.decayrate[2] = self.params.kdec3 * math.exp(-3.0 * self\n .params.ligroot) * tempact * wtfac\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n self.params.decayrate[4] = self.params.kdec5 * (1.0 - 0.75 * self.\n params.finesoil) * tempact * wtfac\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n if float_gt(tsoil, 0.0):\n tfac = 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n tfac = 0.0\n return tfac\n",
"step-5": "\"\"\" Soil and water decomposition rates \"\"\"\n\nimport math\n\nfrom water_balance import WaterBalance\nfrom utilities import float_eq, float_lt, float_le, float_gt, float_ge, clip\n\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (25.02.2011)\"\n__email__ = \"mdekauwe@gmail.com\"\n\n\nclass DecompFactors(object):\n \"\"\" Calculate C and N litter production rates \"\"\"\n def __init__(self, control, params, state, fluxes, met_data):\n \"\"\"\n Parameters\n ----------\n control : integers, structure\n model control flags\n params: floats, structure\n model parameters\n state: floats, structure\n model state\n fluxes : floats, structure\n model fluxes\n met_data : floats, dictionary\n meteorological forcing data\n\n \"\"\"\n self.params = params\n self.fluxes = fluxes\n self.control = control\n self.state = state\n self.met_data = met_data\n\n self.wb = WaterBalance(self.control, self.params, self.state,\n self.fluxes, self.met_data)\n\n def decay_rates(self, project_day):\n \"\"\" Model decay rates - temperature dependency (i.e. increase with temp)\n [See section A8 in Comins and McMurtrie 1993].\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n \"\"\"\n # temperature and water factors for decomposition\n tempact = self.soil_temp_factor(project_day)\n wtfac = self.wb.calculate_soil_water_fac(topsoil=True)\n\n # decay rate of surface structural pool\n self.params.decayrate[0] = (self.params.kdec1 *\n math.exp(-3. * self.params.ligshoot) *\n tempact * wtfac)\n\n # decay rate of surface metabolic pool\n self.params.decayrate[1] = self.params.kdec2 * tempact * wtfac\n\n\n # decay rate of soil structural pool\n self.params.decayrate[2] = (self.params.kdec3 *\n math.exp(-3. * self.params.ligroot) *\n tempact * wtfac)\n\n # decay rate of soil metabolic pool\n self.params.decayrate[3] = self.params.kdec4 * tempact * wtfac\n\n\n # decay rate of active pool\n self.params.decayrate[4] = (self.params.kdec5 *\n (1.0 - 0.75 * self.params.finesoil) *\n tempact * wtfac)\n\n # decay rate of slow pool\n self.params.decayrate[5] = self.params.kdec6 * tempact * wtfac\n\n # decay rate of passive pool\n self.params.decayrate[6] = self.params.kdec7 * tempact * wtfac\n\n def soil_temp_factor(self, project_day):\n \"\"\"Soil-temperature activity factor (A9).\n\n Parameters:\n -----------\n project_day : int\n current simulation day (index)\n\n Returns:\n --------\n tfac : float\n soil temperature factor [degC]\n\n \"\"\"\n tsoil = self.met_data['tsoil'][project_day]\n\n if float_gt(tsoil, 0.0):\n tfac = (0.0326 + 0.00351 * tsoil**1.652 - (tsoil / 41.748)**7.19)\n if float_lt(tfac, 0.0):\n tfac = 0.0\n else:\n # negative number cannot be raised to a fractional power\n # number would need to be complex\n tfac = 0.0\n\n return tfac\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
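A quick numeric check of the soil-temperature activity factor (A9) implemented above; a standalone sketch of the same polynomial, with arbitrary sample temperatures:

def soil_temp_factor(tsoil):
    # same polynomial as DecompFactors.soil_temp_factor, clamped at zero
    if tsoil <= 0.0:
        return 0.0  # a negative base cannot be raised to a fractional power
    return max(0.0, 0.0326 + 0.00351 * tsoil ** 1.652 - (tsoil / 41.748) ** 7.19)

for t in (-5.0, 10.0, 25.0, 45.0, 55.0):
    print(t, round(soil_temp_factor(t), 4))
# the factor rises with temperature and drops back to zero above roughly 50 degC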
from django.db import models
from django.contrib import admin
from django.utils import timezone
class Libros(models.Model):
ISBN = models.CharField(max_length=13,primary_key=True)
Titulo = models.CharField(max_length=15)
# Portada = models.ImageField(upload_to='imagen/')
Autor = models.CharField(max_length=100)
Editorial = models.CharField(max_length=100)
Pais=models.CharField(max_length=100)
anno= models.IntegerField()
def __str__(self):
return self.Titulo
class Usuario(models.Model):
DPI = models.CharField(max_length=20)
NombreCompleto= models.CharField(max_length=100)
def __str__(self):
return self.DPI
class Prestamo (models.Model):
Fecha_Prestamo=models.DateTimeField(default=timezone.now)
Fecha_Devolucion=models.DateField()
Fecha_Devolucion_Real=models.DateField()
Libro=models.ForeignKey(Libros,on_delete=models.CASCADE)
Usuario=models.ForeignKey(Usuario,on_delete=models.CASCADE)
class PrestamoInLine(admin.TabularInline):
model=Prestamo
extra=1
class LibroAdmin(admin.ModelAdmin):
inlines = (PrestamoInLine,)
class UsuarioAdmin(admin.ModelAdmin):
inlines = (PrestamoInLine,)
|
normal
|
{
"blob_id": "86fdea2ae8e253aa4639bb3114de70c693536760",
"index": 1046,
"step-1": "<mask token>\n\n\nclass Prestamo(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"step-2": "<mask token>\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"step-3": "<mask token>\n\n\nclass Libros(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"step-4": "<mask token>\n\n\nclass Libros(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.Titulo\n\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto = models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\nclass Prestamo(models.Model):\n Fecha_Prestamo = models.DateTimeField(default=timezone.now)\n Fecha_Devolucion = models.DateField()\n Fecha_Devolucion_Real = models.DateField()\n Libro = models.ForeignKey(Libros, on_delete=models.CASCADE)\n Usuario = models.ForeignKey(Usuario, on_delete=models.CASCADE)\n\n\nclass PrestamoInLine(admin.TabularInline):\n model = Prestamo\n extra = 1\n\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = PrestamoInLine,\n",
"step-5": "from django.db import models\nfrom django.contrib import admin\nfrom django.utils import timezone\n\nclass Libros(models.Model):\n ISBN = models.CharField(max_length=13,primary_key=True)\n Titulo = models.CharField(max_length=15)\n # Portada = models.ImageField(upload_to='imagen/')\n Autor = models.CharField(max_length=100)\n Editorial = models.CharField(max_length=100)\n Pais=models.CharField(max_length=100)\n anno= models.IntegerField()\n\n def __str__(self):\n return self.Titulo\n\nclass Usuario(models.Model):\n DPI = models.CharField(max_length=20)\n NombreCompleto= models.CharField(max_length=100)\n\n def __str__(self):\n return self.DPI\n\n\n\nclass Prestamo (models.Model):\n Fecha_Prestamo=models.DateTimeField(default=timezone.now)\n Fecha_Devolucion=models.DateField()\n Fecha_Devolucion_Real=models.DateField()\n Libro=models.ForeignKey(Libros,on_delete=models.CASCADE)\n Usuario=models.ForeignKey(Usuario,on_delete=models.CASCADE)\n\nclass PrestamoInLine(admin.TabularInline):\n model=Prestamo\n extra=1\n\nclass LibroAdmin(admin.ModelAdmin):\n inlines = (PrestamoInLine,)\n\nclass UsuarioAdmin(admin.ModelAdmin):\n inlines = (PrestamoInLine,)\n",
"step-ids": [
7,
8,
12,
13,
16
]
}
|
[
7,
8,
12,
13,
16
] |
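A short registration sketch for the admin classes above; it assumes the module shown is the app's models.py and follows the standard admin.site.register pattern:

# admin.py (assumed file) - attach the inline-enabled admins to their models
from django.contrib import admin
from .models import Libros, Usuario, LibroAdmin, UsuarioAdmin

admin.site.register(Libros, LibroAdmin)
admin.site.register(Usuario, UsuarioAdmin)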
numbers = [3, 7, 5]
maxNumber = 0
for number in numbers:
if maxNumber < number:
maxNumber = number
print(maxNumber)
|
normal
|
{
"blob_id": "2d9d66ea8a95285744b797570bfbeaa17fdc922a",
"index": 4036,
"step-1": "numbers = [3, 7, 5]\nmaxNumber = 0\nfor number in numbers:\n if maxNumber < number:\n maxNumber = number\n\nprint maxNumber",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
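For reference, the manual scan above reproduces Python's built-in max(); a one-line equivalent (note the original seeds maxNumber with 0, so it only matches for non-negative inputs):

numbers = [3, 7, 5]
print(max(numbers))  # 7, same result as the manual scan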
print(input()in[str(i**i+i)for i in range(11)])
num = int(input())
suma = 0
x = 0
while(suma < num):
x += 1
suma = x**x + x
print(True if suma == num else False)
|
normal
|
{
"blob_id": "20fe9b68e65f6f017897bfa8e99d0c21ba1617fb",
"index": 1522,
"step-1": "print(input()in[str(i**i+i)for i in range(11)])\n\n\n\nnum = int(input())\nsuma = 0\nx = 0\nwhile(suma < num):\n x += 1\n suma = x**x + x\nprint(True if suma == num else False\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
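Both solutions above test whether the input can be written as i**i + i; for reference, the eleven values the range(11) one-liner recognises:

for i in range(11):
    print(i, i ** i + i)  # 0 -> 1, 1 -> 2, 2 -> 6, 3 -> 30, 4 -> 260, ...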
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from oslo_log import log
from watcher._i18n import _
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class ParallelMigrationStrategy(base.BaseStrategy):
VM = "vm"
VOLUME = "volume"
ACTIVE = "active"
SHUTOFF = "shutoff"
AVAILABLE = "available"
IN_USE = "in-use"
LIVE_MIGRATION = "live_migration"
COLD_MIGRATION = "cold_migration"
VOLUME_MIGRATION = "volume_migration"
VOLUME_RETYPE = "volume_retype"
VOLUME_UPDATE = "volume_update"
STATUS = "status"
DST_HOSTNAME = "dst_hostname"
DST_TYPE = "dst_type"
def __init__(self, config, osc=None):
super(ParallelMigrationStrategy, self).__init__(config, osc)
def pre_execute(self):
pass
def do_execute(self):
params = self.input_parameters.params
for key, value in params.iteritems():
for resource_id, dict in value.items():
resource_status = dict.get(self.STATUS)
dst_hostname = dict.get(self.DST_HOSTNAME)
dst_type = dict.get(self.DST_TYPE)
if key == self.VM:
if resource_status == self.ACTIVE:
# do live migration
self._live_migration(resource_id, dst_hostname)
elif resource_status == self.SHUTOFF:
# do cold migration
# cold migration can not specify dest_hostname
self._cold_migration(resource_id)
else:
raise Exception("Wrong status: %s." % resource_status)
elif key == self.VOLUME:
if resource_status == self.IN_USE:
# do novavolume update
self._volume_update(resource_id, dst_type)
elif resource_status == self.AVAILABLE:
# detached volume with no snapshots
# do cinder migrate
self._volume_retype(resource_id, dst_type)
else:
raise Exception("Wrong status: %s." % resource_status)
else:
raise Exception("Wrong key: %s." % key)
def _live_migration(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(
action_type=self.LIVE_MIGRATION,
resource_id=resource_id,
input_parameters=parameters)
def _cold_migration(self, resource_id):
self.solution.add_action(
action_type=self.COLD_MIGRATION,
resource_id=resource_id,
input_parameters={})
def _volume_update(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(
action_type=self.VOLUME_UPDATE,
resource_id=resource_id,
input_parameters=parameters)
def _volume_migrate(self, resource_id, dst_hostname):
parameters = {self.DST_HOSTNAME: dst_hostname}
self.solution.add_action(
action_type=self.VOLUME_MIGRATION,
resource_id=resource_id,
input_parameters=parameters)
def _volume_retype(self, resource_id, dst_type):
parameters = {self.DST_TYPE: dst_type}
self.solution.add_action(
action_type=self.VOLUME_RETYPE,
resource_id=resource_id,
input_parameters=parameters)
def post_execute(self):
pass
@classmethod
def get_goal_name(cls):
return "zone_migration"
@classmethod
def get_name(cls):
return "parallel_migration"
@classmethod
def get_display_name(cls):
return _("Parallel migration strategy")
@classmethod
def get_translatable_display_name(cls):
return "Parallel migration strategy"
@classmethod
def get_schema(cls):
return {
"properties": {
"params": {
"description": "",
"type": "object",
"default":
{"vm":
{"instance_id1":
{"status": "active",
"dst_hostname": "vm_dest_hostname1"},
"instance_id2":
{"status": "shutoff"}},
"volume":
{"cinder_id1":
{"status": "available",
"dst_type": "volume_dst_type"},
"cinder_id2":
{"status": "in-use",
"dst_type": "volume_dst_type"}}}
}
}
}
|
normal
|
{
"blob_id": "43e721ac45570e4f9ab9c1970abee3da6db40afa",
"index": 156,
"step-1": "<mask token>\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass ParallelMigrationStrategy(base.BaseStrategy):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, config, osc=None):\n super(ParallelMigrationStrategy, self).__init__(config, osc)\n\n def pre_execute(self):\n pass\n <mask token>\n\n def _live_migration(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(action_type=self.LIVE_MIGRATION,\n resource_id=resource_id, input_parameters=parameters)\n\n def _cold_migration(self, resource_id):\n self.solution.add_action(action_type=self.COLD_MIGRATION,\n resource_id=resource_id, input_parameters={})\n <mask token>\n\n def _volume_migrate(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(action_type=self.VOLUME_MIGRATION,\n resource_id=resource_id, input_parameters=parameters)\n\n def _volume_retype(self, resource_id, dst_type):\n parameters = {self.DST_TYPE: dst_type}\n self.solution.add_action(action_type=self.VOLUME_RETYPE,\n resource_id=resource_id, input_parameters=parameters)\n <mask token>\n\n @classmethod\n def get_goal_name(cls):\n return 'zone_migration'\n\n @classmethod\n def get_name(cls):\n return 'parallel_migration'\n <mask token>\n\n @classmethod\n def get_translatable_display_name(cls):\n return 'Parallel migration strategy'\n\n @classmethod\n def get_schema(cls):\n return {'properties': {'params': {'description': '', 'type':\n 'object', 'default': {'vm': {'instance_id1': {'status':\n 'active', 'dst_hostname': 'vm_dest_hostname1'}, 'instance_id2':\n {'status': 'shutoff'}}, 'volume': {'cinder_id1': {'status':\n 'available', 'dst_type': 'volume_dst_type'}, 'cinder_id2': {\n 'status': 'in-use', 'dst_type': 'volume_dst_type'}}}}}}\n",
"step-2": "<mask token>\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass ParallelMigrationStrategy(base.BaseStrategy):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, config, osc=None):\n super(ParallelMigrationStrategy, self).__init__(config, osc)\n\n def pre_execute(self):\n pass\n <mask token>\n\n def _live_migration(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(action_type=self.LIVE_MIGRATION,\n resource_id=resource_id, input_parameters=parameters)\n\n def _cold_migration(self, resource_id):\n self.solution.add_action(action_type=self.COLD_MIGRATION,\n resource_id=resource_id, input_parameters={})\n <mask token>\n\n def _volume_migrate(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(action_type=self.VOLUME_MIGRATION,\n resource_id=resource_id, input_parameters=parameters)\n\n def _volume_retype(self, resource_id, dst_type):\n parameters = {self.DST_TYPE: dst_type}\n self.solution.add_action(action_type=self.VOLUME_RETYPE,\n resource_id=resource_id, input_parameters=parameters)\n\n def post_execute(self):\n pass\n\n @classmethod\n def get_goal_name(cls):\n return 'zone_migration'\n\n @classmethod\n def get_name(cls):\n return 'parallel_migration'\n\n @classmethod\n def get_display_name(cls):\n return _('Parallel migration strategy')\n\n @classmethod\n def get_translatable_display_name(cls):\n return 'Parallel migration strategy'\n\n @classmethod\n def get_schema(cls):\n return {'properties': {'params': {'description': '', 'type':\n 'object', 'default': {'vm': {'instance_id1': {'status':\n 'active', 'dst_hostname': 'vm_dest_hostname1'}, 'instance_id2':\n {'status': 'shutoff'}}, 'volume': {'cinder_id1': {'status':\n 'available', 'dst_type': 'volume_dst_type'}, 'cinder_id2': {\n 'status': 'in-use', 'dst_type': 'volume_dst_type'}}}}}}\n",
"step-3": "<mask token>\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass ParallelMigrationStrategy(base.BaseStrategy):\n VM = 'vm'\n VOLUME = 'volume'\n ACTIVE = 'active'\n SHUTOFF = 'shutoff'\n AVAILABLE = 'available'\n IN_USE = 'in-use'\n LIVE_MIGRATION = 'live_migration'\n COLD_MIGRATION = 'cold_migration'\n VOLUME_MIGRATION = 'volume_migration'\n VOLUME_RETYPE = 'volume_retype'\n VOLUME_UPDATE = 'volume_update'\n STATUS = 'status'\n DST_HOSTNAME = 'dst_hostname'\n DST_TYPE = 'dst_type'\n\n def __init__(self, config, osc=None):\n super(ParallelMigrationStrategy, self).__init__(config, osc)\n\n def pre_execute(self):\n pass\n\n def do_execute(self):\n params = self.input_parameters.params\n for key, value in params.iteritems():\n for resource_id, dict in value.items():\n resource_status = dict.get(self.STATUS)\n dst_hostname = dict.get(self.DST_HOSTNAME)\n dst_type = dict.get(self.DST_TYPE)\n if key == self.VM:\n if resource_status == self.ACTIVE:\n self._live_migration(resource_id, dst_hostname)\n elif resource_status == self.SHUTOFF:\n self._cold_migration(resource_id)\n else:\n raise Exception('Wrong status: %s.' % resource_status)\n elif key == self.VOLUME:\n if resource_status == self.IN_USE:\n self._volume_update(resource_id, dst_type)\n elif resource_status == self.AVAILABLE:\n self._volume_retype(resource_id, dst_type)\n else:\n raise Exception('Wrong status: %s.' % resource_status)\n else:\n raise Exception('Wrong key: %s.' % key)\n\n def _live_migration(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(action_type=self.LIVE_MIGRATION,\n resource_id=resource_id, input_parameters=parameters)\n\n def _cold_migration(self, resource_id):\n self.solution.add_action(action_type=self.COLD_MIGRATION,\n resource_id=resource_id, input_parameters={})\n\n def _volume_update(self, resource_id, dst_type):\n parameters = {self.DST_TYPE: dst_type}\n self.solution.add_action(action_type=self.VOLUME_UPDATE,\n resource_id=resource_id, input_parameters=parameters)\n\n def _volume_migrate(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(action_type=self.VOLUME_MIGRATION,\n resource_id=resource_id, input_parameters=parameters)\n\n def _volume_retype(self, resource_id, dst_type):\n parameters = {self.DST_TYPE: dst_type}\n self.solution.add_action(action_type=self.VOLUME_RETYPE,\n resource_id=resource_id, input_parameters=parameters)\n\n def post_execute(self):\n pass\n\n @classmethod\n def get_goal_name(cls):\n return 'zone_migration'\n\n @classmethod\n def get_name(cls):\n return 'parallel_migration'\n\n @classmethod\n def get_display_name(cls):\n return _('Parallel migration strategy')\n\n @classmethod\n def get_translatable_display_name(cls):\n return 'Parallel migration strategy'\n\n @classmethod\n def get_schema(cls):\n return {'properties': {'params': {'description': '', 'type':\n 'object', 'default': {'vm': {'instance_id1': {'status':\n 'active', 'dst_hostname': 'vm_dest_hostname1'}, 'instance_id2':\n {'status': 'shutoff'}}, 'volume': {'cinder_id1': {'status':\n 'available', 'dst_type': 'volume_dst_type'}, 'cinder_id2': {\n 'status': 'in-use', 'dst_type': 'volume_dst_type'}}}}}}\n",
"step-4": "import abc\nimport six\nfrom oslo_log import log\nfrom watcher._i18n import _\nfrom watcher.decision_engine.strategy.strategies import base\nLOG = log.getLogger(__name__)\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass ParallelMigrationStrategy(base.BaseStrategy):\n VM = 'vm'\n VOLUME = 'volume'\n ACTIVE = 'active'\n SHUTOFF = 'shutoff'\n AVAILABLE = 'available'\n IN_USE = 'in-use'\n LIVE_MIGRATION = 'live_migration'\n COLD_MIGRATION = 'cold_migration'\n VOLUME_MIGRATION = 'volume_migration'\n VOLUME_RETYPE = 'volume_retype'\n VOLUME_UPDATE = 'volume_update'\n STATUS = 'status'\n DST_HOSTNAME = 'dst_hostname'\n DST_TYPE = 'dst_type'\n\n def __init__(self, config, osc=None):\n super(ParallelMigrationStrategy, self).__init__(config, osc)\n\n def pre_execute(self):\n pass\n\n def do_execute(self):\n params = self.input_parameters.params\n for key, value in params.iteritems():\n for resource_id, dict in value.items():\n resource_status = dict.get(self.STATUS)\n dst_hostname = dict.get(self.DST_HOSTNAME)\n dst_type = dict.get(self.DST_TYPE)\n if key == self.VM:\n if resource_status == self.ACTIVE:\n self._live_migration(resource_id, dst_hostname)\n elif resource_status == self.SHUTOFF:\n self._cold_migration(resource_id)\n else:\n raise Exception('Wrong status: %s.' % resource_status)\n elif key == self.VOLUME:\n if resource_status == self.IN_USE:\n self._volume_update(resource_id, dst_type)\n elif resource_status == self.AVAILABLE:\n self._volume_retype(resource_id, dst_type)\n else:\n raise Exception('Wrong status: %s.' % resource_status)\n else:\n raise Exception('Wrong key: %s.' % key)\n\n def _live_migration(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(action_type=self.LIVE_MIGRATION,\n resource_id=resource_id, input_parameters=parameters)\n\n def _cold_migration(self, resource_id):\n self.solution.add_action(action_type=self.COLD_MIGRATION,\n resource_id=resource_id, input_parameters={})\n\n def _volume_update(self, resource_id, dst_type):\n parameters = {self.DST_TYPE: dst_type}\n self.solution.add_action(action_type=self.VOLUME_UPDATE,\n resource_id=resource_id, input_parameters=parameters)\n\n def _volume_migrate(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(action_type=self.VOLUME_MIGRATION,\n resource_id=resource_id, input_parameters=parameters)\n\n def _volume_retype(self, resource_id, dst_type):\n parameters = {self.DST_TYPE: dst_type}\n self.solution.add_action(action_type=self.VOLUME_RETYPE,\n resource_id=resource_id, input_parameters=parameters)\n\n def post_execute(self):\n pass\n\n @classmethod\n def get_goal_name(cls):\n return 'zone_migration'\n\n @classmethod\n def get_name(cls):\n return 'parallel_migration'\n\n @classmethod\n def get_display_name(cls):\n return _('Parallel migration strategy')\n\n @classmethod\n def get_translatable_display_name(cls):\n return 'Parallel migration strategy'\n\n @classmethod\n def get_schema(cls):\n return {'properties': {'params': {'description': '', 'type':\n 'object', 'default': {'vm': {'instance_id1': {'status':\n 'active', 'dst_hostname': 'vm_dest_hostname1'}, 'instance_id2':\n {'status': 'shutoff'}}, 'volume': {'cinder_id1': {'status':\n 'available', 'dst_type': 'volume_dst_type'}, 'cinder_id2': {\n 'status': 'in-use', 'dst_type': 'volume_dst_type'}}}}}}\n",
"step-5": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nimport six\n\nfrom oslo_log import log\n\nfrom watcher._i18n import _\nfrom watcher.decision_engine.strategy.strategies import base\n\nLOG = log.getLogger(__name__)\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass ParallelMigrationStrategy(base.BaseStrategy):\n\n VM = \"vm\"\n VOLUME = \"volume\"\n ACTIVE = \"active\"\n SHUTOFF = \"shutoff\"\n AVAILABLE = \"available\"\n IN_USE = \"in-use\"\n LIVE_MIGRATION = \"live_migration\"\n COLD_MIGRATION = \"cold_migration\"\n VOLUME_MIGRATION = \"volume_migration\"\n VOLUME_RETYPE = \"volume_retype\"\n VOLUME_UPDATE = \"volume_update\"\n STATUS = \"status\"\n DST_HOSTNAME = \"dst_hostname\"\n DST_TYPE = \"dst_type\"\n\n def __init__(self, config, osc=None):\n super(ParallelMigrationStrategy, self).__init__(config, osc)\n\n def pre_execute(self):\n pass\n\n def do_execute(self):\n params = self.input_parameters.params\n for key, value in params.iteritems():\n for resource_id, dict in value.items():\n resource_status = dict.get(self.STATUS)\n dst_hostname = dict.get(self.DST_HOSTNAME)\n dst_type = dict.get(self.DST_TYPE)\n if key == self.VM:\n if resource_status == self.ACTIVE:\n # do live migration\n self._live_migration(resource_id, dst_hostname)\n elif resource_status == self.SHUTOFF:\n # do cold migration\n # cold migration can not specify dest_hostname\n self._cold_migration(resource_id)\n else:\n raise Exception(\"Wrong status: %s.\" % resource_status)\n elif key == self.VOLUME:\n if resource_status == self.IN_USE:\n # do novavolume update\n self._volume_update(resource_id, dst_type)\n elif resource_status == self.AVAILABLE:\n # detached volume with no snapshots\n # do cinder migrate\n self._volume_retype(resource_id, dst_type)\n else:\n raise Exception(\"Wrong status: %s.\" % resource_status)\n else:\n raise Exception(\"Wrong key: %s.\" % key)\n\n def _live_migration(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(\n action_type=self.LIVE_MIGRATION,\n resource_id=resource_id,\n input_parameters=parameters)\n\n def _cold_migration(self, resource_id):\n self.solution.add_action(\n action_type=self.COLD_MIGRATION,\n resource_id=resource_id,\n input_parameters={})\n\n def _volume_update(self, resource_id, dst_type):\n parameters = {self.DST_TYPE: dst_type}\n self.solution.add_action(\n action_type=self.VOLUME_UPDATE,\n resource_id=resource_id,\n input_parameters=parameters)\n\n def _volume_migrate(self, resource_id, dst_hostname):\n parameters = {self.DST_HOSTNAME: dst_hostname}\n self.solution.add_action(\n action_type=self.VOLUME_MIGRATION,\n resource_id=resource_id,\n input_parameters=parameters)\n\n def _volume_retype(self, resource_id, dst_type):\n parameters = {self.DST_TYPE: dst_type}\n self.solution.add_action(\n action_type=self.VOLUME_RETYPE,\n resource_id=resource_id,\n input_parameters=parameters)\n\n def post_execute(self):\n pass\n\n @classmethod\n def get_goal_name(cls):\n return \"zone_migration\"\n\n 
@classmethod\n def get_name(cls):\n return \"parallel_migration\"\n\n @classmethod\n def get_display_name(cls):\n return _(\"Parallel migration strategy\")\n\n @classmethod\n def get_translatable_display_name(cls):\n return \"Parallel migration strategy\"\n\n @classmethod\n def get_schema(cls):\n return {\n \"properties\": {\n \"params\": {\n \"description\": \"\",\n \"type\": \"object\",\n \"default\":\n {\"vm\":\n {\"instance_id1\":\n {\"status\": \"active\",\n \"dst_hostname\": \"vm_dest_hostname1\"},\n \"instance_id2\":\n {\"status\": \"shutoff\"}},\n \"volume\":\n {\"cinder_id1\":\n {\"status\": \"available\",\n \"dst_type\": \"volume_dst_type\"},\n \"cinder_id2\":\n {\"status\": \"in-use\",\n \"dst_type\": \"volume_dst_type\"}}}\n }\n }\n }\n",
"step-ids": [
11,
13,
16,
18,
19
]
}
|
[
11,
13,
16,
18,
19
] |
#!/usr/bin/env python
import serial
from action import Action
import math
comm = serial.Serial("/dev/ttyACM3", 115200, timeout=1)
# comm = None  # fallback stub: uncomment (and comment out the line above) to exercise the routes without the serial link
robot = Action(comm)
from flask import Flask
from flask import send_from_directory
import os
static_dir = os.path.join(os.getcwd(), "ControlApp")
print "serving from " + static_dir
app = Flask(__name__)
app.debug = False
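# Each route below maps one button on the control page to a single robot action;
# handlers return a bare "ok" so the page can fire requests without parsing the body.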
@app.route('/')
def root():
return send_from_directory(static_dir, "control.html")
@app.route("/stop")
def do_stop():
robot.stop()
return "ok"
@app.route("/forward")
def do_forward():
robot.move(0, 1)
return "ok"
@app.route("/backward")
def do_backward():
robot.move(0, -1)
return "ok"
@app.route("/left")
def do_left():
robot.move(math.pi/2.0, 1)
return "ok"
@app.route("/right")
def do_right():
robot.move(math.pi*3.0/2.0, 1)
return "ok"
@app.route("/turncw")
def do_turncw():
robot.turn(0.5)
return "ok"
@app.route("/turnacw")
def do_turnacw():
robot.turn(-0.5)
return "ok"
@app.route("/kick")
def do_kick():
robot.kick()
return "ok"
@app.route("/catch")
def do_catch():
robot.catch()
return "ok"
if __name__ == "__main__":
app.debug = True
app.run(port=5001)
|
normal
|
{
"blob_id": "54a6405e3447d488aa4fca88159ccaac2506df2c",
"index": 5995,
"step-1": "#!/usr/bin/env python\n\nimport serial\nfrom action import Action\nimport math\n\ncomm = serial.Serial(\"/dev/ttyACM3\", 115200, timeout=1)\n#comm = None\nrobot = Action(comm)\n\nfrom flask import Flask\nfrom flask import send_from_directory\nimport os\n\nstatic_dir = os.path.join(os.getcwd(), \"ControlApp\")\nprint \"serving from \" + static_dir\n\napp = Flask(__name__)\napp.debug = False\n\n\n@app.route('/')\ndef root():\n return send_from_directory(static_dir, \"control.html\")\n\n@app.route(\"/stop\")\ndef do_stop():\n robot.stop()\n return \"ok\"\n \n@app.route(\"/forward\")\ndef do_forward():\n robot.move(0, 1)\n return \"ok\"\n \n@app.route(\"/backward\")\ndef do_backward():\n robot.move(0, -1)\n return \"ok\"\n\n@app.route(\"/left\")\ndef do_left():\n robot.move(math.pi/2.0, 1)\n return \"ok\"\n\n@app.route(\"/right\")\ndef do_right():\n robot.move(math.pi*3.0/2.0, 1)\n return \"ok\"\n\n@app.route(\"/turncw\")\ndef do_turncw():\n robot.turn(0.5)\n return \"ok\"\n\n@app.route(\"/turnacw\")\ndef do_turnacw():\n robot.turn(-0.5)\n return \"ok\"\n\n@app.route(\"/kick\")\ndef do_kick():\n robot.kick()\n return \"ok\"\n\n@app.route(\"/catch\")\ndef do_catch():\n robot.catch()\n return \"ok\" \n \nif __name__ == \"__main__\":\n app.debug = True\n app.run(port=5001)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from selenium.webdriver.common.by import By
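# Each locator is a (By.<strategy>, "selector") tuple, ready to unpack into driver.find_element(*LOCATOR).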
class BasePageLocators:
LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
BASKET_LINK = (By.CSS_SELECTOR, '[class="btn btn-default"]:nth-child(1)')
USER_ICON = (By.CSS_SELECTOR, ".icon-user")
class LoginPageLocators:
LOG_IN_FORM = (By.CSS_SELECTOR, "#login_form")
REGISTER_FORM = (By.CSS_SELECTOR, "#register_form")
REGISTRATION_EMAIL = (By.CSS_SELECTOR, '#id_registration-email')
REGISTRATION_PASSWORD = (By.CSS_SELECTOR, '#id_registration-password1')
REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR, '#id_registration-password2')
REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR, '[name="registration_submit"]')
class BasketPageLocators:
BASKET_STATUS = (By.CSS_SELECTOR, '#content_inner')
NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(1) > .alertinner strong')
PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(3) > .alertinner strong')
class ProductPageLocators:
ADD_IN_BASKET = (By.CSS_SELECTOR, '.btn-add-to-basket')
SHIPMENT_PRICE = (By.CSS_SELECTOR, '.product_main .price_color')
SHIPMENT_NAME = (By.CSS_SELECTOR, '.product_main h1')
|
normal
|
{
"blob_id": "5d3b9005b8924da36a5885201339aa41082034cd",
"index": 8692,
"step-1": "<mask token>\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-2": "<mask token>\n\n\nclass LoginPageLocators:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-3": "<mask token>\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = By.CSS_SELECTOR, '#login_form'\n REGISTER_FORM = By.CSS_SELECTOR, '#register_form'\n REGISTRATION_EMAIL = By.CSS_SELECTOR, '#id_registration-email'\n REGISTRATION_PASSWORD = By.CSS_SELECTOR, '#id_registration-password1'\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR,\n '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR,\n '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-4": "<mask token>\n\n\nclass BasePageLocators:\n LOGIN_LINK = By.CSS_SELECTOR, '#login_link'\n BASKET_LINK = By.CSS_SELECTOR, '[class=\"btn btn-default\"]:nth-child(1)'\n USER_ICON = By.CSS_SELECTOR, '.icon-user'\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = By.CSS_SELECTOR, '#login_form'\n REGISTER_FORM = By.CSS_SELECTOR, '#register_form'\n REGISTRATION_EMAIL = By.CSS_SELECTOR, '#id_registration-email'\n REGISTRATION_PASSWORD = By.CSS_SELECTOR, '#id_registration-password1'\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR,\n '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR,\n '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-5": "from selenium.webdriver.common.by import By\n\n\nclass BasePageLocators:\n LOGIN_LINK = (By.CSS_SELECTOR, \"#login_link\")\n BASKET_LINK = (By.CSS_SELECTOR, '[class=\"btn btn-default\"]:nth-child(1)')\n USER_ICON = (By.CSS_SELECTOR, \".icon-user\")\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = (By.CSS_SELECTOR, \"#login_form\")\n REGISTER_FORM = (By.CSS_SELECTOR, \"#register_form\")\n REGISTRATION_EMAIL = (By.CSS_SELECTOR, '#id_registration-email')\n REGISTRATION_PASSWORD = (By.CSS_SELECTOR, '#id_registration-password1')\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR, '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR, '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = (By.CSS_SELECTOR, '#content_inner')\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = (By.CSS_SELECTOR, '.btn-add-to-basket')\n SHIPMENT_PRICE = (By.CSS_SELECTOR, '.product_main .price_color')\n SHIPMENT_NAME = (By.CSS_SELECTOR, '.product_main h1')\n\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
def unique(lisst):
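    # Count the distinct elements by converting the input to a set.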
setlisst = set(lisst)
return len(setlisst)
print(unique([4, 5, 1, 1, 3]))  # -> 4; a list keeps the duplicate that a set literal would already drop
|
normal
|
{
"blob_id": "42d26ef51bb4dafc8a0201a828652e166a3905e4",
"index": 7339,
"step-1": "<mask token>\n",
"step-2": "def unique(lisst):\n setlisst = set(lisst)\n return len(setlisst)\n\n\n<mask token>\n",
"step-3": "def unique(lisst):\n setlisst = set(lisst)\n return len(setlisst)\n\n\nprint(unique({4, 5, 1, 1, 3}))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 03:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_otp', '0001_initial'),
]
operations = [
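        # Adds an optional, indexed issuer_name column to the existing otpsecrets table.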
migrations.AddField(
model_name='otpsecrets',
name='issuer_name',
field=models.CharField(blank=True, db_index=True, max_length=40),
),
]
|
normal
|
{
"blob_id": "d45ca839a24093266c48e5f97164b160190b154d",
"index": 2133,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('django_otp', '0001_initial')]\n operations = [migrations.AddField(model_name='otpsecrets', name=\n 'issuer_name', field=models.CharField(blank=True, db_index=True,\n max_length=40))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('django_otp', '0001_initial')]\n operations = [migrations.AddField(model_name='otpsecrets', name=\n 'issuer_name', field=models.CharField(blank=True, db_index=True,\n max_length=40))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.4 on 2016-12-29 03:38\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('django_otp', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='otpsecrets',\n name='issuer_name',\n field=models.CharField(blank=True, db_index=True, max_length=40),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import matplotlib.pyplot as plt
import cv2
# IMREAD_GRAYSCALE = 0
img = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)
# IMREAD_COLOR = 1
# IMREAD_UNCHANGED = -1
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# cv2.imwrite('watchgray.png', img)
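# matplotlib applies a default colormap to single-channel arrays, so cmap='gray' is needed for a faithful grayscale display.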
plt.imshow(img, cmap='gray', interpolation='bicubic')
plt.show()
|
normal
|
{
"blob_id": "34ccaaf5eb47afd556588cd94cddbddaee1f0b53",
"index": 2851,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nplt.imshow(img, cmap='gray', interpolation='bicubic')\nplt.show()\n",
"step-3": "<mask token>\nimg = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nplt.imshow(img, cmap='gray', interpolation='bicubic')\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport cv2\nimg = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nplt.imshow(img, cmap='gray', interpolation='bicubic')\nplt.show()\n",
"step-5": "import matplotlib.pyplot as plt\nimport cv2\n# 0\nimg = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)\n# IMREAD_COLOR = 1\n# IMREAD_UNCHANGED = -1\ncv2.imshow('image', img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n# cv2.imwrite('watchgray,png', img)\n\nplt.imshow(img, cmap='gray', interpolation='bicubic')\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/local/autopkg/python
"""
JamfExtensionAttributeUploader processor for uploading extension attributes
to Jamf Pro using AutoPkg
by G Pugh
"""
import os
import sys
from time import sleep
from xml.sax.saxutils import escape
from autopkglib import ProcessorError # pylint: disable=import-error
# to use a base module in AutoPkg we need to add this path to the sys.path.
# this violates flake8 E402 (PEP8 imports) but is unavoidable, so the following
# imports require noqa comments for E402
sys.path.insert(0, os.path.dirname(__file__))
from JamfUploaderLib.JamfUploaderBase import JamfUploaderBase # noqa: E402
__all__ = ["JamfExtensionAttributeUploader"]
class JamfExtensionAttributeUploader(JamfUploaderBase):
description = (
"A processor for AutoPkg that will upload an Extension Attribute item to a "
"Jamf Cloud or on-prem server."
)
input_variables = {
"JSS_URL": {
"required": True,
"description": "URL to a Jamf Pro server that the API user has write access "
"to, optionally set as a key in the com.github.autopkg "
"preference file.",
},
"API_USERNAME": {
"required": True,
"description": "Username of account with appropriate access to "
"jss, optionally set as a key in the com.github.autopkg "
"preference file.",
},
"API_PASSWORD": {
"required": True,
"description": "Password of api user, optionally set as a key in "
"the com.github.autopkg preference file.",
},
"ea_name": {
"required": False,
"description": "Extension Attribute name",
"default": "",
},
"ea_script_path": {
"required": False,
"description": "Full path to the script to be uploaded",
},
"replace_ea": {
"required": False,
"description": "Overwrite an existing category if True.",
"default": False,
},
"ea_inventory_display": {
"required": False,
"description": "Inventory Display value for the EA.",
"default": "Extension Attributes",
},
"ea_data_type": {
"required": False,
"description": "Data type for the EA. One of String, Integer or Date.",
"default": "String",
},
"sleep": {
"required": False,
"description": "Pause after running this processor for specified seconds.",
"default": "0",
},
}
output_variables = {
"jamfextensionattributeuploader_summary_result": {
"description": "Description of interesting results.",
},
}
def upload_ea(
self,
jamf_url,
ea_name,
ea_data_type,
ea_inventory_display,
script_path,
obj_id=None,
enc_creds="",
token="",
):
"""Update extension attribute metadata."""
# import script from file and replace any keys in the script
if os.path.exists(script_path):
with open(script_path, "r") as file:
script_contents = file.read()
else:
raise ProcessorError("Script does not exist!")
# substitute user-assignable keys
script_contents = self.substitute_assignable_keys(script_contents)
# XML-escape the script
script_contents_escaped = escape(script_contents)
# build the object
ea_data = (
"<computer_extension_attribute>"
+ "<name>{}</name>".format(ea_name)
+ "<enabled>true</enabled>"
+ "<description/>"
+ "<data_type>{}</data_type>".format(ea_data_type)
+ "<input_type>"
+ " <type>script</type>"
+ " <platform>Mac</platform>"
+ " <script>{}</script>".format(script_contents_escaped)
+ "</input_type>"
+ "<inventory_display>{}</inventory_display>".format(ea_inventory_display)
+ "<recon_display>Extension Attributes</recon_display>"
+ "</computer_extension_attribute>"
)
self.output(
"Extension Attribute data:",
verbose_level=2,
)
self.output(
ea_data,
verbose_level=2,
)
self.output("Uploading Extension Attribute..")
# write the template to temp file
template_xml = self.write_temp_file(ea_data)
# if we find an object ID we put, if not, we post
object_type = "extension_attribute"
url = "{}/{}/id/{}".format(jamf_url, self.api_endpoints(object_type), obj_id)
count = 0
while True:
count += 1
self.output(
"Extension Attribute upload attempt {}".format(count),
verbose_level=2,
)
request = "PUT" if obj_id else "POST"
r = self.curl(
request=request,
url=url,
enc_creds=enc_creds,
token=token,
data=template_xml,
)
# check HTTP response
if self.status_check(r, "Extension Attribute", ea_name, request) == "break":
break
if count > 5:
self.output(
"ERROR: Extension Attribute upload did not succeed after 5 attempts"
)
self.output("\nHTTP POST Response Code: {}".format(r.status_code))
raise ProcessorError("ERROR: Extension Attribute upload failed ")
if int(self.sleep) > 30:
sleep(int(self.sleep))
else:
sleep(30)
def main(self):
"""Do the main thing here"""
self.jamf_url = self.env.get("JSS_URL")
self.jamf_user = self.env.get("API_USERNAME")
self.jamf_password = self.env.get("API_PASSWORD")
self.ea_script_path = self.env.get("ea_script_path")
self.ea_name = self.env.get("ea_name")
self.replace = self.env.get("replace_ea")
self.ea_data_type = self.env.get("ea_data_type")
self.ea_inventory_display = self.env.get("ea_inventory_display")
self.sleep = self.env.get("sleep")
# handle setting replace in overrides
if not self.replace or self.replace == "False":
self.replace = False
# clear any pre-existing summary result
if "jamfextensionattributeuploader_summary_result" in self.env:
del self.env["jamfextensionattributeuploader_summary_result"]
ea_uploaded = False
# handle files with a relative path
if not self.ea_script_path.startswith("/"):
found_template = self.get_path_to_file(self.ea_script_path)
if found_template:
self.ea_script_path = found_template
else:
raise ProcessorError(f"ERROR: EA file {self.ea_script_path} not found")
# now start the process of uploading the object
self.output(f"Checking for existing '{self.ea_name}' on {self.jamf_url}")
# obtain the relevant credentials
token, send_creds, _ = self.handle_classic_auth(
self.jamf_url, self.jamf_user, self.jamf_password
)
# check for existing - requires obj_name
obj_type = "extension_attribute"
obj_name = self.ea_name
obj_id = self.get_api_obj_id_from_name(
self.jamf_url,
obj_name,
obj_type,
enc_creds=send_creds,
token=token,
)
if obj_id:
self.output(
"Extension Attribute '{}' already exists: ID {}".format(
self.ea_name, obj_id
)
)
if self.replace:
self.output(
"Replacing existing Extension Attribute as 'replace_ea' is set to {}".format(
self.replace
),
verbose_level=1,
)
else:
self.output(
"Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.",
verbose_level=1,
)
return
# upload the EA
self.upload_ea(
self.jamf_url,
self.ea_name,
self.ea_data_type,
self.ea_inventory_display,
self.ea_script_path,
obj_id=obj_id,
enc_creds=send_creds,
token=token,
)
ea_uploaded = True
# output the summary
self.env["extension_attribute"] = self.ea_name
self.env["ea_uploaded"] = ea_uploaded
if ea_uploaded:
self.env["jamfextensionattributeuploader_summary_result"] = {
"summary_text": (
"The following extension attributes were created or "
"updated in Jamf Pro:"
),
"report_fields": ["name", "path"],
"data": {"name": self.ea_name, "path": self.ea_script_path},
}
if __name__ == "__main__":
PROCESSOR = JamfExtensionAttributeUploader()
PROCESSOR.execute_shell()
|
normal
|
{
"blob_id": "31f91e67d0adde0a984a6d162ea5607f06e9208e",
"index": 9876,
"step-1": "<mask token>\n\n\nclass JamfExtensionAttributeUploader(JamfUploaderBase):\n description = (\n 'A processor for AutoPkg that will upload an Extension Attribute item to a Jamf Cloud or on-prem server.'\n )\n input_variables = {'JSS_URL': {'required': True, 'description':\n 'URL to a Jamf Pro server that the API user has write access to, optionally set as a key in the com.github.autopkg preference file.'\n }, 'API_USERNAME': {'required': True, 'description':\n 'Username of account with appropriate access to jss, optionally set as a key in the com.github.autopkg preference file.'\n }, 'API_PASSWORD': {'required': True, 'description':\n 'Password of api user, optionally set as a key in the com.github.autopkg preference file.'\n }, 'ea_name': {'required': False, 'description':\n 'Extension Attribute name', 'default': ''}, 'ea_script_path': {\n 'required': False, 'description':\n 'Full path to the script to be uploaded'}, 'replace_ea': {\n 'required': False, 'description':\n 'Overwrite an existing category if True.', 'default': False},\n 'ea_inventory_display': {'required': False, 'description':\n 'Inventory Display value for the EA.', 'default':\n 'Extension Attributes'}, 'ea_data_type': {'required': False,\n 'description':\n 'Data type for the EA. One of String, Integer or Date.', 'default':\n 'String'}, 'sleep': {'required': False, 'description':\n 'Pause after running this processor for specified seconds.',\n 'default': '0'}}\n output_variables = {'jamfextensionattributeuploader_summary_result': {\n 'description': 'Description of interesting results.'}}\n\n def upload_ea(self, jamf_url, ea_name, ea_data_type,\n ea_inventory_display, script_path, obj_id=None, enc_creds='', token=''\n ):\n \"\"\"Update extension attribute metadata.\"\"\"\n if os.path.exists(script_path):\n with open(script_path, 'r') as file:\n script_contents = file.read()\n else:\n raise ProcessorError('Script does not exist!')\n script_contents = self.substitute_assignable_keys(script_contents)\n script_contents_escaped = escape(script_contents)\n ea_data = ('<computer_extension_attribute>' + '<name>{}</name>'.\n format(ea_name) + '<enabled>true</enabled>' + '<description/>' +\n '<data_type>{}</data_type>'.format(ea_data_type) +\n '<input_type>' + ' <type>script</type>' +\n ' <platform>Mac</platform>' + ' <script>{}</script>'.format(\n script_contents_escaped) + '</input_type>' +\n '<inventory_display>{}</inventory_display>'.format(\n ea_inventory_display) +\n '<recon_display>Extension Attributes</recon_display>' +\n '</computer_extension_attribute>')\n self.output('Extension Attribute data:', verbose_level=2)\n self.output(ea_data, verbose_level=2)\n self.output('Uploading Extension Attribute..')\n template_xml = self.write_temp_file(ea_data)\n object_type = 'extension_attribute'\n url = '{}/{}/id/{}'.format(jamf_url, self.api_endpoints(object_type\n ), obj_id)\n count = 0\n while True:\n count += 1\n self.output('Extension Attribute upload attempt {}'.format(\n count), verbose_level=2)\n request = 'PUT' if obj_id else 'POST'\n r = self.curl(request=request, url=url, enc_creds=enc_creds,\n token=token, data=template_xml)\n if self.status_check(r, 'Extension Attribute', ea_name, request\n ) == 'break':\n break\n if count > 5:\n self.output(\n 'ERROR: Extension Attribute upload did not succeed after 5 attempts'\n )\n self.output('\\nHTTP POST Response Code: {}'.format(r.\n status_code))\n raise ProcessorError(\n 'ERROR: Extension Attribute upload failed ')\n if int(self.sleep) > 30:\n sleep(int(self.sleep))\n 
else:\n sleep(30)\n\n def main(self):\n \"\"\"Do the main thing here\"\"\"\n self.jamf_url = self.env.get('JSS_URL')\n self.jamf_user = self.env.get('API_USERNAME')\n self.jamf_password = self.env.get('API_PASSWORD')\n self.ea_script_path = self.env.get('ea_script_path')\n self.ea_name = self.env.get('ea_name')\n self.replace = self.env.get('replace_ea')\n self.ea_data_type = self.env.get('ea_data_type')\n self.ea_inventory_display = self.env.get('ea_inventory_display')\n self.sleep = self.env.get('sleep')\n if not self.replace or self.replace == 'False':\n self.replace = False\n if 'jamfextensionattributeuploader_summary_result' in self.env:\n del self.env['jamfextensionattributeuploader_summary_result']\n ea_uploaded = False\n if not self.ea_script_path.startswith('/'):\n found_template = self.get_path_to_file(self.ea_script_path)\n if found_template:\n self.ea_script_path = found_template\n else:\n raise ProcessorError(\n f'ERROR: EA file {self.ea_script_path} not found')\n self.output(\n f\"Checking for existing '{self.ea_name}' on {self.jamf_url}\")\n token, send_creds, _ = self.handle_classic_auth(self.jamf_url, self\n .jamf_user, self.jamf_password)\n obj_type = 'extension_attribute'\n obj_name = self.ea_name\n obj_id = self.get_api_obj_id_from_name(self.jamf_url, obj_name,\n obj_type, enc_creds=send_creds, token=token)\n if obj_id:\n self.output(\"Extension Attribute '{}' already exists: ID {}\".\n format(self.ea_name, obj_id))\n if self.replace:\n self.output(\n \"Replacing existing Extension Attribute as 'replace_ea' is set to {}\"\n .format(self.replace), verbose_level=1)\n else:\n self.output(\n \"Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.\"\n , verbose_level=1)\n return\n self.upload_ea(self.jamf_url, self.ea_name, self.ea_data_type, self\n .ea_inventory_display, self.ea_script_path, obj_id=obj_id,\n enc_creds=send_creds, token=token)\n ea_uploaded = True\n self.env['extension_attribute'] = self.ea_name\n self.env['ea_uploaded'] = ea_uploaded\n if ea_uploaded:\n self.env['jamfextensionattributeuploader_summary_result'] = {\n 'summary_text':\n 'The following extension attributes were created or updated in Jamf Pro:'\n , 'report_fields': ['name', 'path'], 'data': {'name': self.\n ea_name, 'path': self.ea_script_path}}\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, os.path.dirname(__file__))\n<mask token>\n\n\nclass JamfExtensionAttributeUploader(JamfUploaderBase):\n description = (\n 'A processor for AutoPkg that will upload an Extension Attribute item to a Jamf Cloud or on-prem server.'\n )\n input_variables = {'JSS_URL': {'required': True, 'description':\n 'URL to a Jamf Pro server that the API user has write access to, optionally set as a key in the com.github.autopkg preference file.'\n }, 'API_USERNAME': {'required': True, 'description':\n 'Username of account with appropriate access to jss, optionally set as a key in the com.github.autopkg preference file.'\n }, 'API_PASSWORD': {'required': True, 'description':\n 'Password of api user, optionally set as a key in the com.github.autopkg preference file.'\n }, 'ea_name': {'required': False, 'description':\n 'Extension Attribute name', 'default': ''}, 'ea_script_path': {\n 'required': False, 'description':\n 'Full path to the script to be uploaded'}, 'replace_ea': {\n 'required': False, 'description':\n 'Overwrite an existing category if True.', 'default': False},\n 'ea_inventory_display': {'required': False, 'description':\n 'Inventory Display value for the EA.', 'default':\n 'Extension Attributes'}, 'ea_data_type': {'required': False,\n 'description':\n 'Data type for the EA. One of String, Integer or Date.', 'default':\n 'String'}, 'sleep': {'required': False, 'description':\n 'Pause after running this processor for specified seconds.',\n 'default': '0'}}\n output_variables = {'jamfextensionattributeuploader_summary_result': {\n 'description': 'Description of interesting results.'}}\n\n def upload_ea(self, jamf_url, ea_name, ea_data_type,\n ea_inventory_display, script_path, obj_id=None, enc_creds='', token=''\n ):\n \"\"\"Update extension attribute metadata.\"\"\"\n if os.path.exists(script_path):\n with open(script_path, 'r') as file:\n script_contents = file.read()\n else:\n raise ProcessorError('Script does not exist!')\n script_contents = self.substitute_assignable_keys(script_contents)\n script_contents_escaped = escape(script_contents)\n ea_data = ('<computer_extension_attribute>' + '<name>{}</name>'.\n format(ea_name) + '<enabled>true</enabled>' + '<description/>' +\n '<data_type>{}</data_type>'.format(ea_data_type) +\n '<input_type>' + ' <type>script</type>' +\n ' <platform>Mac</platform>' + ' <script>{}</script>'.format(\n script_contents_escaped) + '</input_type>' +\n '<inventory_display>{}</inventory_display>'.format(\n ea_inventory_display) +\n '<recon_display>Extension Attributes</recon_display>' +\n '</computer_extension_attribute>')\n self.output('Extension Attribute data:', verbose_level=2)\n self.output(ea_data, verbose_level=2)\n self.output('Uploading Extension Attribute..')\n template_xml = self.write_temp_file(ea_data)\n object_type = 'extension_attribute'\n url = '{}/{}/id/{}'.format(jamf_url, self.api_endpoints(object_type\n ), obj_id)\n count = 0\n while True:\n count += 1\n self.output('Extension Attribute upload attempt {}'.format(\n count), verbose_level=2)\n request = 'PUT' if obj_id else 'POST'\n r = self.curl(request=request, url=url, enc_creds=enc_creds,\n token=token, data=template_xml)\n if self.status_check(r, 'Extension Attribute', ea_name, request\n ) == 'break':\n break\n if count > 5:\n self.output(\n 'ERROR: Extension Attribute upload did not succeed after 5 attempts'\n )\n self.output('\\nHTTP POST Response Code: {}'.format(r.\n status_code))\n raise ProcessorError(\n 'ERROR: Extension Attribute upload failed 
')\n if int(self.sleep) > 30:\n sleep(int(self.sleep))\n else:\n sleep(30)\n\n def main(self):\n \"\"\"Do the main thing here\"\"\"\n self.jamf_url = self.env.get('JSS_URL')\n self.jamf_user = self.env.get('API_USERNAME')\n self.jamf_password = self.env.get('API_PASSWORD')\n self.ea_script_path = self.env.get('ea_script_path')\n self.ea_name = self.env.get('ea_name')\n self.replace = self.env.get('replace_ea')\n self.ea_data_type = self.env.get('ea_data_type')\n self.ea_inventory_display = self.env.get('ea_inventory_display')\n self.sleep = self.env.get('sleep')\n if not self.replace or self.replace == 'False':\n self.replace = False\n if 'jamfextensionattributeuploader_summary_result' in self.env:\n del self.env['jamfextensionattributeuploader_summary_result']\n ea_uploaded = False\n if not self.ea_script_path.startswith('/'):\n found_template = self.get_path_to_file(self.ea_script_path)\n if found_template:\n self.ea_script_path = found_template\n else:\n raise ProcessorError(\n f'ERROR: EA file {self.ea_script_path} not found')\n self.output(\n f\"Checking for existing '{self.ea_name}' on {self.jamf_url}\")\n token, send_creds, _ = self.handle_classic_auth(self.jamf_url, self\n .jamf_user, self.jamf_password)\n obj_type = 'extension_attribute'\n obj_name = self.ea_name\n obj_id = self.get_api_obj_id_from_name(self.jamf_url, obj_name,\n obj_type, enc_creds=send_creds, token=token)\n if obj_id:\n self.output(\"Extension Attribute '{}' already exists: ID {}\".\n format(self.ea_name, obj_id))\n if self.replace:\n self.output(\n \"Replacing existing Extension Attribute as 'replace_ea' is set to {}\"\n .format(self.replace), verbose_level=1)\n else:\n self.output(\n \"Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.\"\n , verbose_level=1)\n return\n self.upload_ea(self.jamf_url, self.ea_name, self.ea_data_type, self\n .ea_inventory_display, self.ea_script_path, obj_id=obj_id,\n enc_creds=send_creds, token=token)\n ea_uploaded = True\n self.env['extension_attribute'] = self.ea_name\n self.env['ea_uploaded'] = ea_uploaded\n if ea_uploaded:\n self.env['jamfextensionattributeuploader_summary_result'] = {\n 'summary_text':\n 'The following extension attributes were created or updated in Jamf Pro:'\n , 'report_fields': ['name', 'path'], 'data': {'name': self.\n ea_name, 'path': self.ea_script_path}}\n\n\nif __name__ == '__main__':\n PROCESSOR = JamfExtensionAttributeUploader()\n PROCESSOR.execute_shell()\n",
"step-3": "<mask token>\nsys.path.insert(0, os.path.dirname(__file__))\n<mask token>\n__all__ = ['JamfExtensionAttributeUploader']\n\n\nclass JamfExtensionAttributeUploader(JamfUploaderBase):\n description = (\n 'A processor for AutoPkg that will upload an Extension Attribute item to a Jamf Cloud or on-prem server.'\n )\n input_variables = {'JSS_URL': {'required': True, 'description':\n 'URL to a Jamf Pro server that the API user has write access to, optionally set as a key in the com.github.autopkg preference file.'\n }, 'API_USERNAME': {'required': True, 'description':\n 'Username of account with appropriate access to jss, optionally set as a key in the com.github.autopkg preference file.'\n }, 'API_PASSWORD': {'required': True, 'description':\n 'Password of api user, optionally set as a key in the com.github.autopkg preference file.'\n }, 'ea_name': {'required': False, 'description':\n 'Extension Attribute name', 'default': ''}, 'ea_script_path': {\n 'required': False, 'description':\n 'Full path to the script to be uploaded'}, 'replace_ea': {\n 'required': False, 'description':\n 'Overwrite an existing category if True.', 'default': False},\n 'ea_inventory_display': {'required': False, 'description':\n 'Inventory Display value for the EA.', 'default':\n 'Extension Attributes'}, 'ea_data_type': {'required': False,\n 'description':\n 'Data type for the EA. One of String, Integer or Date.', 'default':\n 'String'}, 'sleep': {'required': False, 'description':\n 'Pause after running this processor for specified seconds.',\n 'default': '0'}}\n output_variables = {'jamfextensionattributeuploader_summary_result': {\n 'description': 'Description of interesting results.'}}\n\n def upload_ea(self, jamf_url, ea_name, ea_data_type,\n ea_inventory_display, script_path, obj_id=None, enc_creds='', token=''\n ):\n \"\"\"Update extension attribute metadata.\"\"\"\n if os.path.exists(script_path):\n with open(script_path, 'r') as file:\n script_contents = file.read()\n else:\n raise ProcessorError('Script does not exist!')\n script_contents = self.substitute_assignable_keys(script_contents)\n script_contents_escaped = escape(script_contents)\n ea_data = ('<computer_extension_attribute>' + '<name>{}</name>'.\n format(ea_name) + '<enabled>true</enabled>' + '<description/>' +\n '<data_type>{}</data_type>'.format(ea_data_type) +\n '<input_type>' + ' <type>script</type>' +\n ' <platform>Mac</platform>' + ' <script>{}</script>'.format(\n script_contents_escaped) + '</input_type>' +\n '<inventory_display>{}</inventory_display>'.format(\n ea_inventory_display) +\n '<recon_display>Extension Attributes</recon_display>' +\n '</computer_extension_attribute>')\n self.output('Extension Attribute data:', verbose_level=2)\n self.output(ea_data, verbose_level=2)\n self.output('Uploading Extension Attribute..')\n template_xml = self.write_temp_file(ea_data)\n object_type = 'extension_attribute'\n url = '{}/{}/id/{}'.format(jamf_url, self.api_endpoints(object_type\n ), obj_id)\n count = 0\n while True:\n count += 1\n self.output('Extension Attribute upload attempt {}'.format(\n count), verbose_level=2)\n request = 'PUT' if obj_id else 'POST'\n r = self.curl(request=request, url=url, enc_creds=enc_creds,\n token=token, data=template_xml)\n if self.status_check(r, 'Extension Attribute', ea_name, request\n ) == 'break':\n break\n if count > 5:\n self.output(\n 'ERROR: Extension Attribute upload did not succeed after 5 attempts'\n )\n self.output('\\nHTTP POST Response Code: {}'.format(r.\n status_code))\n raise 
ProcessorError(\n 'ERROR: Extension Attribute upload failed ')\n if int(self.sleep) > 30:\n sleep(int(self.sleep))\n else:\n sleep(30)\n\n def main(self):\n \"\"\"Do the main thing here\"\"\"\n self.jamf_url = self.env.get('JSS_URL')\n self.jamf_user = self.env.get('API_USERNAME')\n self.jamf_password = self.env.get('API_PASSWORD')\n self.ea_script_path = self.env.get('ea_script_path')\n self.ea_name = self.env.get('ea_name')\n self.replace = self.env.get('replace_ea')\n self.ea_data_type = self.env.get('ea_data_type')\n self.ea_inventory_display = self.env.get('ea_inventory_display')\n self.sleep = self.env.get('sleep')\n if not self.replace or self.replace == 'False':\n self.replace = False\n if 'jamfextensionattributeuploader_summary_result' in self.env:\n del self.env['jamfextensionattributeuploader_summary_result']\n ea_uploaded = False\n if not self.ea_script_path.startswith('/'):\n found_template = self.get_path_to_file(self.ea_script_path)\n if found_template:\n self.ea_script_path = found_template\n else:\n raise ProcessorError(\n f'ERROR: EA file {self.ea_script_path} not found')\n self.output(\n f\"Checking for existing '{self.ea_name}' on {self.jamf_url}\")\n token, send_creds, _ = self.handle_classic_auth(self.jamf_url, self\n .jamf_user, self.jamf_password)\n obj_type = 'extension_attribute'\n obj_name = self.ea_name\n obj_id = self.get_api_obj_id_from_name(self.jamf_url, obj_name,\n obj_type, enc_creds=send_creds, token=token)\n if obj_id:\n self.output(\"Extension Attribute '{}' already exists: ID {}\".\n format(self.ea_name, obj_id))\n if self.replace:\n self.output(\n \"Replacing existing Extension Attribute as 'replace_ea' is set to {}\"\n .format(self.replace), verbose_level=1)\n else:\n self.output(\n \"Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.\"\n , verbose_level=1)\n return\n self.upload_ea(self.jamf_url, self.ea_name, self.ea_data_type, self\n .ea_inventory_display, self.ea_script_path, obj_id=obj_id,\n enc_creds=send_creds, token=token)\n ea_uploaded = True\n self.env['extension_attribute'] = self.ea_name\n self.env['ea_uploaded'] = ea_uploaded\n if ea_uploaded:\n self.env['jamfextensionattributeuploader_summary_result'] = {\n 'summary_text':\n 'The following extension attributes were created or updated in Jamf Pro:'\n , 'report_fields': ['name', 'path'], 'data': {'name': self.\n ea_name, 'path': self.ea_script_path}}\n\n\nif __name__ == '__main__':\n PROCESSOR = JamfExtensionAttributeUploader()\n PROCESSOR.execute_shell()\n",
"step-4": "<mask token>\nimport os\nimport sys\nfrom time import sleep\nfrom xml.sax.saxutils import escape\nfrom autopkglib import ProcessorError\nsys.path.insert(0, os.path.dirname(__file__))\nfrom JamfUploaderLib.JamfUploaderBase import JamfUploaderBase\n__all__ = ['JamfExtensionAttributeUploader']\n\n\nclass JamfExtensionAttributeUploader(JamfUploaderBase):\n description = (\n 'A processor for AutoPkg that will upload an Extension Attribute item to a Jamf Cloud or on-prem server.'\n )\n input_variables = {'JSS_URL': {'required': True, 'description':\n 'URL to a Jamf Pro server that the API user has write access to, optionally set as a key in the com.github.autopkg preference file.'\n }, 'API_USERNAME': {'required': True, 'description':\n 'Username of account with appropriate access to jss, optionally set as a key in the com.github.autopkg preference file.'\n }, 'API_PASSWORD': {'required': True, 'description':\n 'Password of api user, optionally set as a key in the com.github.autopkg preference file.'\n }, 'ea_name': {'required': False, 'description':\n 'Extension Attribute name', 'default': ''}, 'ea_script_path': {\n 'required': False, 'description':\n 'Full path to the script to be uploaded'}, 'replace_ea': {\n 'required': False, 'description':\n 'Overwrite an existing category if True.', 'default': False},\n 'ea_inventory_display': {'required': False, 'description':\n 'Inventory Display value for the EA.', 'default':\n 'Extension Attributes'}, 'ea_data_type': {'required': False,\n 'description':\n 'Data type for the EA. One of String, Integer or Date.', 'default':\n 'String'}, 'sleep': {'required': False, 'description':\n 'Pause after running this processor for specified seconds.',\n 'default': '0'}}\n output_variables = {'jamfextensionattributeuploader_summary_result': {\n 'description': 'Description of interesting results.'}}\n\n def upload_ea(self, jamf_url, ea_name, ea_data_type,\n ea_inventory_display, script_path, obj_id=None, enc_creds='', token=''\n ):\n \"\"\"Update extension attribute metadata.\"\"\"\n if os.path.exists(script_path):\n with open(script_path, 'r') as file:\n script_contents = file.read()\n else:\n raise ProcessorError('Script does not exist!')\n script_contents = self.substitute_assignable_keys(script_contents)\n script_contents_escaped = escape(script_contents)\n ea_data = ('<computer_extension_attribute>' + '<name>{}</name>'.\n format(ea_name) + '<enabled>true</enabled>' + '<description/>' +\n '<data_type>{}</data_type>'.format(ea_data_type) +\n '<input_type>' + ' <type>script</type>' +\n ' <platform>Mac</platform>' + ' <script>{}</script>'.format(\n script_contents_escaped) + '</input_type>' +\n '<inventory_display>{}</inventory_display>'.format(\n ea_inventory_display) +\n '<recon_display>Extension Attributes</recon_display>' +\n '</computer_extension_attribute>')\n self.output('Extension Attribute data:', verbose_level=2)\n self.output(ea_data, verbose_level=2)\n self.output('Uploading Extension Attribute..')\n template_xml = self.write_temp_file(ea_data)\n object_type = 'extension_attribute'\n url = '{}/{}/id/{}'.format(jamf_url, self.api_endpoints(object_type\n ), obj_id)\n count = 0\n while True:\n count += 1\n self.output('Extension Attribute upload attempt {}'.format(\n count), verbose_level=2)\n request = 'PUT' if obj_id else 'POST'\n r = self.curl(request=request, url=url, enc_creds=enc_creds,\n token=token, data=template_xml)\n if self.status_check(r, 'Extension Attribute', ea_name, request\n ) == 'break':\n break\n if count > 5:\n 
self.output(\n 'ERROR: Extension Attribute upload did not succeed after 5 attempts'\n )\n self.output('\\nHTTP POST Response Code: {}'.format(r.\n status_code))\n raise ProcessorError(\n 'ERROR: Extension Attribute upload failed ')\n if int(self.sleep) > 30:\n sleep(int(self.sleep))\n else:\n sleep(30)\n\n def main(self):\n \"\"\"Do the main thing here\"\"\"\n self.jamf_url = self.env.get('JSS_URL')\n self.jamf_user = self.env.get('API_USERNAME')\n self.jamf_password = self.env.get('API_PASSWORD')\n self.ea_script_path = self.env.get('ea_script_path')\n self.ea_name = self.env.get('ea_name')\n self.replace = self.env.get('replace_ea')\n self.ea_data_type = self.env.get('ea_data_type')\n self.ea_inventory_display = self.env.get('ea_inventory_display')\n self.sleep = self.env.get('sleep')\n if not self.replace or self.replace == 'False':\n self.replace = False\n if 'jamfextensionattributeuploader_summary_result' in self.env:\n del self.env['jamfextensionattributeuploader_summary_result']\n ea_uploaded = False\n if not self.ea_script_path.startswith('/'):\n found_template = self.get_path_to_file(self.ea_script_path)\n if found_template:\n self.ea_script_path = found_template\n else:\n raise ProcessorError(\n f'ERROR: EA file {self.ea_script_path} not found')\n self.output(\n f\"Checking for existing '{self.ea_name}' on {self.jamf_url}\")\n token, send_creds, _ = self.handle_classic_auth(self.jamf_url, self\n .jamf_user, self.jamf_password)\n obj_type = 'extension_attribute'\n obj_name = self.ea_name\n obj_id = self.get_api_obj_id_from_name(self.jamf_url, obj_name,\n obj_type, enc_creds=send_creds, token=token)\n if obj_id:\n self.output(\"Extension Attribute '{}' already exists: ID {}\".\n format(self.ea_name, obj_id))\n if self.replace:\n self.output(\n \"Replacing existing Extension Attribute as 'replace_ea' is set to {}\"\n .format(self.replace), verbose_level=1)\n else:\n self.output(\n \"Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.\"\n , verbose_level=1)\n return\n self.upload_ea(self.jamf_url, self.ea_name, self.ea_data_type, self\n .ea_inventory_display, self.ea_script_path, obj_id=obj_id,\n enc_creds=send_creds, token=token)\n ea_uploaded = True\n self.env['extension_attribute'] = self.ea_name\n self.env['ea_uploaded'] = ea_uploaded\n if ea_uploaded:\n self.env['jamfextensionattributeuploader_summary_result'] = {\n 'summary_text':\n 'The following extension attributes were created or updated in Jamf Pro:'\n , 'report_fields': ['name', 'path'], 'data': {'name': self.\n ea_name, 'path': self.ea_script_path}}\n\n\nif __name__ == '__main__':\n PROCESSOR = JamfExtensionAttributeUploader()\n PROCESSOR.execute_shell()\n",
"step-5": "#!/usr/local/autopkg/python\n\n\"\"\"\nJamfExtensionAttributeUploader processor for uploading extension attributes\nto Jamf Pro using AutoPkg\n by G Pugh\n\"\"\"\n\nimport os\nimport sys\nfrom time import sleep\nfrom xml.sax.saxutils import escape\nfrom autopkglib import ProcessorError # pylint: disable=import-error\n\n# to use a base module in AutoPkg we need to add this path to the sys.path.\n# this violates flake8 E402 (PEP8 imports) but is unavoidable, so the following\n# imports require noqa comments for E402\nsys.path.insert(0, os.path.dirname(__file__))\n\nfrom JamfUploaderLib.JamfUploaderBase import JamfUploaderBase # noqa: E402\n\n__all__ = [\"JamfExtensionAttributeUploader\"]\n\n\nclass JamfExtensionAttributeUploader(JamfUploaderBase):\n description = (\n \"A processor for AutoPkg that will upload an Extension Attribute item to a \"\n \"Jamf Cloud or on-prem server.\"\n )\n input_variables = {\n \"JSS_URL\": {\n \"required\": True,\n \"description\": \"URL to a Jamf Pro server that the API user has write access \"\n \"to, optionally set as a key in the com.github.autopkg \"\n \"preference file.\",\n },\n \"API_USERNAME\": {\n \"required\": True,\n \"description\": \"Username of account with appropriate access to \"\n \"jss, optionally set as a key in the com.github.autopkg \"\n \"preference file.\",\n },\n \"API_PASSWORD\": {\n \"required\": True,\n \"description\": \"Password of api user, optionally set as a key in \"\n \"the com.github.autopkg preference file.\",\n },\n \"ea_name\": {\n \"required\": False,\n \"description\": \"Extension Attribute name\",\n \"default\": \"\",\n },\n \"ea_script_path\": {\n \"required\": False,\n \"description\": \"Full path to the script to be uploaded\",\n },\n \"replace_ea\": {\n \"required\": False,\n \"description\": \"Overwrite an existing category if True.\",\n \"default\": False,\n },\n \"ea_inventory_display\": {\n \"required\": False,\n \"description\": \"Inventory Display value for the EA.\",\n \"default\": \"Extension Attributes\",\n },\n \"ea_data_type\": {\n \"required\": False,\n \"description\": \"Data type for the EA. 
One of String, Integer or Date.\",\n \"default\": \"String\",\n },\n \"sleep\": {\n \"required\": False,\n \"description\": \"Pause after running this processor for specified seconds.\",\n \"default\": \"0\",\n },\n }\n\n output_variables = {\n \"jamfextensionattributeuploader_summary_result\": {\n \"description\": \"Description of interesting results.\",\n },\n }\n\n def upload_ea(\n self,\n jamf_url,\n ea_name,\n ea_data_type,\n ea_inventory_display,\n script_path,\n obj_id=None,\n enc_creds=\"\",\n token=\"\",\n ):\n \"\"\"Update extension attribute metadata.\"\"\"\n # import script from file and replace any keys in the script\n if os.path.exists(script_path):\n with open(script_path, \"r\") as file:\n script_contents = file.read()\n else:\n raise ProcessorError(\"Script does not exist!\")\n\n # substitute user-assignable keys\n script_contents = self.substitute_assignable_keys(script_contents)\n\n # XML-escape the script\n script_contents_escaped = escape(script_contents)\n\n # build the object\n ea_data = (\n \"<computer_extension_attribute>\"\n + \"<name>{}</name>\".format(ea_name)\n + \"<enabled>true</enabled>\"\n + \"<description/>\"\n + \"<data_type>{}</data_type>\".format(ea_data_type)\n + \"<input_type>\"\n + \" <type>script</type>\"\n + \" <platform>Mac</platform>\"\n + \" <script>{}</script>\".format(script_contents_escaped)\n + \"</input_type>\"\n + \"<inventory_display>{}</inventory_display>\".format(ea_inventory_display)\n + \"<recon_display>Extension Attributes</recon_display>\"\n + \"</computer_extension_attribute>\"\n )\n self.output(\n \"Extension Attribute data:\",\n verbose_level=2,\n )\n self.output(\n ea_data,\n verbose_level=2,\n )\n\n self.output(\"Uploading Extension Attribute..\")\n # write the template to temp file\n template_xml = self.write_temp_file(ea_data)\n\n # if we find an object ID we put, if not, we post\n object_type = \"extension_attribute\"\n url = \"{}/{}/id/{}\".format(jamf_url, self.api_endpoints(object_type), obj_id)\n\n count = 0\n while True:\n count += 1\n self.output(\n \"Extension Attribute upload attempt {}\".format(count),\n verbose_level=2,\n )\n request = \"PUT\" if obj_id else \"POST\"\n r = self.curl(\n request=request,\n url=url,\n enc_creds=enc_creds,\n token=token,\n data=template_xml,\n )\n\n # check HTTP response\n if self.status_check(r, \"Extension Attribute\", ea_name, request) == \"break\":\n break\n if count > 5:\n self.output(\n \"ERROR: Extension Attribute upload did not succeed after 5 attempts\"\n )\n self.output(\"\\nHTTP POST Response Code: {}\".format(r.status_code))\n raise ProcessorError(\"ERROR: Extension Attribute upload failed \")\n if int(self.sleep) > 30:\n sleep(int(self.sleep))\n else:\n sleep(30)\n\n def main(self):\n \"\"\"Do the main thing here\"\"\"\n self.jamf_url = self.env.get(\"JSS_URL\")\n self.jamf_user = self.env.get(\"API_USERNAME\")\n self.jamf_password = self.env.get(\"API_PASSWORD\")\n self.ea_script_path = self.env.get(\"ea_script_path\")\n self.ea_name = self.env.get(\"ea_name\")\n self.replace = self.env.get(\"replace_ea\")\n self.ea_data_type = self.env.get(\"ea_data_type\")\n self.ea_inventory_display = self.env.get(\"ea_inventory_display\")\n self.sleep = self.env.get(\"sleep\")\n # handle setting replace in overrides\n if not self.replace or self.replace == \"False\":\n self.replace = False\n\n # clear any pre-existing summary result\n if \"jamfextensionattributeuploader_summary_result\" in self.env:\n del self.env[\"jamfextensionattributeuploader_summary_result\"]\n ea_uploaded = 
False\n\n # handle files with a relative path\n if not self.ea_script_path.startswith(\"/\"):\n found_template = self.get_path_to_file(self.ea_script_path)\n if found_template:\n self.ea_script_path = found_template\n else:\n raise ProcessorError(f\"ERROR: EA file {self.ea_script_path} not found\")\n\n # now start the process of uploading the object\n self.output(f\"Checking for existing '{self.ea_name}' on {self.jamf_url}\")\n\n # obtain the relevant credentials\n token, send_creds, _ = self.handle_classic_auth(\n self.jamf_url, self.jamf_user, self.jamf_password\n )\n\n # check for existing - requires obj_name\n obj_type = \"extension_attribute\"\n obj_name = self.ea_name\n obj_id = self.get_api_obj_id_from_name(\n self.jamf_url,\n obj_name,\n obj_type,\n enc_creds=send_creds,\n token=token,\n )\n\n if obj_id:\n self.output(\n \"Extension Attribute '{}' already exists: ID {}\".format(\n self.ea_name, obj_id\n )\n )\n if self.replace:\n self.output(\n \"Replacing existing Extension Attribute as 'replace_ea' is set to {}\".format(\n self.replace\n ),\n verbose_level=1,\n )\n else:\n self.output(\n \"Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.\",\n verbose_level=1,\n )\n return\n\n # upload the EA\n self.upload_ea(\n self.jamf_url,\n self.ea_name,\n self.ea_data_type,\n self.ea_inventory_display,\n self.ea_script_path,\n obj_id=obj_id,\n enc_creds=send_creds,\n token=token,\n )\n ea_uploaded = True\n\n # output the summary\n self.env[\"extension_attribute\"] = self.ea_name\n self.env[\"ea_uploaded\"] = ea_uploaded\n if ea_uploaded:\n self.env[\"jamfextensionattributeuploader_summary_result\"] = {\n \"summary_text\": (\n \"The following extension attributes were created or \"\n \"updated in Jamf Pro:\"\n ),\n \"report_fields\": [\"name\", \"path\"],\n \"data\": {\"name\": self.ea_name, \"path\": self.ea_script_path},\n }\n\n\nif __name__ == \"__main__\":\n PROCESSOR = JamfExtensionAttributeUploader()\n PROCESSOR.execute_shell()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Generated by Django 2.2.6 on 2019-10-10 07:02
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='cronjob',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titel', models.CharField(max_length=255)),
('adresse', models.URLField(max_length=255)),
('authentifizierung_checked', models.BooleanField(default=False)),
('benutzername', models.CharField(max_length=255)),
('passwort', models.CharField(max_length=255)),
('ausführen', models.DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, 105756))),
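                # Note: this default is the fixed timestamp captured when makemigrations ran,
                # not a per-row value; a callable such as django.utils.timezone.now would be needed for that.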
('benachrichtigung_fehlschlag', models.BooleanField(default=False)),
('benachrichtigung_erfolg', models.BooleanField(default=False)),
('benachrichtigung_deaktivierung', models.BooleanField(default=False)),
('antwort_speichern', models.BooleanField(default=False)),
],
),
]
|
normal
|
{
"blob_id": "af523777e32c44112bd37a4b9dcbc0941f7e8236",
"index": 4242,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='cronjob', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('titel', models.CharField(max_length=\n 255)), ('adresse', models.URLField(max_length=255)), (\n 'authentifizierung_checked', models.BooleanField(default=False)), (\n 'benutzername', models.CharField(max_length=255)), ('passwort',\n models.CharField(max_length=255)), ('ausführen', models.\n DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, \n 105756))), ('benachrichtigung_fehlschlag', models.BooleanField(\n default=False)), ('benachrichtigung_erfolg', models.BooleanField(\n default=False)), ('benachrichtigung_deaktivierung', models.\n BooleanField(default=False)), ('antwort_speichern', models.\n BooleanField(default=False))])]\n",
"step-4": "import datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='cronjob', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('titel', models.CharField(max_length=\n 255)), ('adresse', models.URLField(max_length=255)), (\n 'authentifizierung_checked', models.BooleanField(default=False)), (\n 'benutzername', models.CharField(max_length=255)), ('passwort',\n models.CharField(max_length=255)), ('ausführen', models.\n DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, \n 105756))), ('benachrichtigung_fehlschlag', models.BooleanField(\n default=False)), ('benachrichtigung_erfolg', models.BooleanField(\n default=False)), ('benachrichtigung_deaktivierung', models.\n BooleanField(default=False)), ('antwort_speichern', models.\n BooleanField(default=False))])]\n",
"step-5": "# Generated by Django 2.2.6 on 2019-10-10 07:02\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='cronjob',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('titel', models.CharField(max_length=255)),\n ('adresse', models.URLField(max_length=255)),\n ('authentifizierung_checked', models.BooleanField(default=False)),\n ('benutzername', models.CharField(max_length=255)),\n ('passwort', models.CharField(max_length=255)),\n ('ausführen', models.DateTimeField(default=datetime.datetime(2019, 10, 10, 9, 2, 22, 105756))),\n ('benachrichtigung_fehlschlag', models.BooleanField(default=False)),\n ('benachrichtigung_erfolg', models.BooleanField(default=False)),\n ('benachrichtigung_deaktivierung', models.BooleanField(default=False)),\n ('antwort_speichern', models.BooleanField(default=False)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
from io import BytesIO
import telegram
from flask import Flask, request, send_file
from fsm import TocMachine
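# NOTE: a live bot token committed to source is a credential leak; normally this would come from an environment variable.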
API_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'
WEBHOOK_URL = 'https://a140f4ad.ngrok.io/hook'  # must target the POST /hook handler below, not the GET /show-fsm image endpoint
app = Flask(__name__)
bot = telegram.Bot(token=API_TOKEN)
machine = TocMachine(
states=[
'user',
'state3',
'state4',
'state5',
'state6',
'state7',
'state8',
'state9',
'state10',
'state11',
'state12',
'state13',
'state14',
'state15'
],
transitions=[
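        # Each entry wires the shared 'advance' trigger: the machine moves source -> dest
        # only when the named condition method on TocMachine returns True.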
{
'trigger': 'advance',
'source': 'user',
'dest': 'state3',
'conditions': 'is_going_from_state0_to_state3'
},
{
'trigger': 'advance',
'source': 'state3',
'dest': 'state4',
'conditions': 'is_going_from_state3_to_state4'
},
{
'trigger': 'advance',
'source': 'state4',
'dest': 'state5',
'conditions': 'is_going_from_state4_to_state5'
},
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state6',
'conditions': 'is_going_from_state5_to_state6'
},
{
'trigger': 'advance',
'source': 'state5',
'dest': 'state7',
'conditions': 'is_going_from_state5_to_state7'
},
{
'trigger': 'advance',
'source': 'state4',
'dest': 'state8',
'conditions': 'is_going_from_state4_to_state8'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state9',
'conditions': 'is_going_from_state8_to_state9'
},
{
'trigger': 'advance',
'source': 'state6',
'dest': 'state8',
'conditions': 'is_going_from_state6_to_state8'
},
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state8',
'conditions': 'is_going_from_state7_to_state8'
},
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state5',
'conditions': 'is_going_from_state9_to_state5'
},
{
'trigger': 'advance',
'source': 'state9',
'dest': 'state10',
'conditions': 'is_going_from_state9_to_state10'
},
{
'trigger': 'advance',
'source': 'state6',
'dest': 'state10',
'conditions': 'is_going_from_state6_to_state10'
},
{
'trigger': 'advance',
'source': 'state7',
'dest': 'state10',
'conditions': 'is_going_from_state7_to_state10'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state11',
'conditions': 'is_going_from_state8_to_state11'
},
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state10',
'conditions': 'is_going_from_state11_to_state10'
},
{
'trigger': 'advance',
'source': 'state11',
'dest': 'state5',
'conditions': 'is_going_from_state11_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state12',
'conditions': 'is_going_from_state8_to_state12'
},
{
'trigger': 'advance',
'source': 'state12',
'dest': 'state10',
'conditions': 'is_going_from_state12_to_state10'
},
{
'trigger': 'advance',
'source': 'state12',
'dest': 'state5',
'conditions': 'is_going_from_state12_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state13',
'conditions': 'is_going_from_state8_to_state13'
},
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state10',
'conditions': 'is_going_from_state13_to_state10'
},
{
'trigger': 'advance',
'source': 'state13',
'dest': 'state5',
'conditions': 'is_going_from_state13_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state14',
'conditions': 'is_going_from_state8_to_state14'
},
{
'trigger': 'advance',
'source': 'state14',
'dest': 'state10',
'conditions': 'is_going_from_state14_to_state10'
},
{
'trigger': 'advance',
'source': 'state14',
'dest': 'state5',
'conditions': 'is_going_from_state14_to_state5'
},
{
'trigger': 'advance',
'source': 'state8',
'dest': 'state15',
'conditions': 'is_going_from_state8_to_state15'
},
{
'trigger': 'advance',
'source': 'state15',
'dest': 'state10',
'conditions': 'is_going_from_state15_to_state10'
},
{
'trigger': 'advance',
'source': 'state15',
'dest': 'state5',
'conditions': 'is_going_from_state15_to_state5'
},
{
'trigger': 'go_back',
'source': [
'state10'
],
'dest': 'user'
}
],
initial='user',
auto_transitions=False,
show_conditions=True,
)
def _set_webhook():
status = bot.set_webhook(WEBHOOK_URL)
if not status:
print('Webhook setup failed')
sys.exit(1)
else:
print('Your webhook URL has been set to "{}"'.format(WEBHOOK_URL))
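# Telegram pushes each update to this route; every update advances the FSM.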
@app.route('/hook', methods=['POST'])
def webhook_handler():
update = telegram.Update.de_json(request.get_json(force=True), bot)
machine.advance(update)
return 'ok'
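# Render the current FSM graph as a PNG, useful for debugging transitions.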
@app.route('/show-fsm', methods=['GET'])
def show_fsm():
byte_io = BytesIO()
machine.graph.draw(byte_io, prog='dot', format='png')
byte_io.seek(0)
return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')
if __name__ == "__main__":
_set_webhook()
app.run()
|
normal
|
{
"blob_id": "984efa858e782777472d84aab85471616a05b0e0",
"index": 2886,
"step-1": "<mask token>\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\n@app.route('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype=\n 'image/png')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\n@app.route('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype=\n 'image/png')\n\n\nif __name__ == '__main__':\n _set_webhook()\n app.run()\n",
"step-3": "<mask token>\nAPI_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'\nWEBHOOK_URL = 'https://a140f4ad.ngrok.io/show-fsm'\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(states=['user', 'state3', 'state4', 'state5', 'state6',\n 'state7', 'state8', 'state9', 'state10', 'state11', 'state12',\n 'state13', 'state14', 'state15'], transitions=[{'trigger': 'advance',\n 'source': 'user', 'dest': 'state3', 'conditions':\n 'is_going_from_state0_to_state3'}, {'trigger': 'advance', 'source':\n 'state3', 'dest': 'state4', 'conditions':\n 'is_going_from_state3_to_state4'}, {'trigger': 'advance', 'source':\n 'state4', 'dest': 'state5', 'conditions':\n 'is_going_from_state4_to_state5'}, {'trigger': 'advance', 'source':\n 'state5', 'dest': 'state6', 'conditions':\n 'is_going_from_state5_to_state6'}, {'trigger': 'advance', 'source':\n 'state5', 'dest': 'state7', 'conditions':\n 'is_going_from_state5_to_state7'}, {'trigger': 'advance', 'source':\n 'state4', 'dest': 'state8', 'conditions':\n 'is_going_from_state4_to_state8'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state9', 'conditions':\n 'is_going_from_state8_to_state9'}, {'trigger': 'advance', 'source':\n 'state6', 'dest': 'state8', 'conditions':\n 'is_going_from_state6_to_state8'}, {'trigger': 'advance', 'source':\n 'state7', 'dest': 'state8', 'conditions':\n 'is_going_from_state7_to_state8'}, {'trigger': 'advance', 'source':\n 'state9', 'dest': 'state5', 'conditions':\n 'is_going_from_state9_to_state5'}, {'trigger': 'advance', 'source':\n 'state9', 'dest': 'state10', 'conditions':\n 'is_going_from_state9_to_state10'}, {'trigger': 'advance', 'source':\n 'state6', 'dest': 'state10', 'conditions':\n 'is_going_from_state6_to_state10'}, {'trigger': 'advance', 'source':\n 'state7', 'dest': 'state10', 'conditions':\n 'is_going_from_state7_to_state10'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state11', 'conditions':\n 'is_going_from_state8_to_state11'}, {'trigger': 'advance', 'source':\n 'state11', 'dest': 'state10', 'conditions':\n 'is_going_from_state11_to_state10'}, {'trigger': 'advance', 'source':\n 'state11', 'dest': 'state5', 'conditions':\n 'is_going_from_state11_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state12', 'conditions':\n 'is_going_from_state8_to_state12'}, {'trigger': 'advance', 'source':\n 'state12', 'dest': 'state10', 'conditions':\n 'is_going_from_state12_to_state10'}, {'trigger': 'advance', 'source':\n 'state12', 'dest': 'state5', 'conditions':\n 'is_going_from_state12_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state13', 'conditions':\n 'is_going_from_state8_to_state13'}, {'trigger': 'advance', 'source':\n 'state13', 'dest': 'state10', 'conditions':\n 'is_going_from_state13_to_state10'}, {'trigger': 'advance', 'source':\n 'state13', 'dest': 'state5', 'conditions':\n 'is_going_from_state13_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state14', 'conditions':\n 'is_going_from_state8_to_state14'}, {'trigger': 'advance', 'source':\n 'state14', 'dest': 'state10', 'conditions':\n 'is_going_from_state14_to_state10'}, {'trigger': 'advance', 'source':\n 'state14', 'dest': 'state5', 'conditions':\n 'is_going_from_state14_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state15', 'conditions':\n 'is_going_from_state8_to_state15'}, {'trigger': 'advance', 'source':\n 'state15', 'dest': 'state10', 'conditions':\n 'is_going_from_state15_to_state10'}, {'trigger': 'advance', 'source':\n 'state15', 
'dest': 'state5', 'conditions':\n 'is_going_from_state15_to_state5'}, {'trigger': 'go_back', 'source': [\n 'state10'], 'dest': 'user'}], initial='user', auto_transitions=False,\n show_conditions=True)\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\n@app.route('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype=\n 'image/png')\n\n\nif __name__ == '__main__':\n _set_webhook()\n app.run()\n",
"step-4": "import sys\nfrom io import BytesIO\nimport telegram\nfrom flask import Flask, request, send_file\nfrom fsm import TocMachine\nAPI_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'\nWEBHOOK_URL = 'https://a140f4ad.ngrok.io/show-fsm'\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(states=['user', 'state3', 'state4', 'state5', 'state6',\n 'state7', 'state8', 'state9', 'state10', 'state11', 'state12',\n 'state13', 'state14', 'state15'], transitions=[{'trigger': 'advance',\n 'source': 'user', 'dest': 'state3', 'conditions':\n 'is_going_from_state0_to_state3'}, {'trigger': 'advance', 'source':\n 'state3', 'dest': 'state4', 'conditions':\n 'is_going_from_state3_to_state4'}, {'trigger': 'advance', 'source':\n 'state4', 'dest': 'state5', 'conditions':\n 'is_going_from_state4_to_state5'}, {'trigger': 'advance', 'source':\n 'state5', 'dest': 'state6', 'conditions':\n 'is_going_from_state5_to_state6'}, {'trigger': 'advance', 'source':\n 'state5', 'dest': 'state7', 'conditions':\n 'is_going_from_state5_to_state7'}, {'trigger': 'advance', 'source':\n 'state4', 'dest': 'state8', 'conditions':\n 'is_going_from_state4_to_state8'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state9', 'conditions':\n 'is_going_from_state8_to_state9'}, {'trigger': 'advance', 'source':\n 'state6', 'dest': 'state8', 'conditions':\n 'is_going_from_state6_to_state8'}, {'trigger': 'advance', 'source':\n 'state7', 'dest': 'state8', 'conditions':\n 'is_going_from_state7_to_state8'}, {'trigger': 'advance', 'source':\n 'state9', 'dest': 'state5', 'conditions':\n 'is_going_from_state9_to_state5'}, {'trigger': 'advance', 'source':\n 'state9', 'dest': 'state10', 'conditions':\n 'is_going_from_state9_to_state10'}, {'trigger': 'advance', 'source':\n 'state6', 'dest': 'state10', 'conditions':\n 'is_going_from_state6_to_state10'}, {'trigger': 'advance', 'source':\n 'state7', 'dest': 'state10', 'conditions':\n 'is_going_from_state7_to_state10'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state11', 'conditions':\n 'is_going_from_state8_to_state11'}, {'trigger': 'advance', 'source':\n 'state11', 'dest': 'state10', 'conditions':\n 'is_going_from_state11_to_state10'}, {'trigger': 'advance', 'source':\n 'state11', 'dest': 'state5', 'conditions':\n 'is_going_from_state11_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state12', 'conditions':\n 'is_going_from_state8_to_state12'}, {'trigger': 'advance', 'source':\n 'state12', 'dest': 'state10', 'conditions':\n 'is_going_from_state12_to_state10'}, {'trigger': 'advance', 'source':\n 'state12', 'dest': 'state5', 'conditions':\n 'is_going_from_state12_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state13', 'conditions':\n 'is_going_from_state8_to_state13'}, {'trigger': 'advance', 'source':\n 'state13', 'dest': 'state10', 'conditions':\n 'is_going_from_state13_to_state10'}, {'trigger': 'advance', 'source':\n 'state13', 'dest': 'state5', 'conditions':\n 'is_going_from_state13_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state14', 'conditions':\n 'is_going_from_state8_to_state14'}, {'trigger': 'advance', 'source':\n 'state14', 'dest': 'state10', 'conditions':\n 'is_going_from_state14_to_state10'}, {'trigger': 'advance', 'source':\n 'state14', 'dest': 'state5', 'conditions':\n 'is_going_from_state14_to_state5'}, {'trigger': 'advance', 'source':\n 'state8', 'dest': 'state15', 'conditions':\n 'is_going_from_state8_to_state15'}, {'trigger': 'advance', 'source':\n 'state15', 
'dest': 'state10', 'conditions':\n 'is_going_from_state15_to_state10'}, {'trigger': 'advance', 'source':\n 'state15', 'dest': 'state5', 'conditions':\n 'is_going_from_state15_to_state5'}, {'trigger': 'go_back', 'source': [\n 'state10'], 'dest': 'user'}], initial='user', auto_transitions=False,\n show_conditions=True)\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\n@app.route('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype=\n 'image/png')\n\n\nif __name__ == '__main__':\n _set_webhook()\n app.run()\n",
"step-5": "import sys\nfrom io import BytesIO\n\nimport telegram\nfrom flask import Flask, request, send_file\n\nfrom fsm import TocMachine\n\n\nAPI_TOKEN = '375541027:AAFvLkySNkMSGgOl7PtsPIsJgnxophQpllQ'\nWEBHOOK_URL = 'https://a140f4ad.ngrok.io/show-fsm'\n\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(\n states=[\n 'user',\n 'state3',\n 'state4',\n 'state5',\n 'state6',\n 'state7',\n 'state8',\n 'state9',\n 'state10',\n 'state11',\n 'state12',\n 'state13',\n 'state14',\n 'state15'\n ],\n transitions=[\n {\n 'trigger': 'advance',\n 'source': 'user',\n 'dest': 'state3',\n 'conditions': 'is_going_from_state0_to_state3'\n },\n {\n 'trigger': 'advance',\n 'source': 'state3',\n 'dest': 'state4',\n 'conditions': 'is_going_from_state3_to_state4'\n },\n {\n 'trigger': 'advance',\n 'source': 'state4',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state4_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state5',\n 'dest': 'state6',\n 'conditions': 'is_going_from_state5_to_state6'\n },\n {\n 'trigger': 'advance',\n 'source': 'state5',\n 'dest': 'state7',\n 'conditions': 'is_going_from_state5_to_state7'\n },\n {\n 'trigger': 'advance',\n 'source': 'state4',\n 'dest': 'state8',\n 'conditions': 'is_going_from_state4_to_state8'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state9',\n 'conditions': 'is_going_from_state8_to_state9'\n },\n {\n 'trigger': 'advance',\n 'source': 'state6',\n 'dest': 'state8',\n 'conditions': 'is_going_from_state6_to_state8'\n },\n {\n 'trigger': 'advance',\n 'source': 'state7',\n 'dest': 'state8',\n 'conditions': 'is_going_from_state7_to_state8'\n },\n {\n 'trigger': 'advance',\n 'source': 'state9',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state9_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state9',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state9_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state6',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state6_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state7',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state7_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state11',\n 'conditions': 'is_going_from_state8_to_state11'\n },\n {\n 'trigger': 'advance',\n 'source': 'state11',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state11_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state11',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state11_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state12',\n 'conditions': 'is_going_from_state8_to_state12'\n },\n {\n 'trigger': 'advance',\n 'source': 'state12',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state12_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state12',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state12_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state13',\n 'conditions': 'is_going_from_state8_to_state13'\n },\n {\n 'trigger': 'advance',\n 'source': 'state13',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state13_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state13',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state13_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state14',\n 'conditions': 'is_going_from_state8_to_state14'\n },\n {\n 'trigger': 'advance',\n 'source': 'state14',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state14_to_state10'\n 
},\n {\n 'trigger': 'advance',\n 'source': 'state14',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state14_to_state5'\n },\n {\n 'trigger': 'advance',\n 'source': 'state8',\n 'dest': 'state15',\n 'conditions': 'is_going_from_state8_to_state15'\n },\n {\n 'trigger': 'advance',\n 'source': 'state15',\n 'dest': 'state10',\n 'conditions': 'is_going_from_state15_to_state10'\n },\n {\n 'trigger': 'advance',\n 'source': 'state15',\n 'dest': 'state5',\n 'conditions': 'is_going_from_state15_to_state5'\n },\n {\n 'trigger': 'go_back',\n 'source': [\n 'state10'\n ],\n 'dest': 'user'\n }\n ],\n initial='user',\n auto_transitions=False,\n show_conditions=True,\n)\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\n@app.route('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')\n\n\nif __name__ == \"__main__\":\n _set_webhook()\n app.run()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.test import TestCase, Client
from accounts.models import Account
from .data import account
from rest_framework import status
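# Smoke tests for the account registration and login endpoints.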
class TestAccountRequests(TestCase):
def setUp(self):
self.client = Client()
self.superuser = Account.objects.create_superuser(**account)
def test_register_admin(self):
        response = self.client.post('/account/register/', data=account,
                                    content_type='application/json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
def test_login(self):
data = {
'email': 'office@theoscoding.com',
'password': 'Pwd1q2w3e',
}
        # create_user hashes the password so the login view can authenticate it
        Account.objects.create_user(**data)
        response = self.client.post('/account/login/', data=data,
                                    content_type='application/json')
        self.assertEqual(status.HTTP_200_OK, response.status_code)
|
normal
|
{
"blob_id": "3d43bf0d0ca1df06b3647a33f88cee067eeff9f4",
"index": 2605,
"step-1": "<mask token>\n\n\nclass TestAccountRequests(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestAccountRequests(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n\n def test_register_admin(self):\n response = self.client.post(f'/account/register/', data=account,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestAccountRequests(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n\n def test_register_admin(self):\n response = self.client.post(f'/account/register/', data=account,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n\n def test_login(self):\n data = {'email': 'office@theoscoding.com', 'password': 'Pwd1q2w3e'}\n Account.objects.create(**data)\n response = self.client.post(f'/account/login/', data=data,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n",
"step-4": "from django.test import TestCase, Client\nfrom accounts.models import Account\nfrom .data import account\nfrom rest_framework import status\n\n\nclass TestAccountRequests(TestCase):\n\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n\n def test_register_admin(self):\n response = self.client.post(f'/account/register/', data=account,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n\n def test_login(self):\n data = {'email': 'office@theoscoding.com', 'password': 'Pwd1q2w3e'}\n Account.objects.create(**data)\n response = self.client.post(f'/account/login/', data=data,\n content_type='application/json')\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n",
"step-5": "from django.test import TestCase, Client\n\nfrom accounts.models import Account\nfrom .data import account\nfrom rest_framework import status\n\n\nclass TestAccountRequests(TestCase):\n def setUp(self):\n self.client = Client()\n self.superuser = Account.objects.create_superuser(**account)\n\n def test_register_admin(self):\n response = self.client.post(f'/account/register/', data=account,\n content_type='application/json')\n\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n\n def test_login(self):\n data = {\n 'email': 'office@theoscoding.com',\n 'password': 'Pwd1q2w3e',\n }\n Account.objects.create(**data)\n response = self.client.post(f'/account/login/', data=data,\n content_type='application/json')\n\n self.assertTrue(status.HTTP_200_OK, response.status_code)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
user = settings.AUTH_USER_MODEL
comment_length = settings.COMMENT_LENGTH
# Entity Comment
class Comment(TimeStampedModel):
"""
Text comment posted by users
"""
# User - Foreign key
    user = models.ForeignKey(user, on_delete=models.CASCADE, blank=False, null=False, related_name='comment_user')
# Parent comment (optional) - i.e. a comment of a comment
    starting_comment = models.ForeignKey('Comment', on_delete=models.CASCADE, blank=True, null=True, related_name='parent_comment')
# Text content of a comment
    content = models.TextField(_('comment text'), max_length=comment_length, blank=False, null=False)
class Meta:
verbose_name = _('comment')
verbose_name_plural = _('comments')
    def __str__(self):
        return self.content
def get_content(self):
"Returns the text content for the comment"
return self.content
    def get_user_id(self):
        "Returns the id of the user who posted the comment"
        return self.user.pk
def get_date(self):
"Returns the timestamp associated to the comment"
return self.created
    def get_parent_comment_id(self):
        "Returns the id of the parent comment"
        return self.starting_comment.pk
    def set_parent_comment(self, parent_comment):
        self.starting_comment = parent_comment
# Entity Cigarette
class Cigarette(models.Model):
"""
Cigarette smoked by a user
"""
# User - Foreign key
    user = models.ForeignKey(user, on_delete=models.CASCADE, blank=False, null=False, related_name='user_cigarettes')
# Date and time associated to the cigarette
cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)
cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)
class Meta:
verbose_name = _('cigarette')
verbose_name_plural = _('cigarettes')
    def __str__(self):
        return '%s' % self.pk
    def get_cigarette_user_id(self):
        "Returns the user id who smoked the cigarette"
        return self.user.pk
def get_date(self):
"Returns the date associated to the cigarette"
return self.cigarette_date
def get_time(self):
"Returns the time associated to the cigarette"
return self.cigarette_time
|
normal
|
{
"blob_id": "68ea462f56ba029a7c977d9c8b94e6f913336fb7",
"index": 4680,
"step-1": "<mask token>\n\n\nclass Cigarette(models.Model):\n <mask token>\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'user_cigarettes')\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n def get_cigarette_user_id(self):\n \"\"\"Returns the user id who smoked the cigarette\"\"\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"\"\"Returns the date associated to the cigarette\"\"\"\n return self.cigarette_date\n\n def get_time(self):\n \"\"\"Returns the time associated to the cigarette\"\"\"\n return self.cigarette_time\n",
"step-2": "<mask token>\n\n\nclass Comment(TimeStampedModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __unicode__(self):\n return self.content\n <mask token>\n\n def get_user_id(self):\n \"\"\"Returns the id of the user who posted the comment\"\"\"\n return self.comment_user.pk\n <mask token>\n\n def get_parent_comment_id(self):\n \"\"\"Returns the id of the parent comment\"\"\"\n return self.parent_comment.pk\n\n def set_parent_comment(parent_comment):\n self.starting_comment = parent_comment\n\n\nclass Cigarette(models.Model):\n \"\"\"\n Cigarette smoked by a user\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'user_cigarettes')\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n def get_cigarette_user_id(self):\n \"\"\"Returns the user id who smoked the cigarette\"\"\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"\"\"Returns the date associated to the cigarette\"\"\"\n return self.cigarette_date\n\n def get_time(self):\n \"\"\"Returns the time associated to the cigarette\"\"\"\n return self.cigarette_time\n",
"step-3": "<mask token>\n\n\nclass Comment(TimeStampedModel):\n \"\"\"\n Text comment posted by users\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'comment_user')\n starting_comment = models.ForeignKey('Comment', blank=True, null=True,\n related_name='parent_comment')\n content = models.TextField(_('comment text'), max_length=\n commment_lenght, blank=False, null=False)\n\n\n class Meta:\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __unicode__(self):\n return self.content\n\n def get_content(self):\n \"\"\"Returns the text content for the comment\"\"\"\n return self.content\n\n def get_user_id(self):\n \"\"\"Returns the id of the user who posted the comment\"\"\"\n return self.comment_user.pk\n\n def get_date(self):\n \"\"\"Returns the timestamp associated to the comment\"\"\"\n return self.created\n\n def get_parent_comment_id(self):\n \"\"\"Returns the id of the parent comment\"\"\"\n return self.parent_comment.pk\n\n def set_parent_comment(parent_comment):\n self.starting_comment = parent_comment\n\n\nclass Cigarette(models.Model):\n \"\"\"\n Cigarette smoked by a user\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'user_cigarettes')\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n def get_cigarette_user_id(self):\n \"\"\"Returns the user id who smoked the cigarette\"\"\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"\"\"Returns the date associated to the cigarette\"\"\"\n return self.cigarette_date\n\n def get_time(self):\n \"\"\"Returns the time associated to the cigarette\"\"\"\n return self.cigarette_time\n",
"step-4": "<mask token>\nuser = settings.AUTH_USER_MODEL\ncommment_lenght = settings.COMMENT_LENGTH\n\n\nclass Comment(TimeStampedModel):\n \"\"\"\n Text comment posted by users\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'comment_user')\n starting_comment = models.ForeignKey('Comment', blank=True, null=True,\n related_name='parent_comment')\n content = models.TextField(_('comment text'), max_length=\n commment_lenght, blank=False, null=False)\n\n\n class Meta:\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __unicode__(self):\n return self.content\n\n def get_content(self):\n \"\"\"Returns the text content for the comment\"\"\"\n return self.content\n\n def get_user_id(self):\n \"\"\"Returns the id of the user who posted the comment\"\"\"\n return self.comment_user.pk\n\n def get_date(self):\n \"\"\"Returns the timestamp associated to the comment\"\"\"\n return self.created\n\n def get_parent_comment_id(self):\n \"\"\"Returns the id of the parent comment\"\"\"\n return self.parent_comment.pk\n\n def set_parent_comment(parent_comment):\n self.starting_comment = parent_comment\n\n\nclass Cigarette(models.Model):\n \"\"\"\n Cigarette smoked by a user\n \"\"\"\n user = models.ForeignKey(user, blank=False, null=False, related_name=\n 'user_cigarettes')\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % self.pk\n\n def get_cigarette_user_id(self):\n \"\"\"Returns the user id who smoked the cigarette\"\"\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"\"\"Returns the date associated to the cigarette\"\"\"\n return self.cigarette_date\n\n def get_time(self):\n \"\"\"Returns the time associated to the cigarette\"\"\"\n return self.cigarette_time\n",
"step-5": "from django.db import models\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom model_utils.models import TimeStampedModel\n\nuser = settings.AUTH_USER_MODEL\ncommment_lenght = settings.COMMENT_LENGTH\n\n\n# Entity Comment\nclass Comment(TimeStampedModel):\n \"\"\"\n Text comment posted by users\n \"\"\"\n\n # User - Foreign key\n user = models.ForeignKey(user, blank=False, null=False, related_name='comment_user')\n # Parent comment (optional) - i.e. a comment of a comment\n starting_comment = models.ForeignKey('Comment', blank=True, null=True, related_name='parent_comment')\n # Text content of a comment\n content = models.TextField(_('comment text'), max_length=commment_lenght, blank=False, null=False)\n\n class Meta:\n verbose_name = _('comment')\n verbose_name_plural = _('comments')\n\n def __unicode__(self):\n return self.content\n\n def get_content(self):\n \"Returns the text content for the comment\"\n return self.content\n\n def get_user_id(self):\n \"Returns the id of the user who posted the comment\"\n return self.comment_user.pk\n\n def get_date(self):\n \"Returns the timestamp associated to the comment\"\n return self.created\n\n def get_parent_comment_id(self):\n \"Returns the id of the parent comment\"\n return self.parent_comment.pk\n\n\n def set_parent_comment(parent_comment):\n self.starting_comment = parent_comment\n\n\n# Entity Cigarette\nclass Cigarette(models.Model):\n \"\"\"\n Cigarette smoked by a user\n \"\"\"\n\n # User - Foreign key\n user = models.ForeignKey(user, blank=False, null=False, related_name='user_cigarettes')\n # Date and time associated to the cigarette\n cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)\n cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)\n\n class Meta:\n verbose_name = _('cigarette')\n verbose_name_plural = _('cigarettes')\n\n def __unicode__(self):\n return u'%s' % ( self.pk)\n\n\n def get_cigarette_user_id(self):\n \"Returns the user id who smoked the cigarette\"\n return self.cigarette_user.pk\n\n def get_date(self):\n \"Returns the date associated to the cigarette\"\n return self.cigarette_date\n\n def get_time(self):\n \"Returns the time associated to the cigarette\"\n return self.cigarette_time\n\n\n",
"step-ids": [
6,
12,
16,
17,
19
]
}
|
[
6,
12,
16,
17,
19
] |
# Baekjoon 15650, "N and M (2)": print every strictly increasing
# sequence of length m drawn from 1..n.
n, m = map(int, input().split())
arr = [i for i in range(1, n + 1)]
check = []
def seq(ctn, start):
    # A full sequence of length m is ready: print it and backtrack.
    if ctn == m:
        print(*check)
        return
    # Resume from the index after the last pick, so sequences stay
    # increasing and no combination is produced twice.
    for i in range(start, n):
        check.append(arr[i])
        seq(ctn + 1, i + 1)
        check.pop()
seq(0, 0)
|
normal
|
{
"blob_id": "dc5d56d65417dd8061a018a2f07132b03e2d616e",
"index": 5127,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef seq(ctn, array, l):\n if sorted(check) in array:\n return\n for i in range(n):\n l += 1\n check.append(arr[i])\n seq(ctn + 1, array, l)\n check.pop()\n print('l :', l, ' i :', i)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef seq(ctn, array, l):\n if sorted(check) in array:\n return\n for i in range(n):\n l += 1\n check.append(arr[i])\n seq(ctn + 1, array, l)\n check.pop()\n print('l :', l, ' i :', i)\n\n\nseq(0, [], 1)\n",
"step-4": "n, m = list(map(int, input().split()))\narr = [i for i in range(1, n + 1)]\ncheck = []\n\n\ndef seq(ctn, array, l):\n if sorted(check) in array:\n return\n for i in range(n):\n l += 1\n check.append(arr[i])\n seq(ctn + 1, array, l)\n check.pop()\n print('l :', l, ' i :', i)\n\n\nseq(0, [], 1)\n",
"step-5": "# 15650번 수열 2번째\n\nn, m = list(map(int, input().split()))\n\narr = [i for i in range(1,n+1)]\ncheck = []\n\ndef seq(ctn, array, l):\n if sorted(check) in array:\n return\n # if ctn == m:\n # # l+=1\n # # print('ctn :',ctn,' check :',sorted(check))\n # array.append(sorted(check))\n # for k in range(m):\n # print(check[k], end = ' ')\n # print()\n # return\n\n for i in range(n):\n l += 1\n check.append(arr[i])\n seq(ctn+1, array, l)\n check.pop()\n print('l :',l,' i :',i)\n\n\nseq(0,[], 1)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import logging
from blogofile.cache import bf
github = bf.config.controllers.github
from github2.client import Github
github_api = Github()
config = {
"name": "Github",
"description": "Makes a nice github project listing for the sidebar",
"priority": 95.0,
}
def get_list(user):
"""
Each item in the list has:
name, url, description, forks, watchers, homepage, open_issues
"""
return [g for g in github_api.repos.list(user) if not g.fork]
def run():
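    # Build-time hook: populate the shared github namespace with the
    # repo lists that the sidebar template renders.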
github.logger = logging.getLogger(config['name'])
github.repo_list = get_list(github.user)
github.full_repo_list = github_api.repos.list(github.user)
|
normal
|
{
"blob_id": "ee2cf6c472fa955ba3718bf3a3f60b66811b4907",
"index": 4705,
"step-1": "<mask token>\n\n\ndef get_list(user):\n \"\"\"\n Each item in the list has:\n name, url, description, forks, watchers, homepage, open_issues\n\n \"\"\"\n return [g for g in github_api.repos.list(user) if not g.fork]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_list(user):\n \"\"\"\n Each item in the list has:\n name, url, description, forks, watchers, homepage, open_issues\n\n \"\"\"\n return [g for g in github_api.repos.list(user) if not g.fork]\n\n\ndef run():\n github.logger = logging.getLogger(config['name'])\n github.repo_list = get_list(github.user)\n github.full_repo_list = github_api.repos.list(github.user)\n",
"step-3": "<mask token>\ngithub = bf.config.controllers.github\n<mask token>\ngithub_api = Github()\nconfig = {'name': 'Github', 'description':\n 'Makes a nice github project listing for the sidebar', 'priority': 95.0}\n\n\ndef get_list(user):\n \"\"\"\n Each item in the list has:\n name, url, description, forks, watchers, homepage, open_issues\n\n \"\"\"\n return [g for g in github_api.repos.list(user) if not g.fork]\n\n\ndef run():\n github.logger = logging.getLogger(config['name'])\n github.repo_list = get_list(github.user)\n github.full_repo_list = github_api.repos.list(github.user)\n",
"step-4": "import logging\nfrom blogofile.cache import bf\ngithub = bf.config.controllers.github\nfrom github2.client import Github\ngithub_api = Github()\nconfig = {'name': 'Github', 'description':\n 'Makes a nice github project listing for the sidebar', 'priority': 95.0}\n\n\ndef get_list(user):\n \"\"\"\n Each item in the list has:\n name, url, description, forks, watchers, homepage, open_issues\n\n \"\"\"\n return [g for g in github_api.repos.list(user) if not g.fork]\n\n\ndef run():\n github.logger = logging.getLogger(config['name'])\n github.repo_list = get_list(github.user)\n github.full_repo_list = github_api.repos.list(github.user)\n",
"step-5": "import logging\n\nfrom blogofile.cache import bf\ngithub = bf.config.controllers.github\n\nfrom github2.client import Github\ngithub_api = Github()\n\nconfig = {\n \"name\": \"Github\",\n \"description\": \"Makes a nice github project listing for the sidebar\",\n \"priority\": 95.0,\n }\n\ndef get_list(user):\n \"\"\"\n Each item in the list has:\n name, url, description, forks, watchers, homepage, open_issues\n\n \"\"\"\n return [g for g in github_api.repos.list(user) if not g.fork]\n\n\ndef run():\n github.logger = logging.getLogger(config['name'])\n github.repo_list = get_list(github.user)\n github.full_repo_list = github_api.repos.list(github.user)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
try:
    myfile = open("mydata.txt", encoding="utf-8")
except FileNotFoundError as ex:
    print("file not found")
    print(ex.args)
else:
    # Runs only if open() succeeded.
    print("file :", myfile.read())
    myfile.close()
finally:
    # Runs whether or not the file was opened.
    print("finished working")
|
normal
|
{
"blob_id": "8bf75bf3b16296c36c34e8c4c50149259d792af7",
"index": 4319,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n myfile = open('mydata.txt', encoding='utf-8')\nexcept FileNotFoundError as ex:\n print('file is not found')\n print(ex.args)\nelse:\n print('file :', myfile.read())\n myfile.close()\nfinally:\n print('finished working')\n",
"step-3": "import sys\ntry:\n myfile = open('mydata.txt', encoding='utf-8')\nexcept FileNotFoundError as ex:\n print('file is not found')\n print(ex.args)\nelse:\n print('file :', myfile.read())\n myfile.close()\nfinally:\n print('finished working')\n",
"step-4": "import sys\n\ntry:\n myfile = open(\"mydata.txt\",encoding =\"utf-8\")\n\nexcept FileNotFoundError as ex:\n print(\"file is not found\")\n print(ex.args)\nelse:\n print(\"file :\",myfile.read())\n myfile.close()\nfinally :\n\n print(\"finished working\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import unittest
from LempelZivWelchDecoder import LempelZivWelchDecoder
class TestLempelZivWelchDecoder(unittest.TestCase):
def test_decode(self):
test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']
        lzw_decoder = LempelZivWelchDecoder()
        self.assertRaises(ValueError,
                          lambda: lzw_decoder.decode())  # assert that decoding without input raises
        self.assertIsNone(lzw_decoder.input)  # assert that input is None until it is set
        lzw_decoder.input = test_value
        self.assertEqual(lzw_decoder.input, test_value)  # assert that input keeps the assigned value
        self.assertEqual(lzw_decoder.decode(),
                         "ttttttessst1")  # assert that result is correct
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "8126af930ec75e2818455d959f00285bdc08c044",
"index": 1899,
"step-1": "<mask token>\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n\n def test_decode(self):\n test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']\n run_length_decoder = LempelZivWelchDecoder()\n self.assertRaises(ValueError, lambda : run_length_decoder.decode())\n self.assertTrue(run_length_decoder.input is None)\n run_length_decoder.input = test_value\n self.assertEqual(run_length_decoder.input, test_value)\n self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n\n def test_decode(self):\n test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']\n run_length_decoder = LempelZivWelchDecoder()\n self.assertRaises(ValueError, lambda : run_length_decoder.decode())\n self.assertTrue(run_length_decoder.input is None)\n run_length_decoder.input = test_value\n self.assertEqual(run_length_decoder.input, test_value)\n self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom LempelZivWelchDecoder import LempelZivWelchDecoder\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n\n def test_decode(self):\n test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']\n run_length_decoder = LempelZivWelchDecoder()\n self.assertRaises(ValueError, lambda : run_length_decoder.decode())\n self.assertTrue(run_length_decoder.input is None)\n run_length_decoder.input = test_value\n self.assertEqual(run_length_decoder.input, test_value)\n self.assertEqual(run_length_decoder.decode(), 'ttttttessst1')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\n\nfrom LempelZivWelchDecoder import LempelZivWelchDecoder\n\n\nclass TestLempelZivWelchDecoder(unittest.TestCase):\n def test_decode(self):\n test_value = ['t', 256, 257, 'e', 's', 260, 't', '1']\n run_length_decoder = LempelZivWelchDecoder()\n\n self.assertRaises(ValueError,\n lambda: run_length_decoder.decode()) # assert if method raises error when there is no input\n self.assertTrue(run_length_decoder.input is None) # assert if input is none when it's not set\n\n run_length_decoder.input = test_value\n self.assertEqual(run_length_decoder.input, test_value) # assert that input is initialized with proper value\n self.assertEqual(run_length_decoder.decode(),\n \"ttttttessst1\") # assert that result is correct\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import ast
import datetime
import json
from base64 import b64encode
import requests
IMGUR_BASE = "https://api.imgur.com"
class Task:
"""
A class used to represent a job
...
Attributes
----------
queue : list
the list of all urls
pending : list
the name of all pending urls
complete : list
the name of all completed urls
failed : list
the name of all failed urls
url_map : dict
a dictionary that maps provided urls with imgur urls
created:
date created
finished:
date finished
status:
the job status
credentials:
the access token and other useful objects
"""
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = "pending"
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
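        # cred may arrive as the string repr of a bytes object; strip the
        # b-prefix and normalize quotes so ast.literal_eval yields a dict.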
clean = str(cred).replace('b\"', '').replace('\"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {
"created": self.created,
"finished": self.finished,
"status": self.status,
"uploaded": {
"pending": self.pending,
"complete": self.complete,
"failed": self.failed
}
}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
def executeOne(self, val):
"""
        Upload a single image and record its new url
        :rtype: bool
"""
        v, url = self.upload_image(path=None, url=val, title=None, description=None, album=None)
if v:
self.url_map.update({val: url})
return True
else:
self.url_map.update({val: url})
return False
def enqueue(self, data):
"""
Adding elements to queue
        :rtype: bool
"""
# Checking to avoid duplicate entry (not mandatory)
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
    def dequeue(self):
        """
        Remove and return the next url from the queue
        :rtype: object
        """
        if len(self.queue) > 0:
            return self.queue.pop()
        return "Queue Empty!"
def size(self):
"""
Getting the size of the queue
        :rtype: int
"""
return len(self.queue)
def upload_image(self, path=None, url=None, title=None, description=None,
album=None):
"""
        Upload an image to the imgur server and return (success, url_or_error)
        :rtype: tuple
"""
if bool(path) == bool(url):
raise LookupError("Either path or url must be given.")
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
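            # The imgur API also accepts a remote URL directly as the image payload.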
image = url
        payload = {'album_id': album if album else "58tq5Nw", 'image': image,
                   'title': title, 'description': description}
token = ast.literal_eval(str(self.credentials))["access_token"]
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
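        # On failure imgur nests an 'error' message under data; on success
        # data['link'] holds the public URL of the uploaded image.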
resp = requests.post(IMGUR_BASE + "/3/image", payload, headers=authentication, verify=verify)
if 'error' in json.loads(resp.content)["data"]:
return False, json.loads(resp.content)["data"]["error"]
else:
return True, json.loads(resp.content)["data"]["link"]
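# Minimal usage sketch (hypothetical values; assumes a valid imgur
# access token and reachable image urls):
#   job = Task()
#   job.initialize(['https://i.example.com/a.png'], {'access_token': 'TOKEN'})
#   job.executeAll(lambda t: print(t.export()))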
|
normal
|
{
"blob_id": "63ee99012089dcb0e5b41860c95e13fff52c6731",
"index": 1546,
"step-1": "<mask token>\n\n\nclass Task:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n <mask token>\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n <mask token>\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-2": "<mask token>\n\n\nclass Task:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n <mask token>\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-3": "<mask token>\nIMGUR_BASE = 'https://api.imgur.com'\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, 
headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-4": "import ast\nimport datetime\nimport json\nfrom base64 import b64encode\nimport requests\nIMGUR_BASE = 'https://api.imgur.com'\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n 
verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-5": "import ast\nimport datetime\nimport json\nfrom base64 import b64encode\nimport requests\n\nIMGUR_BASE = \"https://api.imgur.com\"\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = \"pending\"\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\\\"', '').replace('\\\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {\n \"created\": self.created,\n \"finished\": self.finished,\n \"status\": self.status,\n \"uploaded\": {\n \"pending\": self.pending,\n \"complete\": self.complete,\n \"failed\": self.failed\n }\n }\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v,url = self.upload_image(path=None, url=val, title=None, description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n # Checking to avoid duplicate entry (not mandatory)\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return (\"Queue Empty!\")\n\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=None,\n album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError(\"Either path or url must be given.\")\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': \"58tq5Nw\", 'image': image,\n 'title': title, 'description': description}\n\n token = 
ast.literal_eval(str(self.credentials))[\"access_token\"]\n\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + \"/3/image\", payload, headers=authentication, verify=verify)\n if 'error' in json.loads(resp.content)[\"data\"]:\n return False, json.loads(resp.content)[\"data\"][\"error\"]\n else:\n return True, json.loads(resp.content)[\"data\"][\"link\"]\n\n\n",
"step-ids": [
8,
9,
12,
13,
14
]
}
|
[
8,
9,
12,
13,
14
] |
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing class for AWS's Redshift Cluster Subnet Group."""
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
class RedshiftClusterSubnetGroup(resource.BaseResource):
"""Cluster Subnet Group associated with a Redshift cluster launched in a vpc.
A cluster subnet group allows you to specify a set of subnets in your VPC.
Attributes:
name: A string name of the cluster subnet group.
subnet_id: A string name of the subnet id associated with the group.
"""
def __init__(self, cmd_prefix):
super(RedshiftClusterSubnetGroup, self).__init__(user_managed=False)
self.cmd_prefix = cmd_prefix
self.name = 'pkb-' + FLAGS.run_uri
self.subnet_id = ''
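    # left empty here; the caller assigns the subnet id before _Create() runs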
def _Create(self):
cmd = self.cmd_prefix + [
'redshift', 'create-cluster-subnet-group',
'--cluster-subnet-group-name', self.name, '--description',
'Cluster Subnet Group for run uri {}'.format(
FLAGS.run_uri), '--subnet-ids', self.subnet_id
]
vm_util.IssueCommand(cmd)
def _Delete(self):
"""Delete a redshift cluster subnet group."""
cmd = self.cmd_prefix + [
'redshift', 'delete-cluster-subnet-group',
'--cluster-subnet-group-name', self.name
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
|
normal
|
{
"blob_id": "9cebce7f97a1848885883692cd0f494cce6bae7f",
"index": 5263,
"step-1": "<mask token>\n\n\nclass RedshiftClusterSubnetGroup(resource.BaseResource):\n <mask token>\n\n def __init__(self, cmd_prefix):\n super(RedshiftClusterSubnetGroup, self).__init__(user_managed=False)\n self.cmd_prefix = cmd_prefix\n self.name = 'pkb-' + FLAGS.run_uri\n self.subnet_id = ''\n\n def _Create(self):\n cmd = self.cmd_prefix + ['redshift', 'create-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name, '--description',\n 'Cluster Subnet Group for run uri {}'.format(FLAGS.run_uri),\n '--subnet-ids', self.subnet_id]\n vm_util.IssueCommand(cmd)\n\n def _Delete(self):\n \"\"\"Delete a redshift cluster subnet group.\"\"\"\n cmd = self.cmd_prefix + ['redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name]\n vm_util.IssueCommand(cmd, raise_on_failure=False)\n",
"step-2": "<mask token>\n\n\nclass RedshiftClusterSubnetGroup(resource.BaseResource):\n \"\"\"Cluster Subnet Group associated with a Redshift cluster launched in a vpc.\n\n A cluster subnet group allows you to specify a set of subnets in your VPC.\n\n\n Attributes:\n name: A string name of the cluster subnet group.\n subnet_id: A string name of the subnet id associated with the group.\n \"\"\"\n\n def __init__(self, cmd_prefix):\n super(RedshiftClusterSubnetGroup, self).__init__(user_managed=False)\n self.cmd_prefix = cmd_prefix\n self.name = 'pkb-' + FLAGS.run_uri\n self.subnet_id = ''\n\n def _Create(self):\n cmd = self.cmd_prefix + ['redshift', 'create-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name, '--description',\n 'Cluster Subnet Group for run uri {}'.format(FLAGS.run_uri),\n '--subnet-ids', self.subnet_id]\n vm_util.IssueCommand(cmd)\n\n def _Delete(self):\n \"\"\"Delete a redshift cluster subnet group.\"\"\"\n cmd = self.cmd_prefix + ['redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name]\n vm_util.IssueCommand(cmd, raise_on_failure=False)\n",
"step-3": "<mask token>\nFLAGS = flags.FLAGS\n\n\nclass RedshiftClusterSubnetGroup(resource.BaseResource):\n \"\"\"Cluster Subnet Group associated with a Redshift cluster launched in a vpc.\n\n A cluster subnet group allows you to specify a set of subnets in your VPC.\n\n\n Attributes:\n name: A string name of the cluster subnet group.\n subnet_id: A string name of the subnet id associated with the group.\n \"\"\"\n\n def __init__(self, cmd_prefix):\n super(RedshiftClusterSubnetGroup, self).__init__(user_managed=False)\n self.cmd_prefix = cmd_prefix\n self.name = 'pkb-' + FLAGS.run_uri\n self.subnet_id = ''\n\n def _Create(self):\n cmd = self.cmd_prefix + ['redshift', 'create-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name, '--description',\n 'Cluster Subnet Group for run uri {}'.format(FLAGS.run_uri),\n '--subnet-ids', self.subnet_id]\n vm_util.IssueCommand(cmd)\n\n def _Delete(self):\n \"\"\"Delete a redshift cluster subnet group.\"\"\"\n cmd = self.cmd_prefix + ['redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name]\n vm_util.IssueCommand(cmd, raise_on_failure=False)\n",
"step-4": "<mask token>\nfrom absl import flags\nfrom perfkitbenchmarker import resource\nfrom perfkitbenchmarker import vm_util\nFLAGS = flags.FLAGS\n\n\nclass RedshiftClusterSubnetGroup(resource.BaseResource):\n \"\"\"Cluster Subnet Group associated with a Redshift cluster launched in a vpc.\n\n A cluster subnet group allows you to specify a set of subnets in your VPC.\n\n\n Attributes:\n name: A string name of the cluster subnet group.\n subnet_id: A string name of the subnet id associated with the group.\n \"\"\"\n\n def __init__(self, cmd_prefix):\n super(RedshiftClusterSubnetGroup, self).__init__(user_managed=False)\n self.cmd_prefix = cmd_prefix\n self.name = 'pkb-' + FLAGS.run_uri\n self.subnet_id = ''\n\n def _Create(self):\n cmd = self.cmd_prefix + ['redshift', 'create-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name, '--description',\n 'Cluster Subnet Group for run uri {}'.format(FLAGS.run_uri),\n '--subnet-ids', self.subnet_id]\n vm_util.IssueCommand(cmd)\n\n def _Delete(self):\n \"\"\"Delete a redshift cluster subnet group.\"\"\"\n cmd = self.cmd_prefix + ['redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name]\n vm_util.IssueCommand(cmd, raise_on_failure=False)\n",
"step-5": "# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Module containing class for AWS's Redshift Cluster Subnet Group.\"\"\"\n\nfrom absl import flags\nfrom perfkitbenchmarker import resource\nfrom perfkitbenchmarker import vm_util\n\nFLAGS = flags.FLAGS\n\n\nclass RedshiftClusterSubnetGroup(resource.BaseResource):\n \"\"\"Cluster Subnet Group associated with a Redshift cluster launched in a vpc.\n\n A cluster subnet group allows you to specify a set of subnets in your VPC.\n\n\n Attributes:\n name: A string name of the cluster subnet group.\n subnet_id: A string name of the subnet id associated with the group.\n \"\"\"\n\n def __init__(self, cmd_prefix):\n super(RedshiftClusterSubnetGroup, self).__init__(user_managed=False)\n self.cmd_prefix = cmd_prefix\n self.name = 'pkb-' + FLAGS.run_uri\n self.subnet_id = ''\n\n def _Create(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'create-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name, '--description',\n 'Cluster Subnet Group for run uri {}'.format(\n FLAGS.run_uri), '--subnet-ids', self.subnet_id\n ]\n vm_util.IssueCommand(cmd)\n\n def _Delete(self):\n \"\"\"Delete a redshift cluster subnet group.\"\"\"\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# coding=utf-8
# Copyright 2016 Mystopia.
from __future__ import (absolute_import, division, generators, nested_scopes,
print_function, unicode_literals, with_statement)
from django.db.models.signals import m2m_changed, post_save
from django.dispatch import receiver
from dicpick.models import Task, TaskType
# The signal handlers below ensure that certain changes to TaskType are reflected onto all the tasks of that type.
# Note that the signal handlers run in the same transaction as the event that triggered the signal.
@receiver(post_save, sender=TaskType)
def create_task_instances(sender, instance, **kwargs):
"""Ensure that there is a task instance for each date in the range specified by the task type.
Necessary to support date range changes.
"""
task_type = instance
existing_dates = set([task.date for task in task_type.tasks.all()])
required_dates = set(task_type.date_range())
missing_dates = required_dates - existing_dates
superfluous_dates = existing_dates - required_dates
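  # Remove tasks that fall outside the new range, then create tasks for the newly added dates.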
Task.objects.filter(task_type=task_type, date__in=superfluous_dates).delete()
for missing_date in missing_dates:
task = Task(task_type=task_type, date=missing_date, num_people=task_type.num_people, score=task_type.score)
task.save()
Task.objects.filter(task_type=task_type).update(num_people=task_type.num_people, score=task_type.score)
@receiver(m2m_changed, sender=TaskType.tags.through)
def tags_updated(sender, instance, action, **kwargs):
"""If tags were added to or removed from a TaskType, add/remove them from all tasks of that type."""
task_type = instance
pk_set = kwargs.pop('pk_set')
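  # m2m_changed also fires for pre_* and clear actions; only propagate adds and removes.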
if action == 'post_add':
for task in task_type.tasks.all():
task.tags.add(*pk_set)
elif action == 'post_remove':
for task in task_type.tasks.all():
task.tags.remove(*pk_set)
|
normal
|
{
"blob_id": "065a566b3e520c14f20d0d7d668ec58404d6e11b",
"index": 494,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@receiver(post_save, sender=TaskType)\ndef create_task_instances(sender, instance, **kwargs):\n \"\"\"Ensure that there is a task instance for each date in the range specified by the task type.\n\n Necessary to support date range changes.\n \"\"\"\n task_type = instance\n existing_dates = set([task.date for task in task_type.tasks.all()])\n required_dates = set(task_type.date_range())\n missing_dates = required_dates - existing_dates\n superfluous_dates = existing_dates - required_dates\n Task.objects.filter(task_type=task_type, date__in=superfluous_dates\n ).delete()\n for missing_date in missing_dates:\n task = Task(task_type=task_type, date=missing_date, num_people=\n task_type.num_people, score=task_type.score)\n task.save()\n Task.objects.filter(task_type=task_type).update(num_people=task_type.\n num_people, score=task_type.score)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@receiver(post_save, sender=TaskType)\ndef create_task_instances(sender, instance, **kwargs):\n \"\"\"Ensure that there is a task instance for each date in the range specified by the task type.\n\n Necessary to support date range changes.\n \"\"\"\n task_type = instance\n existing_dates = set([task.date for task in task_type.tasks.all()])\n required_dates = set(task_type.date_range())\n missing_dates = required_dates - existing_dates\n superfluous_dates = existing_dates - required_dates\n Task.objects.filter(task_type=task_type, date__in=superfluous_dates\n ).delete()\n for missing_date in missing_dates:\n task = Task(task_type=task_type, date=missing_date, num_people=\n task_type.num_people, score=task_type.score)\n task.save()\n Task.objects.filter(task_type=task_type).update(num_people=task_type.\n num_people, score=task_type.score)\n\n\n@receiver(m2m_changed, sender=TaskType.tags.through)\ndef tags_updated(sender, instance, action, **kwargs):\n \"\"\"If tags were added to or removed from a TaskType, add/remove them from all tasks of that type.\"\"\"\n task_type = instance\n pk_set = kwargs.pop('pk_set')\n if action == 'post_add':\n for task in task_type.tasks.all():\n task.tags.add(*pk_set)\n elif action == 'post_remove':\n for task in task_type.tasks.all():\n task.tags.remove(*pk_set)\n",
"step-4": "from __future__ import absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement\nfrom django.db.models.signals import m2m_changed, post_save\nfrom django.dispatch import receiver\nfrom dicpick.models import Task, TaskType\n\n\n@receiver(post_save, sender=TaskType)\ndef create_task_instances(sender, instance, **kwargs):\n \"\"\"Ensure that there is a task instance for each date in the range specified by the task type.\n\n Necessary to support date range changes.\n \"\"\"\n task_type = instance\n existing_dates = set([task.date for task in task_type.tasks.all()])\n required_dates = set(task_type.date_range())\n missing_dates = required_dates - existing_dates\n superfluous_dates = existing_dates - required_dates\n Task.objects.filter(task_type=task_type, date__in=superfluous_dates\n ).delete()\n for missing_date in missing_dates:\n task = Task(task_type=task_type, date=missing_date, num_people=\n task_type.num_people, score=task_type.score)\n task.save()\n Task.objects.filter(task_type=task_type).update(num_people=task_type.\n num_people, score=task_type.score)\n\n\n@receiver(m2m_changed, sender=TaskType.tags.through)\ndef tags_updated(sender, instance, action, **kwargs):\n \"\"\"If tags were added to or removed from a TaskType, add/remove them from all tasks of that type.\"\"\"\n task_type = instance\n pk_set = kwargs.pop('pk_set')\n if action == 'post_add':\n for task in task_type.tasks.all():\n task.tags.add(*pk_set)\n elif action == 'post_remove':\n for task in task_type.tasks.all():\n task.tags.remove(*pk_set)\n",
"step-5": "# coding=utf-8\n# Copyright 2016 Mystopia.\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes,\n print_function, unicode_literals, with_statement)\n\nfrom django.db.models.signals import m2m_changed, post_save\nfrom django.dispatch import receiver\n\nfrom dicpick.models import Task, TaskType\n\n\n# The signal handlers below ensure that certain changes to TaskType are reflected onto all the tasks of that type.\n# Note that the signal handlers run in the same transaction as the event that triggered the signal.\n\n@receiver(post_save, sender=TaskType)\ndef create_task_instances(sender, instance, **kwargs):\n \"\"\"Ensure that there is a task instance for each date in the range specified by the task type.\n\n Necessary to support date range changes.\n \"\"\"\n task_type = instance\n existing_dates = set([task.date for task in task_type.tasks.all()])\n required_dates = set(task_type.date_range())\n missing_dates = required_dates - existing_dates\n superfluous_dates = existing_dates - required_dates\n Task.objects.filter(task_type=task_type, date__in=superfluous_dates).delete()\n for missing_date in missing_dates:\n task = Task(task_type=task_type, date=missing_date, num_people=task_type.num_people, score=task_type.score)\n task.save()\n\n Task.objects.filter(task_type=task_type).update(num_people=task_type.num_people, score=task_type.score)\n\n\n@receiver(m2m_changed, sender=TaskType.tags.through)\ndef tags_updated(sender, instance, action, **kwargs):\n \"\"\"If tags were added to or removed from a TaskType, add/remove them from all tasks of that type.\"\"\"\n task_type = instance\n pk_set = kwargs.pop('pk_set')\n if action == 'post_add':\n for task in task_type.tasks.all():\n task.tags.add(*pk_set)\n elif action == 'post_remove':\n for task in task_type.tasks.all():\n task.tags.remove(*pk_set)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# write dictionary objects to be stored in a binary file
import pickle
# dictionary objects to be stored in a binary file
emp1 = {"Empno" : 1201, "Name" : "Anushree", "Age" : 25, "Salary" : 47000}
emp2 = {"Empno" : 1211, "Name" : "Zoya", "Age" : 30, "Salary" : 48000}
emp3 = {"Empno" : 1251, "Name" : "Simarjeet", "Age" : 27, "Salary" : 49000}
emp4 = {"Empno" : 1266, "Name" : "Alex", "Age" : 29, "Salary" : 50000}
empObj = open('Emp.dat',"wb")
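# binary write mode ('wb') is required for pickle output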
# write each dictionary onto the file
pickle.dump(emp1,empObj)
pickle.dump(emp2,empObj)
pickle.dump(emp3,empObj)
pickle.dump(emp4,empObj)
print("Successfully written four dictionaries")
empObj.close()
|
normal
|
{
"blob_id": "23937ae531cc95069a1319f8c77a459ba7645363",
"index": 4331,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-3": "<mask token>\nemp1 = {'Empno': 1201, 'Name': 'Anushree', 'Age': 25, 'Salary': 47000}\nemp2 = {'Empno': 1211, 'Name': 'Zoya', 'Age': 30, 'Salary': 48000}\nemp3 = {'Empno': 1251, 'Name': 'Simarjeet', 'Age': 27, 'Salary': 49000}\nemp4 = {'Empno': 1266, 'Name': 'Alex', 'Age': 29, 'Salary': 50000}\nempObj = open('Emp.dat', 'wb')\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-4": "import pickle\nemp1 = {'Empno': 1201, 'Name': 'Anushree', 'Age': 25, 'Salary': 47000}\nemp2 = {'Empno': 1211, 'Name': 'Zoya', 'Age': 30, 'Salary': 48000}\nemp3 = {'Empno': 1251, 'Name': 'Simarjeet', 'Age': 27, 'Salary': 49000}\nemp4 = {'Empno': 1266, 'Name': 'Alex', 'Age': 29, 'Salary': 50000}\nempObj = open('Emp.dat', 'wb')\npickle.dump(emp1, empObj)\npickle.dump(emp2, empObj)\npickle.dump(emp3, empObj)\npickle.dump(emp4, empObj)\nprint('Successfully written four dictionaries')\nempObj.close()\n",
"step-5": "# write dictionary objects to be stored in a binary file\n\n\nimport pickle\n#dictionary objects to be stored in a binary file\nemp1 = {\"Empno\" : 1201, \"Name\" : \"Anushree\", \"Age\" : 25, \"Salary\" : 47000}\nemp2 = {\"Empno\" : 1211, \"Name\" : \"Zoya\", \"Age\" : 30, \"Salary\" : 48000}\nemp3 = {\"Empno\" : 1251, \"Name\" : \"Simarjeet\", \"Age\" : 27, \"Salary\" : 49000}\nemp4 = {\"Empno\" : 1266, \"Name\" : \"Alex\", \"Age\" : 29, \"Salary\" : 50000}\n\nempObj = open('Emp.dat',\"wb\")\n\n#write onto the file\n\npickle.dump(emp1,empObj)\npickle.dump(emp2,empObj)\npickle.dump(emp3,empObj)\npickle.dump(emp4,empObj)\n\nprint(\"Successfully written four dictionaries\")\nempObj.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# @Time : 2020/3/4 10:34
# @Author : YYLin
# @Email : 854280599@qq.com
# @File : Skip_GAN.py
from Dataload import load_anime_old, save_images, load_CelebA
from Srresnet_Model import Generator_srresnet, Discriminator_srresnet
import tensorflow as tf
import numpy as np
import sys
class Skip_GAN(object):
def __init__(self, sess, epoch, batch_size, dataset_name, result_dir, z_dim, y_dim, checkpoint_dir, num_resblock,
Cycle_lr, Class_weight, Resnet_weight):
self.sess = sess
self.dataset_name = dataset_name
self.result_dir = result_dir
self.epoch = epoch
self.batch_size = batch_size
self.z_dim = z_dim
self.y_dim = y_dim
self.checkpoint_dir = checkpoint_dir
self.num_resblock = num_resblock
self.Cycle_lr = Cycle_lr
self.Class_weight = Class_weight
# La is used to increase the weight of image authenticity
self.la = 10
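        # starts at 10; train() grows it by 1.5x (capped at 25) when Class_weight is set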
self.learningRateD = 2e-4
self.learningRateG = 2e-4
self.Resnet_weight = Resnet_weight
        # Load the selected dataset
if self.dataset_name == 'anime':
print('loading anime .............')
self.height = 96
self.width = 96
self.c_dim = 3
self.data_X, self.data_Y = load_anime_old()
print('self.data_X:', self.data_X.shape, 'self.data_y:', self.data_Y.shape)
elif self.dataset_name == 'celebA':
print('loading celebA ...............')
self.height = 96
self.width = 96
self.c_dim = 3
self.data_X, self.data_Y = load_CelebA()
print('self.data_X:', self.data_X.shape, 'self.data_y:', self.data_Y.shape)
else:
print('Sorry there is no option for ', self.dataset_name)
sys.exit()
def build_model(self):
# some placeholder in our model
self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='y')
self.img = tf.placeholder(tf.float32, [self.batch_size, self.height, self.width, 3], name='img')
self.z = tf.placeholder(tf.float32, [None, self.z_dim])
self.G_sample = Generator_srresnet(self.z, self.y, self.num_resblock, self.Resnet_weight)
print('The return of Generator:', self.G_sample)
        # Discriminator judgment on real images
D_real, C_real = Discriminator_srresnet(self.img, dataset=self.dataset_name)
print('The return of Discriminator:', D_real, C_real)
        # Discriminator judgment on generated images
D_fake, C_fake = Discriminator_srresnet(self.G_sample, dataset=self.dataset_name, reuse=True)
print('The return of Discriminator:', D_fake, C_fake)
        # Classification losses over the image labels
self.C_real_loss = tf.reduce_mean(
tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=C_real, labels=self.y), axis=1))
self.C_fake_loss = tf.reduce_mean(
tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=C_fake, labels=self.y), axis=1))
        # D loss: real images should be judged as 1, generated images as 0
D_real_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.ones_like(D_real)))
D_fake_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.zeros_like(D_fake)))
        '''Note: la is the parameter to watch when using a dynamic schedule,
        but the goal is to make the classification loss larger, not the real/fake loss'''
D_loss = D_real_loss + D_fake_loss
self.DC_loss = (self.la * D_loss + self.C_real_loss)
        # Generator loss: generated images should be judged as real
G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.ones_like(D_fake)))
self.GC_loss = (self.la * G_loss + self.C_fake_loss)
        print('Calculating the optimizer losses')
self.theta_D = [v for v in tf.global_variables() if 'd_net' in v.name]
self.theta_G = [v for v in tf.global_variables() if 'g_net' in v.name]
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
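            # run pending UPDATE_OPS (e.g. batch-norm moving averages) before each optimizer step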
self.d_updates = tf.train.AdamOptimizer(self.learningRateD, beta1=0.5, beta2=0.9).minimize(self.DC_loss,
var_list=self.theta_D)
self.g_updates = tf.train.AdamOptimizer(self.learningRateG, beta1=0.5, beta2=0.9).minimize(self.GC_loss,
var_list=self.theta_G)
        # noise first, then labels, matching the Generator_srresnet call above
        self.sampler = Generator_srresnet(self.z, self.y, self.num_resblock, self.Resnet_weight, reuse=True, train=False)
def train(self):
print('begin training ...........')
tf.global_variables_initializer().run()
        # sample_num controls how many generated images are saved per grid
sample_num = 64
tot_num_samples = min(sample_num, self.batch_size)
manifold_h = int(np.floor(np.sqrt(tot_num_samples)))
manifold_w = int(np.floor(np.sqrt(tot_num_samples)))
        # Define the random noise and labels 2019/09/29
self.sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim)).astype(np.float32)
self.sample_y = self.data_Y[0:self.batch_size]
counter = 0
# shuffle the dataset 2019/9/29
batch_offset = 0
data_index = np.arange(self.data_X.shape[0])
np.random.shuffle(data_index)
self.data_X = self.data_X[data_index, :, :, :]
self.data_Y = self.data_Y[data_index]
        # Note: this approach leaves any trailing samples smaller than batch_size unused
for epoch in range(self.epoch):
if batch_offset + self.batch_size > len(self.data_X):
batch_offset = 0
# shuffle dataset
data_index = np.arange(self.data_X.shape[0])
np.random.shuffle(data_index)
self.data_X = self.data_X[data_index, :, :, :]
self.data_Y = self.data_Y[data_index]
else:
                # First, fetch the input batch
batch_images = self.data_X[batch_offset:batch_offset + self.batch_size]
batch_codes = self.data_Y[batch_offset:batch_offset + self.batch_size]
batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
                # Then update the discriminator
for i_d_loss in range(3):
_, d_loss = self.sess.run([self.d_updates, self.DC_loss], feed_dict={self.img: batch_images,
self.y: batch_codes,
self.z: batch_z})
for i_g_loss in range(1):
                    # Finally, update the generator
_, g_loss, _ = self.sess.run([self.g_updates, self.GC_loss, self.G_sample],
feed_dict={self.y: batch_codes, self.img: batch_images, self.z: batch_z})
batch_offset = batch_offset + self.batch_size
# display the loss every 10 steps
if (counter % 10) == 0:
print('Epoch: %2d counter: %5d d_loss: %.8f, g_loss: %.8f' % (epoch, counter, d_loss, g_loss))
# save image every 500 steps
if counter % 500 == 0:
samples = self.sess.run(self.sampler,
feed_dict={self.z: self.sample, self.y: self.sample_y})
save_images(samples[:manifold_h * manifold_w, :, :, :], [manifold_h, manifold_w],
self.result_dir + '/{}.png'.format(str(counter).zfill(7)))
# save the model every 1000 steps
if counter % 1000 == 0:
saver = tf.train.Saver(max_to_keep=5)
saver.save(self.sess, self.checkpoint_dir + '/{}'.format(str(counter).zfill(7)))
if (counter % 100) == 0:
if self.Cycle_lr:
self.learningRateD = self.learningRateD * 0.99
if self.learningRateD < 0.0001:
self.learningRateD = 2e-4
if (counter % 500) == 0:
if self.Class_weight:
if self.la > 25:
self.la = 25
else:
self.la = self.la * 1.5
counter += 1
|
normal
|
{
"blob_id": "d3b00a8d410248aedb1c43354e89ccc298b56a3c",
"index": 7693,
"step-1": "<mask token>\n\n\nclass Skip_GAN(object):\n\n def __init__(self, sess, epoch, batch_size, dataset_name, result_dir,\n z_dim, y_dim, checkpoint_dir, num_resblock, Cycle_lr, Class_weight,\n Resnet_weight):\n self.sess = sess\n self.dataset_name = dataset_name\n self.result_dir = result_dir\n self.epoch = epoch\n self.batch_size = batch_size\n self.z_dim = z_dim\n self.y_dim = y_dim\n self.checkpoint_dir = checkpoint_dir\n self.num_resblock = num_resblock\n self.Cycle_lr = Cycle_lr\n self.Class_weight = Class_weight\n self.la = 10\n self.learningRateD = 0.0002\n self.learningRateG = 0.0002\n self.Resnet_weight = Resnet_weight\n if self.dataset_name == 'anime':\n print('loading anime .............')\n self.height = 96\n self.width = 96\n self.c_dim = 3\n self.data_X, self.data_Y = load_anime_old()\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.\n data_Y.shape)\n elif self.dataset_name == 'celebA':\n print('loading celebA ...............')\n self.height = 96\n self.width = 96\n self.c_dim = 3\n self.data_X, self.data_Y = load_CelebA()\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.\n data_Y.shape)\n else:\n print('Sorry there is no option for ', self.dataset_name)\n sys.exit()\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Skip_GAN(object):\n\n def __init__(self, sess, epoch, batch_size, dataset_name, result_dir,\n z_dim, y_dim, checkpoint_dir, num_resblock, Cycle_lr, Class_weight,\n Resnet_weight):\n self.sess = sess\n self.dataset_name = dataset_name\n self.result_dir = result_dir\n self.epoch = epoch\n self.batch_size = batch_size\n self.z_dim = z_dim\n self.y_dim = y_dim\n self.checkpoint_dir = checkpoint_dir\n self.num_resblock = num_resblock\n self.Cycle_lr = Cycle_lr\n self.Class_weight = Class_weight\n self.la = 10\n self.learningRateD = 0.0002\n self.learningRateG = 0.0002\n self.Resnet_weight = Resnet_weight\n if self.dataset_name == 'anime':\n print('loading anime .............')\n self.height = 96\n self.width = 96\n self.c_dim = 3\n self.data_X, self.data_Y = load_anime_old()\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.\n data_Y.shape)\n elif self.dataset_name == 'celebA':\n print('loading celebA ...............')\n self.height = 96\n self.width = 96\n self.c_dim = 3\n self.data_X, self.data_Y = load_CelebA()\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.\n data_Y.shape)\n else:\n print('Sorry there is no option for ', self.dataset_name)\n sys.exit()\n <mask token>\n\n def train(self):\n print('begin training ...........')\n tf.global_variables_initializer().run()\n sample_num = 64\n tot_num_samples = min(sample_num, self.batch_size)\n manifold_h = int(np.floor(np.sqrt(tot_num_samples)))\n manifold_w = int(np.floor(np.sqrt(tot_num_samples)))\n self.sample = np.random.uniform(-1, 1, size=(self.batch_size, self.\n z_dim)).astype(np.float32)\n self.sample_y = self.data_Y[0:self.batch_size]\n counter = 0\n batch_offset = 0\n data_index = np.arange(self.data_X.shape[0])\n np.random.shuffle(data_index)\n self.data_X = self.data_X[data_index, :, :, :]\n self.data_Y = self.data_Y[data_index]\n for epoch in range(self.epoch):\n if batch_offset + self.batch_size > len(self.data_X):\n batch_offset = 0\n data_index = np.arange(self.data_X.shape[0])\n np.random.shuffle(data_index)\n self.data_X = self.data_X[data_index, :, :, :]\n self.data_Y = self.data_Y[data_index]\n else:\n batch_images = self.data_X[batch_offset:batch_offset + self\n .batch_size]\n batch_codes = self.data_Y[batch_offset:batch_offset + self.\n batch_size]\n batch_z = np.random.uniform(-1, 1, [self.batch_size, self.\n z_dim]).astype(np.float32)\n for i_d_loss in range(3):\n _, d_loss = self.sess.run([self.d_updates, self.DC_loss\n ], feed_dict={self.img: batch_images, self.y:\n batch_codes, self.z: batch_z})\n for i_g_loss in range(1):\n _, g_loss, _ = self.sess.run([self.g_updates, self.\n GC_loss, self.G_sample], feed_dict={self.y:\n batch_codes, self.img: batch_images, self.z: batch_z})\n batch_offset = batch_offset + self.batch_size\n if counter % 10 == 0:\n print(\n 'Epoch: %2d counter: %5d d_loss: %.8f, g_loss: %.8f' %\n (epoch, counter, d_loss, g_loss))\n if counter % 500 == 0:\n samples = self.sess.run(self.sampler, feed_dict={self.z:\n self.sample, self.y: self.sample_y})\n save_images(samples[:manifold_h * manifold_w, :, :, :],\n [manifold_h, manifold_w], self.result_dir +\n '/{}.png'.format(str(counter).zfill(7)))\n if counter % 1000 == 0:\n saver = tf.train.Saver(max_to_keep=5)\n saver.save(self.sess, self.checkpoint_dir + '/{}'.\n format(str(counter).zfill(7)))\n if counter % 100 == 0:\n if self.Cycle_lr:\n self.learningRateD = self.learningRateD * 0.99\n if self.learningRateD < 0.0001:\n self.learningRateD = 0.0002\n if counter % 500 == 0:\n if 
self.Class_weight:\n if self.la > 25:\n self.la = 25\n else:\n self.la = self.la * 1.5\n counter += 1\n",
"step-3": "<mask token>\n\n\nclass Skip_GAN(object):\n\n def __init__(self, sess, epoch, batch_size, dataset_name, result_dir,\n z_dim, y_dim, checkpoint_dir, num_resblock, Cycle_lr, Class_weight,\n Resnet_weight):\n self.sess = sess\n self.dataset_name = dataset_name\n self.result_dir = result_dir\n self.epoch = epoch\n self.batch_size = batch_size\n self.z_dim = z_dim\n self.y_dim = y_dim\n self.checkpoint_dir = checkpoint_dir\n self.num_resblock = num_resblock\n self.Cycle_lr = Cycle_lr\n self.Class_weight = Class_weight\n self.la = 10\n self.learningRateD = 0.0002\n self.learningRateG = 0.0002\n self.Resnet_weight = Resnet_weight\n if self.dataset_name == 'anime':\n print('loading anime .............')\n self.height = 96\n self.width = 96\n self.c_dim = 3\n self.data_X, self.data_Y = load_anime_old()\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.\n data_Y.shape)\n elif self.dataset_name == 'celebA':\n print('loading celebA ...............')\n self.height = 96\n self.width = 96\n self.c_dim = 3\n self.data_X, self.data_Y = load_CelebA()\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.\n data_Y.shape)\n else:\n print('Sorry there is no option for ', self.dataset_name)\n sys.exit()\n\n def build_model(self):\n self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='y')\n self.img = tf.placeholder(tf.float32, [self.batch_size, self.height,\n self.width, 3], name='img')\n self.z = tf.placeholder(tf.float32, [None, self.z_dim])\n self.G_sample = Generator_srresnet(self.z, self.y, self.\n num_resblock, self.Resnet_weight)\n print('The return of Generator:', self.G_sample)\n D_real, C_real = Discriminator_srresnet(self.img, dataset=self.\n dataset_name)\n print('The return of Discriminator:', D_real, C_real)\n D_fake, C_fake = Discriminator_srresnet(self.G_sample, dataset=self\n .dataset_name, reuse=True)\n print('The return of Discriminator:', D_fake, C_fake)\n self.C_real_loss = tf.reduce_mean(tf.reduce_sum(tf.nn.\n sigmoid_cross_entropy_with_logits(logits=C_real, labels=self.y),\n axis=1))\n self.C_fake_loss = tf.reduce_mean(tf.reduce_sum(tf.nn.\n sigmoid_cross_entropy_with_logits(logits=C_fake, labels=self.y),\n axis=1))\n D_real_loss = tf.reduce_mean(tf.nn.\n sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.\n ones_like(D_real)))\n D_fake_loss = tf.reduce_mean(tf.nn.\n sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.\n zeros_like(D_fake)))\n \"\"\"注意 la也即是我是用动态学习率的时候要关注的参数 \n 但是我的目标是使得类别损失变得更加的大 而不是真伪的损失\"\"\"\n D_loss = D_real_loss + D_fake_loss\n self.DC_loss = self.la * D_loss + self.C_real_loss\n G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits=D_fake, labels=tf.ones_like(D_fake)))\n self.GC_loss = self.la * G_loss + self.C_fake_loss\n print('Calualtion the loss of Optimizer')\n self.theta_D = [v for v in tf.global_variables() if 'd_net' in v.name]\n self.theta_G = [v for v in tf.global_variables() if 'g_net' in v.name]\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n ):\n self.d_updates = tf.train.AdamOptimizer(self.learningRateD,\n beta1=0.5, beta2=0.9).minimize(self.DC_loss, var_list=self.\n theta_D)\n self.g_updates = tf.train.AdamOptimizer(self.learningRateG,\n beta1=0.5, beta2=0.9).minimize(self.GC_loss, var_list=self.\n theta_G)\n self.sampler = Generator_srresnet(self.y, self.z, self.num_resblock,\n self.Resnet_weight, reuse=True, train=False)\n\n def train(self):\n print('begin training ...........')\n tf.global_variables_initializer().run()\n sample_num = 64\n 
tot_num_samples = min(sample_num, self.batch_size)\n manifold_h = int(np.floor(np.sqrt(tot_num_samples)))\n manifold_w = int(np.floor(np.sqrt(tot_num_samples)))\n self.sample = np.random.uniform(-1, 1, size=(self.batch_size, self.\n z_dim)).astype(np.float32)\n self.sample_y = self.data_Y[0:self.batch_size]\n counter = 0\n batch_offset = 0\n data_index = np.arange(self.data_X.shape[0])\n np.random.shuffle(data_index)\n self.data_X = self.data_X[data_index, :, :, :]\n self.data_Y = self.data_Y[data_index]\n for epoch in range(self.epoch):\n if batch_offset + self.batch_size > len(self.data_X):\n batch_offset = 0\n data_index = np.arange(self.data_X.shape[0])\n np.random.shuffle(data_index)\n self.data_X = self.data_X[data_index, :, :, :]\n self.data_Y = self.data_Y[data_index]\n else:\n batch_images = self.data_X[batch_offset:batch_offset + self\n .batch_size]\n batch_codes = self.data_Y[batch_offset:batch_offset + self.\n batch_size]\n batch_z = np.random.uniform(-1, 1, [self.batch_size, self.\n z_dim]).astype(np.float32)\n for i_d_loss in range(3):\n _, d_loss = self.sess.run([self.d_updates, self.DC_loss\n ], feed_dict={self.img: batch_images, self.y:\n batch_codes, self.z: batch_z})\n for i_g_loss in range(1):\n _, g_loss, _ = self.sess.run([self.g_updates, self.\n GC_loss, self.G_sample], feed_dict={self.y:\n batch_codes, self.img: batch_images, self.z: batch_z})\n batch_offset = batch_offset + self.batch_size\n if counter % 10 == 0:\n print(\n 'Epoch: %2d counter: %5d d_loss: %.8f, g_loss: %.8f' %\n (epoch, counter, d_loss, g_loss))\n if counter % 500 == 0:\n samples = self.sess.run(self.sampler, feed_dict={self.z:\n self.sample, self.y: self.sample_y})\n save_images(samples[:manifold_h * manifold_w, :, :, :],\n [manifold_h, manifold_w], self.result_dir +\n '/{}.png'.format(str(counter).zfill(7)))\n if counter % 1000 == 0:\n saver = tf.train.Saver(max_to_keep=5)\n saver.save(self.sess, self.checkpoint_dir + '/{}'.\n format(str(counter).zfill(7)))\n if counter % 100 == 0:\n if self.Cycle_lr:\n self.learningRateD = self.learningRateD * 0.99\n if self.learningRateD < 0.0001:\n self.learningRateD = 0.0002\n if counter % 500 == 0:\n if self.Class_weight:\n if self.la > 25:\n self.la = 25\n else:\n self.la = self.la * 1.5\n counter += 1\n",
"step-4": "from Dataload import load_anime_old, save_images, load_CelebA\nfrom Srresnet_Model import Generator_srresnet, Discriminator_srresnet\nimport tensorflow as tf\nimport numpy as np\nimport sys\n\n\nclass Skip_GAN(object):\n\n def __init__(self, sess, epoch, batch_size, dataset_name, result_dir,\n z_dim, y_dim, checkpoint_dir, num_resblock, Cycle_lr, Class_weight,\n Resnet_weight):\n self.sess = sess\n self.dataset_name = dataset_name\n self.result_dir = result_dir\n self.epoch = epoch\n self.batch_size = batch_size\n self.z_dim = z_dim\n self.y_dim = y_dim\n self.checkpoint_dir = checkpoint_dir\n self.num_resblock = num_resblock\n self.Cycle_lr = Cycle_lr\n self.Class_weight = Class_weight\n self.la = 10\n self.learningRateD = 0.0002\n self.learningRateG = 0.0002\n self.Resnet_weight = Resnet_weight\n if self.dataset_name == 'anime':\n print('loading anime .............')\n self.height = 96\n self.width = 96\n self.c_dim = 3\n self.data_X, self.data_Y = load_anime_old()\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.\n data_Y.shape)\n elif self.dataset_name == 'celebA':\n print('loading celebA ...............')\n self.height = 96\n self.width = 96\n self.c_dim = 3\n self.data_X, self.data_Y = load_CelebA()\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.\n data_Y.shape)\n else:\n print('Sorry there is no option for ', self.dataset_name)\n sys.exit()\n\n def build_model(self):\n self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='y')\n self.img = tf.placeholder(tf.float32, [self.batch_size, self.height,\n self.width, 3], name='img')\n self.z = tf.placeholder(tf.float32, [None, self.z_dim])\n self.G_sample = Generator_srresnet(self.z, self.y, self.\n num_resblock, self.Resnet_weight)\n print('The return of Generator:', self.G_sample)\n D_real, C_real = Discriminator_srresnet(self.img, dataset=self.\n dataset_name)\n print('The return of Discriminator:', D_real, C_real)\n D_fake, C_fake = Discriminator_srresnet(self.G_sample, dataset=self\n .dataset_name, reuse=True)\n print('The return of Discriminator:', D_fake, C_fake)\n self.C_real_loss = tf.reduce_mean(tf.reduce_sum(tf.nn.\n sigmoid_cross_entropy_with_logits(logits=C_real, labels=self.y),\n axis=1))\n self.C_fake_loss = tf.reduce_mean(tf.reduce_sum(tf.nn.\n sigmoid_cross_entropy_with_logits(logits=C_fake, labels=self.y),\n axis=1))\n D_real_loss = tf.reduce_mean(tf.nn.\n sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.\n ones_like(D_real)))\n D_fake_loss = tf.reduce_mean(tf.nn.\n sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.\n zeros_like(D_fake)))\n \"\"\"注意 la也即是我是用动态学习率的时候要关注的参数 \n 但是我的目标是使得类别损失变得更加的大 而不是真伪的损失\"\"\"\n D_loss = D_real_loss + D_fake_loss\n self.DC_loss = self.la * D_loss + self.C_real_loss\n G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(\n logits=D_fake, labels=tf.ones_like(D_fake)))\n self.GC_loss = self.la * G_loss + self.C_fake_loss\n print('Calualtion the loss of Optimizer')\n self.theta_D = [v for v in tf.global_variables() if 'd_net' in v.name]\n self.theta_G = [v for v in tf.global_variables() if 'g_net' in v.name]\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n ):\n self.d_updates = tf.train.AdamOptimizer(self.learningRateD,\n beta1=0.5, beta2=0.9).minimize(self.DC_loss, var_list=self.\n theta_D)\n self.g_updates = tf.train.AdamOptimizer(self.learningRateG,\n beta1=0.5, beta2=0.9).minimize(self.GC_loss, var_list=self.\n theta_G)\n self.sampler = Generator_srresnet(self.y, self.z, 
self.num_resblock,\n self.Resnet_weight, reuse=True, train=False)\n\n def train(self):\n print('begin training ...........')\n tf.global_variables_initializer().run()\n sample_num = 64\n tot_num_samples = min(sample_num, self.batch_size)\n manifold_h = int(np.floor(np.sqrt(tot_num_samples)))\n manifold_w = int(np.floor(np.sqrt(tot_num_samples)))\n self.sample = np.random.uniform(-1, 1, size=(self.batch_size, self.\n z_dim)).astype(np.float32)\n self.sample_y = self.data_Y[0:self.batch_size]\n counter = 0\n batch_offset = 0\n data_index = np.arange(self.data_X.shape[0])\n np.random.shuffle(data_index)\n self.data_X = self.data_X[data_index, :, :, :]\n self.data_Y = self.data_Y[data_index]\n for epoch in range(self.epoch):\n if batch_offset + self.batch_size > len(self.data_X):\n batch_offset = 0\n data_index = np.arange(self.data_X.shape[0])\n np.random.shuffle(data_index)\n self.data_X = self.data_X[data_index, :, :, :]\n self.data_Y = self.data_Y[data_index]\n else:\n batch_images = self.data_X[batch_offset:batch_offset + self\n .batch_size]\n batch_codes = self.data_Y[batch_offset:batch_offset + self.\n batch_size]\n batch_z = np.random.uniform(-1, 1, [self.batch_size, self.\n z_dim]).astype(np.float32)\n for i_d_loss in range(3):\n _, d_loss = self.sess.run([self.d_updates, self.DC_loss\n ], feed_dict={self.img: batch_images, self.y:\n batch_codes, self.z: batch_z})\n for i_g_loss in range(1):\n _, g_loss, _ = self.sess.run([self.g_updates, self.\n GC_loss, self.G_sample], feed_dict={self.y:\n batch_codes, self.img: batch_images, self.z: batch_z})\n batch_offset = batch_offset + self.batch_size\n if counter % 10 == 0:\n print(\n 'Epoch: %2d counter: %5d d_loss: %.8f, g_loss: %.8f' %\n (epoch, counter, d_loss, g_loss))\n if counter % 500 == 0:\n samples = self.sess.run(self.sampler, feed_dict={self.z:\n self.sample, self.y: self.sample_y})\n save_images(samples[:manifold_h * manifold_w, :, :, :],\n [manifold_h, manifold_w], self.result_dir +\n '/{}.png'.format(str(counter).zfill(7)))\n if counter % 1000 == 0:\n saver = tf.train.Saver(max_to_keep=5)\n saver.save(self.sess, self.checkpoint_dir + '/{}'.\n format(str(counter).zfill(7)))\n if counter % 100 == 0:\n if self.Cycle_lr:\n self.learningRateD = self.learningRateD * 0.99\n if self.learningRateD < 0.0001:\n self.learningRateD = 0.0002\n if counter % 500 == 0:\n if self.Class_weight:\n if self.la > 25:\n self.la = 25\n else:\n self.la = self.la * 1.5\n counter += 1\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# @Time : 2020/3/4 10:34\r\n# @Author : YYLin\r\n# @Email : 854280599@qq.com\r\n# @File : Skip_GAN.py\r\nfrom Dataload import load_anime_old, save_images, load_CelebA\r\nfrom Srresnet_Model import Generator_srresnet, Discriminator_srresnet\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport sys\r\n\r\n\r\nclass Skip_GAN(object):\r\n def __init__(self, sess, epoch, batch_size, dataset_name, result_dir, z_dim, y_dim, checkpoint_dir, num_resblock,\r\n Cycle_lr, Class_weight, Resnet_weight):\r\n self.sess = sess\r\n self.dataset_name = dataset_name\r\n self.result_dir = result_dir\r\n self.epoch = epoch\r\n self.batch_size = batch_size\r\n self.z_dim = z_dim\r\n self.y_dim = y_dim\r\n self.checkpoint_dir = checkpoint_dir\r\n self.num_resblock = num_resblock\r\n self.Cycle_lr = Cycle_lr\r\n self.Class_weight = Class_weight\r\n\r\n # La is used to increase the weight of image authenticity\r\n self.la = 10\r\n self.learningRateD = 2e-4\r\n self.learningRateG = 2e-4\r\n\r\n #\r\n self.Resnet_weight = Resnet_weight\r\n\r\n # 加载不同的数据集\r\n if self.dataset_name == 'anime':\r\n print('loading anime .............')\r\n self.height = 96\r\n self.width = 96\r\n self.c_dim = 3\r\n\r\n self.data_X, self.data_Y = load_anime_old()\r\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.data_Y.shape)\r\n\r\n elif self.dataset_name == 'celebA':\r\n print('loading celebA ...............')\r\n self.height = 96\r\n self.width = 96\r\n self.c_dim = 3\r\n\r\n self.data_X, self.data_Y = load_CelebA()\r\n print('self.data_X:', self.data_X.shape, 'self.data_y:', self.data_Y.shape)\r\n else:\r\n print('Sorry there is no option for ', self.dataset_name)\r\n sys.exit()\r\n\r\n def build_model(self):\r\n # some placeholder in our model\r\n self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='y')\r\n self.img = tf.placeholder(tf.float32, [self.batch_size, self.height, self.width, 3], name='img')\r\n self.z = tf.placeholder(tf.float32, [None, self.z_dim])\r\n\r\n self.G_sample = Generator_srresnet(self.z, self.y, self.num_resblock, self.Resnet_weight)\r\n print('The return of Generator:', self.G_sample)\r\n\r\n # 识别器对真实图像进行判断\r\n D_real, C_real = Discriminator_srresnet(self.img, dataset=self.dataset_name)\r\n print('The return of Discriminator:', D_real, C_real)\r\n\r\n # 识别器对生成图像进行判断\r\n D_fake, C_fake = Discriminator_srresnet(self.G_sample, dataset=self.dataset_name, reuse=True)\r\n print('The return of Discriminator:', D_fake, C_fake)\r\n\r\n # 判断图像的类别\r\n self.C_real_loss = tf.reduce_mean(\r\n tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=C_real, labels=self.y), axis=1))\r\n self.C_fake_loss = tf.reduce_mean(\r\n tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=C_fake, labels=self.y), axis=1))\r\n\r\n # D_Loss 希望真实图像被判断为1 希望生成图像被判断为0\r\n D_real_loss = tf.reduce_mean(\r\n tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real, labels=tf.ones_like(D_real)))\r\n D_fake_loss = tf.reduce_mean(\r\n tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.zeros_like(D_fake)))\r\n\r\n '''注意 la也即是我是用动态学习率的时候要关注的参数 \r\n 但是我的目标是使得类别损失变得更加的大 而不是真伪的损失'''\r\n D_loss = D_real_loss + D_fake_loss\r\n self.DC_loss = (self.la * D_loss + self.C_real_loss)\r\n\r\n # 对生成模型的损失也在关注该模型\r\n G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake, labels=tf.ones_like(D_fake)))\r\n self.GC_loss = (self.la * G_loss + self.C_fake_loss)\r\n\r\n print('Calualtion the loss of Optimizer')\r\n self.theta_D = [v for v in 
tf.global_variables() if 'd_net' in v.name]\r\n self.theta_G = [v for v in tf.global_variables() if 'g_net' in v.name]\r\n\r\n with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\r\n self.d_updates = tf.train.AdamOptimizer(self.learningRateD, beta1=0.5, beta2=0.9).minimize(self.DC_loss,\r\n var_list=self.theta_D)\r\n self.g_updates = tf.train.AdamOptimizer(self.learningRateG, beta1=0.5, beta2=0.9).minimize(self.GC_loss,\r\n var_list=self.theta_G)\r\n self.sampler = Generator_srresnet(self.y, self.z, self.num_resblock, self.Resnet_weight, reuse=True, train=False)\r\n\r\n def train(self):\r\n print('begin training ...........')\r\n tf.global_variables_initializer().run()\r\n\r\n # sample_num 用于控制存储图像\r\n sample_num = 64\r\n tot_num_samples = min(sample_num, self.batch_size)\r\n manifold_h = int(np.floor(np.sqrt(tot_num_samples)))\r\n manifold_w = int(np.floor(np.sqrt(tot_num_samples)))\r\n\r\n # 定义随机噪音以及标签 2019/09/29\r\n self.sample = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim)).astype(np.float32)\r\n self.sample_y = self.data_Y[0:self.batch_size]\r\n\r\n counter = 0\r\n\r\n # shuffle the dataset 2019/9/29\r\n batch_offset = 0\r\n data_index = np.arange(self.data_X.shape[0])\r\n np.random.shuffle(data_index)\r\n self.data_X = self.data_X[data_index, :, :, :]\r\n self.data_Y = self.data_Y[data_index]\r\n\r\n # 这种方式会有使得小于batch_size个数据用不上\r\n for epoch in range(self.epoch):\r\n if batch_offset + self.batch_size > len(self.data_X):\r\n batch_offset = 0\r\n # shuffle dataset\r\n data_index = np.arange(self.data_X.shape[0])\r\n np.random.shuffle(data_index)\r\n self.data_X = self.data_X[data_index, :, :, :]\r\n self.data_Y = self.data_Y[data_index]\r\n else:\r\n # 首先是得到输入的数据\r\n batch_images = self.data_X[batch_offset:batch_offset + self.batch_size]\r\n batch_codes = self.data_Y[batch_offset:batch_offset + self.batch_size]\r\n\r\n batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)\r\n\r\n # 然后更新识别器\r\n for i_d_loss in range(3):\r\n _, d_loss = self.sess.run([self.d_updates, self.DC_loss], feed_dict={self.img: batch_images,\r\n self.y: batch_codes,\r\n self.z: batch_z})\r\n for i_g_loss in range(1):\r\n # 最后更新生成器模型\r\n _, g_loss, _ = self.sess.run([self.g_updates, self.GC_loss, self.G_sample],\r\n feed_dict={self.y: batch_codes, self.img: batch_images, self.z: batch_z})\r\n\r\n batch_offset = batch_offset + self.batch_size\r\n\r\n # display the loss every 10 steps\r\n if (counter % 10) == 0:\r\n print('Epoch: %2d counter: %5d d_loss: %.8f, g_loss: %.8f' % (epoch, counter, d_loss, g_loss))\r\n\r\n # save image every 500 steps\r\n if counter % 500 == 0:\r\n samples = self.sess.run(self.sampler,\r\n feed_dict={self.z: self.sample, self.y: self.sample_y})\r\n\r\n save_images(samples[:manifold_h * manifold_w, :, :, :], [manifold_h, manifold_w],\r\n self.result_dir + '/{}.png'.format(str(counter).zfill(7)))\r\n\r\n # save the model every 1000 steps\r\n if counter % 1000 == 0:\r\n saver = tf.train.Saver(max_to_keep=5)\r\n saver.save(self.sess, self.checkpoint_dir + '/{}'.format(str(counter).zfill(7)))\r\n\r\n if (counter % 100) == 0:\r\n if self.Cycle_lr:\r\n self.learningRateD = self.learningRateD * 0.99\r\n if self.learningRateD < 0.0001:\r\n self.learningRateD = 2e-4\r\n\r\n if (counter % 500) == 0:\r\n if self.Class_weight:\r\n if self.la > 25:\r\n self.la = 25\r\n else:\r\n self.la = self.la * 1.5\r\n\r\n counter += 1\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
import os
import tempfile
import shutil
import math
import sys
import subprocess
from irank.config import IrankOptionParser, IrankApp
from irank import db as irank_db
STATUS = 0
def main():
p = IrankOptionParser('%prog -d DEST playlist_name [playlist_name ...]')
p.add_option('-d', '--dest', help='export destination', default=None)
p.add_option('-l', '--limit', type="int", help='per-playlist filesize limit', default=None)
p.add_option('--no-checksum', dest='checksum', action="store_false", default=True)
p.add_option('-i', '--interactive', action='store_true', help='Interactively resolve errors')
p.add_option('--rsync-opt', dest='rsync_opts', action='append', default=[], help='Add rsync option (can be used multiple times)')
opts, args = p.parse_args()
assert opts.dest, p.get_usage()
assert len(args) > 0, p.get_usage()
app = IrankApp(opts)
music_base = os.path.expanduser(opts.music)
irank_base = os.path.expanduser(opts.irank)
export_base = os.path.expanduser(opts.dest)
export_music = export_base # Used to be __music, but android 4+ doesn't like sub-folders
songs = {}
all_songs = set()
# we use hard-links, so the export_temp must be on the same device as our music!
# export_temp = tempfile.mkdtemp(prefix='irank-export-')
export_temp = os.path.join(irank_base, "__export_temp")
if os.path.exists(export_temp):
shutil.rmtree(export_temp)
else:
os.makedirs(export_temp)
shutil.copy(
os.path.join(irank_base, "irank.sqlite"),
os.path.join(export_temp, "irank.sqlite")
)
try:
for playlist in args:
playlist_songs = set(app.songs_for(playlist, relative=True))
songs[playlist] = playlist_songs
all_songs.update(playlist_songs)
write_m3u(export_temp, playlist, sorted(playlist_songs))
print "Generated playlist %s: %s files" % (playlist, len(playlist_songs))
print "linking into %r ..." % (export_temp,)
total_size = link_all_files(all_songs, export_temp=export_temp, music_base=music_base, limit=opts.limit)
print "Syncing %s files (%0.2fgb)" % (len(all_songs),total_size / (math.pow(1000, 3)))
extra_sync_opts = []
syncing = True
while syncing:
try:
sync(export_temp, export_music, additional_opts=opts.rsync_opts + extra_sync_opts, checksum=opts.checksum)
break
except (subprocess.CalledProcessError, OSError) as e:
if not opts.interactive:
raise
print >> sys.stderr, "Error syncing: %s\n" % (e,)
while True:
print >> sys.stderr, "Press Ctrl-C to abort, <return> to restart, 'k' to retry (skipping existing files) and 's' to skip to next step"
result = raw_input().strip().lower()
if result == 'k':
extra_sync_opts = ['--ignore-existing']
break
elif result == '':
extra_sync_opts = []
break
elif result == 's':
syncing = False
break
else:
print >> sys.stderr, "Eh?"
finally:
shutil.rmtree(export_temp)
def link_all_files(all_songs, export_temp, music_base, limit=None):
total_size = 0
def file_size(path):
try:
return os.stat(path).st_size
except OSError:
print >> sys.stderr, "couldn't get file size of file: %s" % (path,)
return None
for file in all_songs:
#if not os.path.isdir(os.path.dirname(
src_file = os.path.join(music_base, file)
src_file_size = file_size(src_file)
if src_file_size is None:
continue
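		# stop once adding the next file would exceed the size budget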
if limit and (total_size + src_file_size) > limit:
return total_size
else:
total_size += src_file_size
link_dest = os.path.join(export_temp, file)
link_dest_dir = os.path.dirname(link_dest)
if not os.path.isdir(link_dest_dir):
os.makedirs(link_dest_dir)
os.link(src_file, link_dest)
return total_size
def sync(src, dest, additional_opts=[], checksum=True):
cmd = [
'rsync',
#'-n',
'--progress',
'--modify-window=5',
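		# treat mtimes within 5s as equal (FAT-style filesystems store coarse timestamps)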
'-r',
#'-v',
'--delete-before']
if checksum:
cmd.append('-c')
cmd = cmd + additional_opts + [src + os.path.sep, dest]
print "running: %r" % (cmd,)
subprocess.check_call(cmd, stdin=subprocess.PIPE)
def write_m3u(dest, name, files):
global STATUS
encoding = sys.getfilesystemencoding()
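	# playlist entries must match the filesystem's own filename encoding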
	with open(os.path.join(dest, name + '.m3u'), 'w') as output:
		for song in files:
			try:
				print >> output, song.encode(encoding)
			except (UnicodeEncodeError, UnicodeDecodeError):
				print "FAILED to write song: %r" % (song,)
				STATUS = 1
if __name__ == '__main__':
main()
sys.exit(STATUS)
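# Hypothetical invocation (paths and playlist names are illustrative only):
#   irank-export -d /media/phone/Music -l 4000000000 -i Favourites Running
# exports both playlists, stops linking files once ~4 GB have been gathered,
# and prompts interactively when rsync fails.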
|
normal
|
{
"blob_id": "df64d769ffba8cddac34282a526122e3c941249d",
"index": 245,
"step-1": "#!/usr/bin/env python\nimport os\nimport tempfile\nimport shutil\nimport math\nimport sys\nimport subprocess\n\nfrom irank.config import IrankOptionParser, IrankApp\nfrom irank import db as irank_db\nSTATUS = 0\n\ndef main():\n\tp = IrankOptionParser('%prog -d DEST playlist_name [playlist_name ...]')\n\tp.add_option('-d', '--dest', help='export destination', default=None)\n\tp.add_option('-l', '--limit', type=\"int\", help='per-playlist filesize limit', default=None)\n\tp.add_option('--no-checksum', dest='checksum', action=\"store_false\", default=True)\n\tp.add_option('-i', '--interactive', action='store_true', help='Interactively resolve errors')\n\tp.add_option('--rsync-opt', dest='rsync_opts', action='append', default=[], help='Add rsync option (can be used multiple times)')\n\topts, args = p.parse_args()\n\tassert opts.dest, p.get_usage()\n\tassert len(args) > 0, p.get_usage()\n\tapp = IrankApp(opts)\n\n\tmusic_base = os.path.expanduser(opts.music)\n\tirank_base = os.path.expanduser(opts.irank)\n\texport_base = os.path.expanduser(opts.dest)\n\texport_music = export_base # Used to be __music, but android 4+ doesn't like sub-folders\n\tsongs = {}\n\tall_songs = set()\n\n\t# we use hard-links, so the export_temp must be on the same device as our music!\n\t# export_temp = tempfile.mkdtemp(prefix='irank-export-')\n\texport_temp = os.path.join(irank_base, \"__export_temp\")\n\tif os.path.exists(export_temp):\n\t\tshutil.rmtree(export_temp)\n\telse:\n\t\tos.makedirs(export_temp)\n\t\n\tshutil.copy(\n\t\tos.path.join(irank_base, \"irank.sqlite\"),\n\t\tos.path.join(export_temp, \"irank.sqlite\")\n\t)\n\n\ttry:\n\t\tfor playlist in args:\n\t\t\tplaylist_songs = set(app.songs_for(playlist, relative=True))\n\t\t\tsongs[playlist] = playlist_songs\n\t\t\tall_songs.update(playlist_songs)\n\t\t\twrite_m3u(export_temp, playlist, sorted(playlist_songs))\n\t\t\tprint \"Generated playlist %s: %s files\" % (playlist, len(playlist_songs))\n\n\t\tprint \"linking into %r ...\" % (export_temp,)\n\t\ttotal_size = link_all_files(all_songs, export_temp=export_temp, music_base=music_base, limit=opts.limit)\n\n\t\tprint \"Syncing %s files (%0.2fgb)\" % (len(all_songs),total_size / (math.pow(1000, 3)))\n\t\textra_sync_opts = []\n\t\tsyncing = True\n\t\twhile syncing:\n\t\t\ttry:\n\t\t\t\tsync(export_temp, export_music, additional_opts=opts.rsync_opts + extra_sync_opts, checksum=opts.checksum)\n\t\t\t\tbreak\n\t\t\texcept (subprocess.CalledProcessError, OSError) as e:\n\t\t\t\tif not opts.interactive:\n\t\t\t\t\traise\n\t\t\t\tprint >> sys.stderr, \"Error syncing: %s\\n\" % (e,)\n\t\t\t\twhile True:\n\t\t\t\t\tprint >> sys.stderr, \"Press Ctrl-C to abort, <return> to restart, 'k' to retry (skipping existing files) and 's' to skip to next step\"\n\t\t\t\t\tresult = raw_input().strip().lower()\n\t\t\t\t\tif result == 'k':\n\t\t\t\t\t\textra_sync_opts = ['--ignore-existing']\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif result == '':\n\t\t\t\t\t\textra_sync_opts = []\n\t\t\t\t\t\tbreak\n\t\t\t\t\telif result == 's':\n\t\t\t\t\t\tsyncing = False\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint >> sys.stderr, \"Eh?\"\n\tfinally:\n\t\tshutil.rmtree(export_temp)\n\ndef link_all_files(all_songs, export_temp, music_base, limit=None):\n\ttotal_size = 0\n\tdef file_size(path):\n\t\ttry:\n\t\t\treturn os.stat(path).st_size\n\t\texcept OSError:\n\t\t\tprint >> sys.stderr, \"couldn't get file size of file: %s\" % (path,)\n\t\treturn None\n\n\tfor file in all_songs:\n\t\t#if not os.path.isdir(os.path.dirname(\n\t\tsrc_file 
= os.path.join(music_base, file)\n\t\tsrc_file_size = file_size(src_file)\n\t\tif src_file_size is None:\n\t\t\tcontinue\n\t\tif limit and (total_size + src_file_size) > limit:\n\t\t\treturn total_size\n\t\telse:\n\t\t\ttotal_size += src_file_size\n\n\t\tlink_dest = os.path.join(export_temp, file)\n\t\tlink_dest_dir = os.path.dirname(link_dest)\n\t\tif not os.path.isdir(link_dest_dir):\n\t\t\tos.makedirs(link_dest_dir)\n\t\tos.link(src_file, link_dest)\n\treturn total_size\n\ndef sync(src, dest, additional_opts=[], checksum=True):\n\tcmd = [\n\t\t'rsync',\n\t\t#'-n',\n\t\t'--progress',\n\t\t'--modify-window=5',\n\t\t'-r',\n\t\t#'-v',\n\t\t'--delete-before']\n\tif checksum:\n\t\tcmd.append('-c')\n\tcmd = cmd + additional_opts + [src + os.path.sep, dest]\n\tprint \"running: %r\" % (cmd,)\n\tsubprocess.check_call(cmd, stdin=subprocess.PIPE)\n\ndef write_m3u(dest, name, files):\n\tglobal STATUS\n\tencoding = sys.getfilesystemencoding()\n\twith open(os.path.join(dest, name + '.m3u'), 'w') as output:\n\t\tfor name in files:\n\t\t\ttry:\n\t\t\t\tprint >> output, name.encode(encoding)\n\t\t\texcept (UnicodeEncodeError, UnicodeDecodeError) as err:\n\t\t\t\tprint \"FAILED to write song: %r\" % (name,)\n\t\t\t\tSTATUS = 1\n\nif __name__ == '__main__':\n\tmain()\n\tsys.exit(STATUS)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import src.engine.functions.root_analyzer.main as main
from src.engine.functions.function import Function
class GetRootData(Function):
def __init__(self, data_display):
self.data_display = data_display
def call(self, args):
image_folder_path = args[0]
output_path = args[1]
self.data_display.clear()
data = main.generate_data(image_folder_path, self.data_display.data_tracker)
error_message = self.data_display.display_data(data)
return ""
|
normal
|
{
"blob_id": "e8ea307352805bf0b5129e2ad7f7b68c44e78fc9",
"index": 9118,
"step-1": "<mask token>\n\n\nclass GetRootData(Function):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.\n data_tracker)\n error_message = self.data_display.display_data(data)\n return ''\n",
"step-4": "import src.engine.functions.root_analyzer.main as main\nfrom src.engine.functions.function import Function\n\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.\n data_tracker)\n error_message = self.data_display.display_data(data)\n return ''\n",
"step-5": "import src.engine.functions.root_analyzer.main as main\nfrom src.engine.functions.function import Function\n\nclass GetRootData(Function):\n\n def __init__(self, data_display):\n self.data_display = data_display\n\n def call(self, args):\n image_folder_path = args[0]\n output_path = args[1]\n self.data_display.clear()\n data = main.generate_data(image_folder_path, self.data_display.data_tracker)\n error_message = self.data_display.display_data(data)\n return \"\"\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: GB18030 -*-
import inspect
import os,sys
import subprocess
from lib.common.utils import *
from lib.common.loger import loger
from lib.common import checker
from lib.common.logreader import LogReader
import shutil
from lib.common.XmlHandler import *
from lib.common.Dict import *
class baseModule(object):
def __init__(self):
self.sys = Shell_System()
        self.path = None
        # path to the module's bin directory
        self.bin_path = None
        # path to the module's conf directory
        self.conf_path = None
        # path to the module's dict directory
        self.dict_path = None
        # path to the module's log directory
        self.log_path = None
        # ports assigned to this module
        self.port = []
        # number of ports this module needs to configure
        self.port_num = 0
        # module name
        self.type = None
        # whether to back up conf
        self.conf_bak_flag = False
        # whether to back up dict
        self.dict_back_flag = False
        # the variables below are initialized by each concrete module as needed
        # notice log file name
        self.ntlogname = None
        # WF (warning/fatal) log file name
        self.wflogname = None
self.nt_logreader = None
self.wf_logreader = None
    def add_relation(self, module):
        """
        @note: the argument is an already-created instance of another module.
        Establishes the concrete relation between the two.
        """
self.module_rel_set.append(module)
loger.info("Topology is %s ----> %s",self.type,getattr(module,"type"))
return 0
def build_relation(self):
"""
@note: 如果有下游模块必须实现改方法
建本模块和下游模块关系
"""
pass
def get_port(self):
"""
@note: 返回本模块申请的端口list
"""
return self.port
def set_listen_port(self):
"""
@note:各模块实现设置对用的conf
"""
pass
def start(self):
"""
@note: 启动模块
注意可通过端口或进程是否存在判断是否启动成功
checker.check_process_exist(processpath)
checker.check_port_exist(port)
"""
pass
def stop(self):
"""
@note:停止运行
默认通过self.bin_path实现
"""
if self.bin_path <> None and os.path.exists(self.bin_path):
kill_process(self.bin_path)
loger.debug("kill process %s"%(self.bin_path))
else:
loger.warning("module [%s] has not bin_path!"%(self.type))
def bak_or_revert_env(self):
"""
@note:根据bakflag 进行bak 操作
默认进行两项bak conf dict
如果path.robotbak不存在,则将path备份
- 如果path.dtsbak存在,则用path.robotbak覆盖path
"""
#清理log目录
if self.log_path is not None:
cmd = "rm -rf " + self.log_path
loger.debug(cmd)
self.sys.shell(cmd)
        # rename core files
        rename_cores(self.path)
        # back up / restore conf
        if self.conf_bak_flag:
            bak_or_revert(self.conf_path)
        # back up / restore dict
if self.dict_back_flag:
bak_or_revert(self.dict_path)
return 0
def __conf_op(self, optype, confid, k, v=None):
"""
@note: 封装 获取,删除、设置3种conf操作方法
optype为操作类型 0:设置、1:获取、2:删除
对外接口由 set_conf、get_conf、delete_conf
"""
if self.path is None:
raise AssertionError("get modulepath error[%s]"%(self.path))
path, seg = getconfitem(self.path, self.type, confid)
if path is None:
raise AssertionError("set conf error[%s][%s][%s][%s]"%(self.type, confid, k , v))
conf = UbConfigParser(path, seg)
if optype == 0:
conf.set(k , v)
return
if optype == 1:
return conf.get(k)
if optype == 2:
conf.delete(k)
return
def set_conf(self, confid, k, v):
"""
@note:设置conf
confid为conf.xml中注册id
"""
return self.__conf_op(0, confid, str(k), str(v))
def get_conf(self, confid, k):
return self.__conf_op(1, confid, str(k))
def delete_conf(self, confid, k):
return self.__conf_op(2, confid, str(k))
def set_dict(self, dictid, *line_item):
"""
@note:设置字典数据 将数据设置进不同的列中
"""
path, seg = getdictitem(self.type, dictid)
real_path = os.path.join(self.path, path)
dicth = DictHandler(real_path, seg)
dicth.set_dict(line_item)
def clear_dict(self, dictid):
"""
@note:清理字典
"""
path, seg = getdictitem(self.type, dictid)
real_path = os.path.join(self.path, path)
dicth = DictHandler(self, real_path, seg)
dicth.clear_dict()
    # the interfaces below are test helpers
def check_notice_log_has(self, regex):
"""
@note:检查 notice log中是否包含某项
regex为匹配正则表达式
return: 包含返回True、否则为False
"""
if self.nt_logreader == None:
nt_log_path = os.path.join(self.log_path, self.ntlogname)
self.nt_logreader = LogReader(nt_log_path)
return checker.check_log_contain(self.nt_logreader,regex)
def check_wf_log_has(self, regex):
"""
检查wf日志包含某项
regex为匹配正则表达式
return: 包含返回True、否则为False
"""
if self.wf_logreader == None:
wf_log_path = os.path.join(self.log_path, self.wflogname)
self.wf_logreader = LogReader(wf_log_path)
return checker.check_log_contain(self.wf_logreader, regex)
def check_fatal(self):
"""
@note:检查结果中是否包含fatal
return: 包含fatal 返回 True, 否则返回false
"""
regex="^FATAL.*"
return self.check_wf_log_has(regex)
def set_req(self, reqresjs=None, *agrs):
"""
@note:设置请求
注意不是字典设置
"""
pass
    def set_res(self):
        """
        @note: set the response
        """
pass
def common_check(self):
"""
通用commoncheck接口
该接口无传入参数
一般用作fatal、core等检查
"""
#将log打印出
if self.nt_logreader == None:
nt_log_path = os.path.join(self.log_path, self.ntlogname)
self.nt_logreader = LogReader(nt_log_path)
if self.wf_logreader == None:
wf_log_path = os.path.join(self.log_path, self.wflogname)
self.wf_logreader = LogReader(wf_log_path)
loger.diagnose("Module[%s] wf logs:\n%s"%(self.type, self.wf_logreader.read_fatal_and_last_lines(10)))
loger.diagnose("Module[%s] notice logs:\n%s"%(self.type, self.nt_logreader.read_last_lines(10)))
        # check for core files
        log_cores(self.path)
        # check for FATAL
if self.check_fatal():
raise AssertionError("There FATAL in module[%s]"%(self.type))
def check(self, checkjs=None):
"""
@note:check接口
"""
pass
def reqdata(self):
        '''
        @note: build each module's req as json and assign it to an internal variable
        '''
pass
def get_used_port(self):
"""
@note:获得该模块所在机器的空闲端口号
"""
used_port_list = self.sys.shell("netstat -na 2>/dev/null|grep \":\"|awk -F \"[ :]\" '{print $17}'",output = "true")[1].splitlines()
return used_port_list
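# A minimal sketch of a concrete module built on baseModule (hypothetical;
# real modules live elsewhere in the framework):
class demoModule(baseModule):
    def __init__(self):
        baseModule.__init__(self)
        self.type = "demo"
        self.port_num = 1
        self.conf_bak_flag = True

    def start(self):
        # a real module would launch its binary here, then verify startup
        # with checker.check_port_exist(self.port[0])
        pass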
def test_system():
"单元测试"
npatSys = Shell_System()
npatSys.shell("echo '12345' > a.txt")
npatSys.shell("rm b.txt")
npatSys.shell("cat a.txt b.txt", output = True)
npatSys.shell("ttt")
npatSys.shell("ttt", output = True)
used_port_list = npatSys.shell("netstat -na 2>/dev/null|grep \":\"|awk -F \"[ :]\" '{print $17}'",output = "true")[1].splitlines()
print used_port_list
if __name__ == '__main__':
mm = baseModule()
print type(mm.sys)
|
normal
|
{
"blob_id": "a74d27d9e31872100b4f22512abe9de7d9277de7",
"index": 2970,
"step-1": "# -*- coding: GB18030 -*-\nimport inspect\nimport os,sys\nimport subprocess\nfrom lib.common.utils import *\nfrom lib.common.loger import loger\nfrom lib.common import checker\nfrom lib.common.logreader import LogReader\nimport shutil\nfrom lib.common.XmlHandler import *\nfrom lib.common.Dict import *\n\nclass baseModule(object):\n def __init__(self):\n self.sys = Shell_System()\n self.path =None\n #模块bin 路径\n self.bin_path = None\n #模块配置路径\n self.conf_path = None\n #模块字典路径\n self.dict_path = None\n #log路径\n self.log_path = None\n #用于存储被分配得到的端口\n self.port=[]\n #用于表示本模块需要设置的端口数目\n self.port_num = 0\n #用于表示模块名\n self.type=None\n #是否进行conf 备份flag\n self.conf_bak_flag = False\n #是否进行dict备份\n self.dict_back_flag = False\n #以下变量根据需要在各个module中初始化\n #notice 日志名称\n self.ntlogname = None\n #WF日志名称\n self.wflogname = None\n self.nt_logreader = None\n self.wf_logreader = None\n \n def add_relation(self,module):\n \"\"\"\n @note: 参数传递的是已经生成的其他module的实例\n 具体关联关系的建立\n \"\"\"\n self.module_rel_set.append(module)\n loger.info(\"Topology is %s ----> %s\",self.type,getattr(module,\"type\"))\n return 0\n\n def build_relation(self):\n \"\"\"\n @note: 如果有下游模块必须实现改方法\n 建本模块和下游模块关系\n \"\"\"\n pass\n \n def get_port(self):\n \"\"\"\n @note: 返回本模块申请的端口list\n \"\"\"\n return self.port\n\n def set_listen_port(self):\n \"\"\"\n @note:各模块实现设置对用的conf\n \"\"\"\n pass\n\n def start(self):\n \"\"\"\n @note: 启动模块\n 注意可通过端口或进程是否存在判断是否启动成功\n checker.check_process_exist(processpath)\n checker.check_port_exist(port)\n \"\"\"\n pass\n\n def stop(self):\n \"\"\"\n @note:停止运行\n 默认通过self.bin_path实现\n \"\"\"\n if self.bin_path <> None and os.path.exists(self.bin_path):\n kill_process(self.bin_path)\n loger.debug(\"kill process %s\"%(self.bin_path))\n else:\n loger.warning(\"module [%s] has not bin_path!\"%(self.type))\n\n def bak_or_revert_env(self):\n \"\"\"\n @note:根据bakflag 进行bak 操作\n 默认进行两项bak conf dict\n 如果path.robotbak不存在,则将path备份\n - 如果path.dtsbak存在,则用path.robotbak覆盖path\n \"\"\"\n #清理log目录\n if self.log_path is not None:\n cmd = \"rm -rf \" + self.log_path\n loger.debug(cmd)\n self.sys.shell(cmd)\n # 重命名core\n rename_cores(self.path)\n #备份恢复conf\n if self.conf_bak_flag:\n bak_or_revert(self.conf_path)\n #备份恢复dict\n if self.dict_back_flag:\n bak_or_revert(self.dict_path)\n return 0\n \n def __conf_op(self, optype, confid, k, v=None):\n \"\"\"\n @note: 封装 获取,删除、设置3种conf操作方法\n optype为操作类型 0:设置、1:获取、2:删除\n 对外接口由 set_conf、get_conf、delete_conf\n \"\"\"\n if self.path is None:\n raise AssertionError(\"get modulepath error[%s]\"%(self.path))\n path, seg = getconfitem(self.path, self.type, confid)\n if path is None:\n raise AssertionError(\"set conf error[%s][%s][%s][%s]\"%(self.type, confid, k , v))\n conf = UbConfigParser(path, seg)\n if optype == 0:\n conf.set(k , v)\n return \n if optype == 1:\n return conf.get(k)\n if optype == 2:\n conf.delete(k)\n return\n \n def set_conf(self, confid, k, v):\n \"\"\"\n @note:设置conf\n confid为conf.xml中注册id\n \"\"\"\n return self.__conf_op(0, confid, str(k), str(v)) \n\n def get_conf(self, confid, k):\n return self.__conf_op(1, confid, str(k))\n\n def delete_conf(self, confid, k):\n return self.__conf_op(2, confid, str(k))\n \n def set_dict(self, dictid, *line_item):\n \"\"\"\n @note:设置字典数据 将数据设置进不同的列中\n \"\"\"\n path, seg = getdictitem(self.type, dictid) \n real_path = os.path.join(self.path, path)\n dicth = DictHandler(real_path, seg)\n dicth.set_dict(line_item)\n\n def clear_dict(self, dictid):\n \"\"\"\n @note:清理字典\n \"\"\"\n path, seg = getdictitem(self.type, dictid) \n 
real_path = os.path.join(self.path, path)\n dicth = DictHandler(self, real_path, seg)\n dicth.clear_dict()\n\n #以下接口为测试接口\n def check_notice_log_has(self, regex):\n \"\"\"\n @note:检查 notice log中是否包含某项\n regex为匹配正则表达式\n return: 包含返回True、否则为False \n \"\"\"\n if self.nt_logreader == None:\n nt_log_path = os.path.join(self.log_path, self.ntlogname)\n self.nt_logreader = LogReader(nt_log_path)\n return checker.check_log_contain(self.nt_logreader,regex)\n \n def check_wf_log_has(self, regex):\n \"\"\"\n 检查wf日志包含某项\n regex为匹配正则表达式\n return: 包含返回True、否则为False \n \"\"\"\n if self.wf_logreader == None:\n wf_log_path = os.path.join(self.log_path, self.wflogname)\n self.wf_logreader = LogReader(wf_log_path)\n return checker.check_log_contain(self.wf_logreader, regex)\n \n def check_fatal(self):\n \"\"\"\n @note:检查结果中是否包含fatal\n return: 包含fatal 返回 True, 否则返回false\n \"\"\"\n regex=\"^FATAL.*\"\n return self.check_wf_log_has(regex)\n\n \n def set_req(self, reqresjs=None, *agrs):\n \"\"\"\n @note:设置请求\n 注意不是字典设置\n \"\"\"\n pass\n\n def set_res():\n \"\"\"\n @note:设置返回\n \"\"\"\n pass\n\n def common_check(self):\n \"\"\"\n 通用commoncheck接口\n 该接口无传入参数\n 一般用作fatal、core等检查\n \"\"\"\n #将log打印出\n if self.nt_logreader == None:\n nt_log_path = os.path.join(self.log_path, self.ntlogname)\n self.nt_logreader = LogReader(nt_log_path)\n if self.wf_logreader == None:\n wf_log_path = os.path.join(self.log_path, self.wflogname)\n self.wf_logreader = LogReader(wf_log_path)\n loger.diagnose(\"Module[%s] wf logs:\\n%s\"%(self.type, self.wf_logreader.read_fatal_and_last_lines(10)))\n loger.diagnose(\"Module[%s] notice logs:\\n%s\"%(self.type, self.nt_logreader.read_last_lines(10)))\n #检查core\n log_cores(self.path)\n #检查FATAL\n if self.check_fatal():\n raise AssertionError(\"There FATAL in module[%s]\"%(self.type))\n \n def check(self, checkjs=None):\n \"\"\"\n @note:check接口\n \"\"\"\n pass\n \n def reqdata(self):\n '''\n @note: 将各个模块的req形成json赋值给内部变量\n '''\n pass\n\n def get_used_port(self):\n \"\"\"\n @note:获得该模块所在机器的空闲端口号 \n \"\"\"\n used_port_list = self.sys.shell(\"netstat -na 2>/dev/null|grep \\\":\\\"|awk -F \\\"[ :]\\\" '{print $17}'\",output = \"true\")[1].splitlines()\n return used_port_list\n\ndef test_system():\n \"单元测试\"\n npatSys = Shell_System()\n npatSys.shell(\"echo '12345' > a.txt\")\n npatSys.shell(\"rm b.txt\")\n npatSys.shell(\"cat a.txt b.txt\", output = True)\n npatSys.shell(\"ttt\")\n npatSys.shell(\"ttt\", output = True)\n used_port_list = npatSys.shell(\"netstat -na 2>/dev/null|grep \\\":\\\"|awk -F \\\"[ :]\\\" '{print $17}'\",output = \"true\")[1].splitlines()\n print used_port_list\n\nif __name__ == '__main__':\n mm = baseModule()\n print type(mm.sys)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pytesseract
from PIL import Image
import tensorflow as tf
from keras.models import load_model
from tensorflow import Graph
import os
import json
import cv2
import numpy as np
global class_graph
def classify(img, c_model):
#global class_graph
""" classifies images in a given folder using the 'model'"""
#img = load_img(im_path,target_size=(input_height, input_width))
#img = img_to_array(img)
im_size = 128
# resize
img = cv2.resize(img, (im_size,im_size))
img = img.astype("float") / 255.0
img = np.expand_dims(img, axis=0)
with class_graph.as_default():
predictions = c_model.predict(img)[0]
return predictions
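# Hypothetical helper for whole-folder classification (the original docstring
# mentioned a folder, while classify() takes one image; unreadable files are
# skipped):
def classify_folder(folder, c_model):
    results = {}
    for name in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, name))
        if img is not None:
            results[name] = classify(img, c_model)
    return results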
if __name__ == '__main__':
im_name = "data/demo/images(1).jpg"
# load model
model_path = "data/credit-card.model"
class_model = load_model(model_path)
    class_graph = tf.get_default_graph()
crop_img = cv2.imread(im_name)
predictions = classify(crop_img, class_model)
print(predictions)
|
normal
|
{
"blob_id": "c7d51f6448400af5630bdc0c29493320af88288e",
"index": 7424,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef classify(img, c_model):\n \"\"\" classifies images in a given folder using the 'model'\"\"\"\n im_size = 128\n img = cv2.resize(img, (im_size, im_size))\n img = img.astype('float') / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n return predictions\n\n\n<mask token>\n",
"step-3": "<mask token>\nglobal class_graph\n\n\ndef classify(img, c_model):\n \"\"\" classifies images in a given folder using the 'model'\"\"\"\n im_size = 128\n img = cv2.resize(img, (im_size, im_size))\n img = img.astype('float') / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n return predictions\n\n\nif __name__ == '__main__':\n im_name = 'data/demo/images(1).jpg'\n model_path = 'data/credit-card.model'\n class_model = load_model(model_path)\n class_graph = tf.get_default_graph()\n crop_img = cv2.imread(im_name)\n predictions = classify(crop_img, class_model)\n print(predictions)\n",
"step-4": "import pytesseract\nfrom PIL import Image\nimport tensorflow as tf\nfrom keras.models import load_model\nfrom tensorflow import Graph\nimport os\nimport json\nimport cv2\nimport numpy as np\nglobal class_graph\n\n\ndef classify(img, c_model):\n \"\"\" classifies images in a given folder using the 'model'\"\"\"\n im_size = 128\n img = cv2.resize(img, (im_size, im_size))\n img = img.astype('float') / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n return predictions\n\n\nif __name__ == '__main__':\n im_name = 'data/demo/images(1).jpg'\n model_path = 'data/credit-card.model'\n class_model = load_model(model_path)\n class_graph = tf.get_default_graph()\n crop_img = cv2.imread(im_name)\n predictions = classify(crop_img, class_model)\n print(predictions)\n",
"step-5": "import pytesseract\nfrom PIL import Image\nimport tensorflow as tf\n\nfrom keras.models import load_model\nfrom tensorflow import Graph\n\nimport os\nimport json\nimport cv2\nimport numpy as np\n\nglobal class_graph\n\n\n\n\ndef classify(img, c_model):\n #global class_graph\n \"\"\" classifies images in a given folder using the 'model'\"\"\"\n\n #img = load_img(im_path,target_size=(input_height, input_width))\n #img = img_to_array(img)\n im_size = 128\n # resize \n\n img = cv2.resize(img, (im_size,im_size))\n\n img = img.astype(\"float\") / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n\n return predictions\n\nif __name__ == '__main__':\n im_name = \"data/demo/images(1).jpg\"\n # load model\n model_path = \"data/credit-card.model\"\n class_model = load_model(model_path)\n\n class_graph=tf.get_default_graph()\n\n\n crop_img = cv2.imread(im_name)\n\n predictions = classify(crop_img, class_model)\n print(predictions)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sublime
import sublime_plugin
class PromptSurrounderCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel("Surround by:", "", self.on_done, None, None)
def on_done(self, tag):
try:
if self.window.active_view():
self.window.active_view().run_command("surround_by", {"tag": tag})
except ValueError:
print('hi')
class SurroundByCommand(sublime_plugin.TextCommand):
def run(self, edit, tag):
for region in self.view.sel():
text = self.view.substr(region)
self.view.replace(edit,region,"<"+tag+">"+text+"</"+tag.split()[0]+">")
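# To trigger this from Sublime, a user key binding along these lines should
# work (assumption: Sublime derives the command name "prompt_surrounder"
# from the PromptSurrounderCommand class name):
#   { "keys": ["ctrl+shift+w"], "command": "prompt_surrounder" }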
|
normal
|
{
"blob_id": "bcc4276ea240247519cabbf5fc5646a9147ee3be",
"index": 545,
"step-1": "<mask token>\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"step-2": "<mask token>\n\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n <mask token>\n <mask token>\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"step-3": "<mask token>\n\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n\n def run(self):\n self.window.show_input_panel('Surround by:', '', self.on_done, None,\n None)\n\n def on_done(self, tag):\n try:\n if self.window.active_view():\n self.window.active_view().run_command('surround_by', {'tag':\n tag})\n except ValueError:\n print('hi')\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"step-4": "import sublime\nimport sublime_plugin\n\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n\n def run(self):\n self.window.show_input_panel('Surround by:', '', self.on_done, None,\n None)\n\n def on_done(self, tag):\n try:\n if self.window.active_view():\n self.window.active_view().run_command('surround_by', {'tag':\n tag})\n except ValueError:\n print('hi')\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"step-5": "import sublime\nimport sublime_plugin\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n def run(self):\n self.window.show_input_panel(\"Surround by:\", \"\", self.on_done, None, None)\n\n def on_done(self, tag):\n try:\n if self.window.active_view():\n self.window.active_view().run_command(\"surround_by\", {\"tag\": tag})\n except ValueError:\n print('hi')\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit, tag):\n\t\tfor region in self.view.sel():\n\t\t\ttext = self.view.substr(region)\n\t\t\tself.view.replace(edit,region,\"<\"+tag+\">\"+text+\"</\"+tag.split()[0]+\">\")\n\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
# %matplotlib inline
import tensorflow as tf
#import tensorflow.keras as K
import numpy as np
import math
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
# from keras import backend as K
from keras.models import Sequential, load_model
# from K.models import Sequential, load_model
from keras.layers import InputLayer, Input, Dense, Dropout
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from keras.backend import clear_session
## pip install h5py scikit-optimize
## once you have that installed, you can run the following code.
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
matplotlib.use('GTKAgg')
from skopt.plots import plot_convergence
matplotlib.use('GTKAgg')
from skopt.plots import plot_objective, plot_evaluations
matplotlib.use('GTKAgg')
import csv
from timeit import default_timer as timer
#from skopt.plots import plot_histogram, plot_objective_2D
from skopt.utils import use_named_args
from sklearn.metrics import roc_auc_score ## Computer Area Under the Curve
from datetime import datetime ## time the Optimization time
## Load Dataset
train_samples = np.loadtxt("data/train_samples.txt", delimiter=' ', comments='# ', encoding=None)
train_labels = np.loadtxt("data/train_labels.txt", delimiter=' ', comments='# ', encoding=None)
valid_samples = np.loadtxt("data/valid_samples.txt", delimiter=' ', comments='# ', encoding=None)
valid_labels = np.loadtxt("data/valid_labels.txt", delimiter=' ', comments='# ', encoding=None)
## To set up the search space, I first need to define the search-space dimensions,
## i.e. which parameters we are going to explore.
## For each of the parameters, we define a dimension explicitly.
##
## The learning rate is any real number between 0.000001 and 0.01, but the search
## is not done uniformly over those bounds:
## 'log-uniform' specifies how the transformation (updates) of these values is done
learning_rate_dim = Real(low=1e-6, high=1e-2, prior='log-uniform', name='learning_rate')
## The number of layers, on the other hand, is explored within bounds; increments are done using integers
dense_layers_dim = Integer(low=1, high=5, name='dense_layers')
## We'll also try different numbers of nodes per layer
nodes_dim = Integer(low=5, high=512, name='nodes')
## Finally we have a Categorical dimension; its categories need to be specified
## explicitly, because scikit-optimize isn't going to generate them for you
activation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')
## Combine all the parameters into a list, so that we can pass it to a function
dimensions = [learning_rate_dim,
dense_layers_dim,
nodes_dim,
activation_dim]
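## A candidate is just an ordered list matching `dimensions`, e.g.
## [3.2e-4, 2, 128, 'relu']; gp_minimize passes such lists to the objective,
## and @use_named_args unpacks them into named keyword arguments.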
## To kick off, it's helpful to start the search using a set of hyperparameters that we
## intuitively know performs well.
## These default parameters aren't horrible, but they don't perform great either.
default_parameters = [1e-5, 1, 16, 'relu']
## To log the performance of the model
def log_dir_name(learning_rate, dense_layers, nodes, activation):
"""
Creates a directory named after the set of hyperparameters that was recently selected. A helper function
to log the results of training every constructed model.
"""
# the dir-name for the TensorBoard log-dir
s = "./2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/"
log_dir = s.format(learning_rate, dense_layers, nodes, activation)
return log_dir
## This funcion is copied from my previous solution on Grid SearchCV
def create_model(learning_rate, dense_layers, nodes, activation, dropout_rate=0.1):
"""
A helper function for the classifier to help construct a model after each run.
learing_rate: Learning-rate for the optimizer.
dense_layer: Number of dense layers for the sequentail model
nodes: Number of nodes in each inner dense layer.
activation: Activation function for all layers.
Additionally, we can improve on this function by adding a separate activation for
the output layer.
"""
model = Sequential()
global train_samples
## Input-shape must be a tuple without the batch size.
input_shape = (1,) + train_samples.shape
model.add(InputLayer(input_shape=(len(train_samples[0]),)))
## Needful only in case of convolutional layers.
# model.add(Reshape(img_shape_full))
for i in range(dense_layers):
## Name each layer, because Keras should give them unique names.
name = 'layer_dense_{0}'.format(i+1)
## Add these fully-connected layers to the model.
model.add(Dense(nodes, activation=activation, name=name))
model.add(Dropout(dropout_rate))
## Last output layer with softmax-activation.
## Used heavily for classification.
model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(lr=learning_rate)
## Compile the model
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
return model
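## Quick sanity check of the builder (illustrative, untuned values):
##   m = create_model(learning_rate=1e-4, dense_layers=2, nodes=64, activation='relu')
##   m.summary()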
## Before we start training any model, let's first save the path where we'll store the best-performing model.
best_model_path = '19_best_model.keras'
## A global variable to keep track of the best obtained accuracy.
best_auc = 0.0
@use_named_args(dimensions=dimensions)
def fitness(learning_rate, dense_layers, nodes, activation):
"""
"""
# Print the selected hyperparameters.
	print('learning rate: {0:.1e}'.format(learning_rate))
print('num_dense_layers:', dense_layers)
print('num_nodes:', nodes)
print('activation:', activation)
print("")
## Create the neural network with these hyperparameters.
model = create_model(learning_rate, dense_layers, nodes, activation)
## Create log files for the model.
## Not important for now!
# callback_log = TensorBoard(
# log_dir=log_dir,
# histogram_freq=0,
# batch_size=32,
# write_graph=True,
# write_grads=False,
# write_images=False)
## Use Keras to train the model.
history = model.fit(x=train_samples,
y=train_labels,
epochs=10,
batch_size=int(4010/4))
#callbacks=[callback_log])
## Get the classification accuracy on the validation set after the last training epoch.
# accuracy = history.history['val_acc'][-1]
predictions = model.predict(valid_samples)
auc = roc_auc_score(valid_labels, predictions)
## Print the calssification accuracy.
print('')
print("AUC = : {0:.2%}".format(auc))
print('')
## Save the model if it improves on the best-found performance.
## We use the global keyword so we update the variable outside of this function.
global best_auc
if auc > best_auc:
## Save the new model to harddisk.
model.save(best_model_path)
## Update the classification accuracy.
best_auc = auc
	## Delete the Keras model with these hyperparameters from memory.
## Also clear the session.
del model
# tf.keras.clear_session()
clear_session()
return -auc
## Now we run our fitness function with the default hyperparameters that we set earlier.
## That's the reason for the @ annotation
fitness(x=default_parameters)
search_result = gp_minimize(func=fitness,
dimensions=dimensions,
acq_func='EI', # Expected Improvement.
n_calls=40,
x0=default_parameters)
## Report Result of the optimizer.
print("Best serach results:")
print(search_result.x)
print(search_result.space)
print("Lowest fitness value:")
print(search_result.fun)
zipped = sorted(zip(search_result.func_vals, search_result.x_iters))
print(zipped)
## Write sorted results to csv file for exporting
of = open('output_bayesian_optimization.csv', 'w')
header="Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\n"
of.write(header)
for i in zipped:
row = "{0}; {1}; {2}; {3}; {4};\n".format(i[0], i[1][0], i[1][1], i[1][2], i[1][3])
of.write(row)
of.close()
## Plot results of optimizer
dim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']
plot_objective(search_result, dimensions=dim_names)
plot_evaluations(search_result)
|
normal
|
{
"blob_id": "db9068e54607e9df48328435ef07f15b4c25a6db",
"index": 7412,
"step-1": "<mask token>\n\n\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\n s = './2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/'\n log_dir = s.format(learning_rate, dense_layers, nodes, activation)\n return log_dir\n\n\ndef create_model(learning_rate, dense_layers, nodes, activation,\n dropout_rate=0.1):\n \"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n model = Sequential()\n global train_samples\n input_shape = (1,) + train_samples.shape\n model.add(InputLayer(input_shape=(len(train_samples[0]),)))\n for i in range(dense_layers):\n name = 'layer_dense_{0}'.format(i + 1)\n model.add(Dense(nodes, activation=activation, name=name))\n model.add(Dropout(dropout_rate))\n model.add(Dense(1, activation='sigmoid'))\n optimizer = Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=\n ['accuracy'])\n return model\n\n\n<mask token>\n\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\t\"\"\"\n print('learning rate: {0:.1f}'.format(learning_rate))\n print('num_dense_layers:', dense_layers)\n print('num_nodes:', nodes)\n print('activation:', activation)\n print('')\n model = create_model(learning_rate, dense_layers, nodes, activation)\n history = model.fit(x=train_samples, y=train_labels, epochs=10,\n batch_size=int(4010 / 4))\n predictions = model.predict(valid_samples)\n auc = roc_auc_score(valid_labels, predictions)\n print('')\n print('AUC = : {0:.2%}'.format(auc))\n print('')\n global best_auc\n if auc > best_auc:\n model.save(best_model_path)\n best_auc = auc\n del model\n clear_session()\n return -auc\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\n\n\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\n s = './2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/'\n log_dir = s.format(learning_rate, dense_layers, nodes, activation)\n return log_dir\n\n\ndef create_model(learning_rate, dense_layers, nodes, activation,\n dropout_rate=0.1):\n \"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n model = Sequential()\n global train_samples\n input_shape = (1,) + train_samples.shape\n model.add(InputLayer(input_shape=(len(train_samples[0]),)))\n for i in range(dense_layers):\n name = 'layer_dense_{0}'.format(i + 1)\n model.add(Dense(nodes, activation=activation, name=name))\n model.add(Dropout(dropout_rate))\n model.add(Dense(1, activation='sigmoid'))\n optimizer = Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=\n ['accuracy'])\n return model\n\n\n<mask token>\n\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\t\"\"\"\n print('learning rate: {0:.1f}'.format(learning_rate))\n print('num_dense_layers:', dense_layers)\n print('num_nodes:', nodes)\n print('activation:', activation)\n print('')\n model = create_model(learning_rate, dense_layers, nodes, activation)\n history = model.fit(x=train_samples, y=train_labels, epochs=10,\n batch_size=int(4010 / 4))\n predictions = model.predict(valid_samples)\n auc = roc_auc_score(valid_labels, predictions)\n print('')\n print('AUC = : {0:.2%}'.format(auc))\n print('')\n global best_auc\n if auc > best_auc:\n model.save(best_model_path)\n best_auc = auc\n del model\n clear_session()\n return -auc\n\n\nfitness(x=default_parameters)\n<mask token>\nprint('Best serach results:')\nprint(search_result.x)\nprint(search_result.space)\nprint('Lowest fitness value:')\nprint(search_result.fun)\n<mask token>\nprint(zipped)\n<mask token>\nof.write(header)\nfor i in zipped:\n row = '{0}; {1}; {2}; {3}; {4};\\n'.format(i[0], i[1][0], i[1][1], i[1][\n 2], i[1][3])\n of.write(row)\nof.close()\n<mask token>\nplot_objective(search_result, dimensions=dim_names)\nplot_evaluations(search_result)\n",
"step-3": "<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\nmatplotlib.use('GTKAgg')\n<mask token>\ntrain_samples = np.loadtxt('data/train_samples.txt', delimiter=' ',\n comments='# ', encoding=None)\ntrain_labels = np.loadtxt('data/train_labels.txt', delimiter=' ', comments=\n '# ', encoding=None)\nvalid_samples = np.loadtxt('data/valid_samples.txt', delimiter=' ',\n comments='# ', encoding=None)\nvalid_labels = np.loadtxt('data/valid_labels.txt', delimiter=' ', comments=\n '# ', encoding=None)\nlearning_rate_dim = Real(low=1e-06, high=0.01, prior='log-uniform', name=\n 'learning_rate')\ndense_layers_dim = Integer(low=1, high=5, name='dense_layers')\nnodes_dim = Integer(low=5, high=512, name='nodes')\nactivation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')\ndimensions = [learning_rate_dim, dense_layers_dim, nodes_dim, activation_dim]\ndefault_parameters = [1e-05, 1, 16, 'relu']\n\n\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\n s = './2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/'\n log_dir = s.format(learning_rate, dense_layers, nodes, activation)\n return log_dir\n\n\ndef create_model(learning_rate, dense_layers, nodes, activation,\n dropout_rate=0.1):\n \"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n model = Sequential()\n global train_samples\n input_shape = (1,) + train_samples.shape\n model.add(InputLayer(input_shape=(len(train_samples[0]),)))\n for i in range(dense_layers):\n name = 'layer_dense_{0}'.format(i + 1)\n model.add(Dense(nodes, activation=activation, name=name))\n model.add(Dropout(dropout_rate))\n model.add(Dense(1, activation='sigmoid'))\n optimizer = Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=\n ['accuracy'])\n return model\n\n\nbest_model_path = '19_best_model.keras'\nbest_auc = 0.0\n\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\t\"\"\"\n print('learning rate: {0:.1f}'.format(learning_rate))\n print('num_dense_layers:', dense_layers)\n print('num_nodes:', nodes)\n print('activation:', activation)\n print('')\n model = create_model(learning_rate, dense_layers, nodes, activation)\n history = model.fit(x=train_samples, y=train_labels, epochs=10,\n batch_size=int(4010 / 4))\n predictions = model.predict(valid_samples)\n auc = roc_auc_score(valid_labels, predictions)\n print('')\n print('AUC = : {0:.2%}'.format(auc))\n print('')\n global best_auc\n if auc > best_auc:\n model.save(best_model_path)\n best_auc = auc\n del model\n clear_session()\n return -auc\n\n\nfitness(x=default_parameters)\nsearch_result = gp_minimize(func=fitness, dimensions=dimensions, acq_func=\n 'EI', n_calls=40, x0=default_parameters)\nprint('Best serach results:')\nprint(search_result.x)\nprint(search_result.space)\nprint('Lowest fitness value:')\nprint(search_result.fun)\nzipped = 
sorted(zip(search_result.func_vals, search_result.x_iters))\nprint(zipped)\nof = open('output_bayesian_optimization.csv', 'w')\nheader = 'Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\\n'\nof.write(header)\nfor i in zipped:\n row = '{0}; {1}; {2}; {3}; {4};\\n'.format(i[0], i[1][0], i[1][1], i[1][\n 2], i[1][3])\n of.write(row)\nof.close()\ndim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']\nplot_objective(search_result, dimensions=dim_names)\nplot_evaluations(search_result)\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport math\nimport matplotlib\nmatplotlib.use('GTKAgg')\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential, load_model\nfrom keras.layers import InputLayer, Input, Dense, Dropout\nfrom keras.callbacks import TensorBoard\nfrom keras.optimizers import Adam\nfrom keras.backend import clear_session\nimport skopt\nfrom skopt import gp_minimize, forest_minimize\nfrom skopt.space import Real, Categorical, Integer\nmatplotlib.use('GTKAgg')\nfrom skopt.plots import plot_convergence\nmatplotlib.use('GTKAgg')\nfrom skopt.plots import plot_objective, plot_evaluations\nmatplotlib.use('GTKAgg')\nimport csv\nfrom timeit import default_timer as timer\nfrom skopt.utils import use_named_args\nfrom sklearn.metrics import roc_auc_score\nfrom datetime import datetime\ntrain_samples = np.loadtxt('data/train_samples.txt', delimiter=' ',\n comments='# ', encoding=None)\ntrain_labels = np.loadtxt('data/train_labels.txt', delimiter=' ', comments=\n '# ', encoding=None)\nvalid_samples = np.loadtxt('data/valid_samples.txt', delimiter=' ',\n comments='# ', encoding=None)\nvalid_labels = np.loadtxt('data/valid_labels.txt', delimiter=' ', comments=\n '# ', encoding=None)\nlearning_rate_dim = Real(low=1e-06, high=0.01, prior='log-uniform', name=\n 'learning_rate')\ndense_layers_dim = Integer(low=1, high=5, name='dense_layers')\nnodes_dim = Integer(low=5, high=512, name='nodes')\nactivation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')\ndimensions = [learning_rate_dim, dense_layers_dim, nodes_dim, activation_dim]\ndefault_parameters = [1e-05, 1, 16, 'relu']\n\n\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. 
A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\n s = './2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/'\n log_dir = s.format(learning_rate, dense_layers, nodes, activation)\n return log_dir\n\n\ndef create_model(learning_rate, dense_layers, nodes, activation,\n dropout_rate=0.1):\n \"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n model = Sequential()\n global train_samples\n input_shape = (1,) + train_samples.shape\n model.add(InputLayer(input_shape=(len(train_samples[0]),)))\n for i in range(dense_layers):\n name = 'layer_dense_{0}'.format(i + 1)\n model.add(Dense(nodes, activation=activation, name=name))\n model.add(Dropout(dropout_rate))\n model.add(Dense(1, activation='sigmoid'))\n optimizer = Adam(lr=learning_rate)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=\n ['accuracy'])\n return model\n\n\nbest_model_path = '19_best_model.keras'\nbest_auc = 0.0\n\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n \"\"\"\n\t\"\"\"\n print('learning rate: {0:.1f}'.format(learning_rate))\n print('num_dense_layers:', dense_layers)\n print('num_nodes:', nodes)\n print('activation:', activation)\n print('')\n model = create_model(learning_rate, dense_layers, nodes, activation)\n history = model.fit(x=train_samples, y=train_labels, epochs=10,\n batch_size=int(4010 / 4))\n predictions = model.predict(valid_samples)\n auc = roc_auc_score(valid_labels, predictions)\n print('')\n print('AUC = : {0:.2%}'.format(auc))\n print('')\n global best_auc\n if auc > best_auc:\n model.save(best_model_path)\n best_auc = auc\n del model\n clear_session()\n return -auc\n\n\nfitness(x=default_parameters)\nsearch_result = gp_minimize(func=fitness, dimensions=dimensions, acq_func=\n 'EI', n_calls=40, x0=default_parameters)\nprint('Best serach results:')\nprint(search_result.x)\nprint(search_result.space)\nprint('Lowest fitness value:')\nprint(search_result.fun)\nzipped = sorted(zip(search_result.func_vals, search_result.x_iters))\nprint(zipped)\nof = open('output_bayesian_optimization.csv', 'w')\nheader = 'Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\\n'\nof.write(header)\nfor i in zipped:\n row = '{0}; {1}; {2}; {3}; {4};\\n'.format(i[0], i[1][0], i[1][1], i[1][\n 2], i[1][3])\n of.write(row)\nof.close()\ndim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']\nplot_objective(search_result, dimensions=dim_names)\nplot_evaluations(search_result)\n",
"step-5": "# %matplotlib inline\nimport tensorflow as tf\n#import tensorflow.keras as K\nimport numpy as np\nimport math\nimport matplotlib\nmatplotlib.use('GTKAgg')\nimport matplotlib.pyplot as plt\n\n# from keras import backend as K\nfrom keras.models import Sequential, load_model\n# from K.models import Sequential, load_model\nfrom keras.layers import InputLayer, Input, Dense, Dropout\nfrom keras.callbacks import TensorBoard\nfrom keras.optimizers import Adam\nfrom keras.backend import clear_session\n## pip install h5py scikit-optimize\n## once you have that installed, you can run the following code.\nimport skopt\nfrom skopt import gp_minimize, forest_minimize\nfrom skopt.space import Real, Categorical, Integer\nmatplotlib.use('GTKAgg')\nfrom skopt.plots import plot_convergence\nmatplotlib.use('GTKAgg')\nfrom skopt.plots import plot_objective, plot_evaluations\nmatplotlib.use('GTKAgg')\nimport csv\nfrom timeit import default_timer as timer\n\n#from skopt.plots import plot_histogram, plot_objective_2D\nfrom skopt.utils import use_named_args\nfrom sklearn.metrics import roc_auc_score ## Computer Area Under the Curve\nfrom datetime import datetime ## time the Optimization time\n\n## Load Datset\ntrain_samples = np.loadtxt(\"data/train_samples.txt\", delimiter=' ', comments='# ', encoding=None)\ntrain_labels = np.loadtxt(\"data/train_labels.txt\", delimiter=' ', comments='# ', encoding=None)\nvalid_samples = np.loadtxt(\"data/valid_samples.txt\", delimiter=' ', comments='# ', encoding=None)\nvalid_labels = np.loadtxt(\"data/valid_labels.txt\", delimiter=' ', comments='# ', encoding=None)\n\n## To set up this search space, I first need to define the search space dimension, what parameters are we gonna explore.\n## for each of the parameters, we define a dimension explicitly\n##\n## The learning rate is any real number between 0.000001 and 0.1. But the seraching is done not in bounds.\n## 'log-uniform' specifies how the trasformation(updates) of these values is \nlearning_rate_dim = Real(low=1e-6, high=1e-2, prior='log-uniform', name='learning_rate')\n## The number of alyers on the other hand is explored in bounds, increments are done using integers\ndense_layers_dim = Integer(low=1, high=5, name='dense_layers')\n## We'll also different number of nodes in a layer\nnodes_dim = Integer(low=5, high=512, name='nodes')\n## Finally we have a Categorical dimension, this needs to be specified explicitly, because scikit-learn\n## isn't gonna generate some randomly for you\nactivation_dim = Categorical(categories=['relu', 'sigmoid'], name='activation')\n## Combine all the parameters into a list, so that we can pass it to a function\ndimensions = [learning_rate_dim,\n\t\t\tdense_layers_dim,\n\t\t\tnodes_dim,\n\t\t\tactivation_dim]\n\n\n## To kick off, it's helpful to start the serach using a set of hyperparameters that we\n## intuitively know performes well\n## These default parameters aren't horrible, but they don't perform great either\ndefault_parameters = [1e-5, 1, 16, 'relu']\n\n\n## To log the performance of the model\ndef log_dir_name(learning_rate, dense_layers, nodes, activation):\n\t\"\"\"\n\tCreates a directory named after the set of hyperparameters that was recently selected. 
A helper function\n\tto log the results of training every constructed model.\n\t\"\"\"\t\n\t# the dir-name for the TensorBoard log-dir\n\ts = \"./2_logs/lr_{0:.0e}_layers{1}_nodes{2}_{3}/\"\n\tlog_dir = s.format(learning_rate, dense_layers, nodes, activation)\n\n\treturn log_dir\n\n\n## This funcion is copied from my previous solution on Grid SearchCV\ndef create_model(learning_rate, dense_layers, nodes, activation, dropout_rate=0.1):\n\t\"\"\"\n\tA helper function for the classifier to help construct a model after each run.\n\n\tlearing_rate:\tLearning-rate for the optimizer.\n\tdense_layer: \tNumber of dense layers for the sequentail model\n\tnodes:\t\t\tNumber of nodes in each inner dense layer.\n\tactivation:\t\tActivation function for all layers.\n\tAdditionally, we can improve on this function by adding a separate activation for\n\tthe output layer.\n\t\"\"\"\n\tmodel = Sequential()\n\tglobal train_samples\n\t## Input-shape must be a tuple without the batch size.\n\tinput_shape = (1,) + train_samples.shape\n\tmodel.add(InputLayer(input_shape=(len(train_samples[0]),)))\n\t## Needful only in case of convolutional layers.\n\t# model.add(Reshape(img_shape_full))\n\tfor i in range(dense_layers):\n\t\t## Name each layer, because Keras should give them unique names.\n\t\tname = 'layer_dense_{0}'.format(i+1)\n\t\t## Add these fully-connected layers to the model.\n\t\tmodel.add(Dense(nodes, activation=activation, name=name))\n\t\tmodel.add(Dropout(dropout_rate))\n\n\t## Last output layer with softmax-activation.\n\t## Used heavily for classification.\n\tmodel.add(Dense(1, activation='sigmoid'))\n\n\toptimizer = Adam(lr=learning_rate)\n\t## Compile the model\n\tmodel.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n\n\treturn model\n\n \n## Before we start training any model, let's first save the path where we'll store the best-performing model.\nbest_model_path = '19_best_model.keras'\n## A global variable to keep track of the best obtained accuracy.\nbest_auc = 0.0\n\n@use_named_args(dimensions=dimensions)\ndef fitness(learning_rate, dense_layers, nodes, activation):\n\t\"\"\"\n\t\"\"\"\n\t# Print the selected hyperparameters.\n\tprint('learning rate: {0:.1f}'.format(learning_rate))\n\tprint('num_dense_layers:', dense_layers)\n\tprint('num_nodes:', nodes)\n\tprint('activation:', activation)\n\tprint(\"\")\n\t## Create the neural network with these hyperparameters.\n\tmodel = create_model(learning_rate, dense_layers, nodes, activation)\n\t## Create log files for the model.\n\t## Not important for now!\n\t# callback_log = TensorBoard(\n\t# \tlog_dir=log_dir,\n\t# \thistogram_freq=0,\n\t# \tbatch_size=32,\n\t# \twrite_graph=True,\n\t# \twrite_grads=False,\n\t# \twrite_images=False)\n\t## Use Keras to train the model.\n\thistory = model.fit(x=train_samples,\n\t\ty=train_labels,\n\t\tepochs=10,\n\t\tbatch_size=int(4010/4))\n\t\t#callbacks=[callback_log])\n\t## Get the classification accuracy on the validation set after the last training epoch.\n\t# accuracy = history.history['val_acc'][-1]\n\tpredictions = model.predict(valid_samples)\n\tauc = roc_auc_score(valid_labels, predictions)\n\t## Print the calssification accuracy.\n\tprint('')\n\tprint(\"AUC = : {0:.2%}\".format(auc))\n\tprint('')\n\n\t## Save the model if it improves on the best-found performance.\n\t## We use the global keyword so we update the variable outside of this function.\n\tglobal best_auc\n\tif auc > best_auc:\n\t\t## Save the new model to harddisk.\n\t\tmodel.save(best_model_path)\n\t\t## 
Update the classification accuracy.\n\t\tbest_auc = auc\n\n\t## Delete the Keras model with these heyper parameters from memory.\n\n\t## Also clear the session.\n\tdel model\n# tf.keras.clear_session()\n\tclear_session()\n\n\treturn -auc\n\n## Now we run our fitness function with the default hyperparameters that we set earlier.\n## That's the reason for the @ annotation \nfitness(x=default_parameters)\n\nsearch_result = gp_minimize(func=fitness,\n\tdimensions=dimensions,\n\tacq_func='EI', # Expected Improvement.\n\tn_calls=40,\n\tx0=default_parameters)\n\n## Report Result of the optimizer.\nprint(\"Best serach results:\")\nprint(search_result.x)\nprint(search_result.space)\nprint(\"Lowest fitness value:\")\nprint(search_result.fun)\nzipped = sorted(zip(search_result.func_vals, search_result.x_iters))\nprint(zipped)\n\n## Write sorted results to csv file for exporting\nof = open('output_bayesian_optimization.csv', 'w')\nheader=\"Fit Value; Learning Rate; Dense Layers; Num. Neurons; Activation\\n\"\nof.write(header)\nfor i in zipped:\n row = \"{0}; {1}; {2}; {3}; {4};\\n\".format(i[0], i[1][0], i[1][1], i[1][2], i[1][3])\n of.write(row)\nof.close()\n\n## Plot results of optimizer\ndim_names = ['learning_rate', 'dense_layers', 'nodes', 'activation']\nplot_objective(search_result, dimensions=dim_names)\nplot_evaluations(search_result)\n\n\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
array_length = int(input())
source = [int(x) for x in input().split()]
def find_neighbors():
    """For each element of `source`, print its distance to the nearest zero."""
    previous_zero_index = -1
    count = 0
    result = []
    for index, value in enumerate(source):
        count += 1  # running distance from the last zero seen on the left
        if value == 0:
            if index == 0:
                previous_zero_index = 0
                count = 0
                result.append(0)
                continue
            if previous_zero_index == -1:
                # first zero: earlier counts must count down towards this zero
                result[0: index] = reversed(result[0:index])
                previous_zero_index = index
                count = 0
                result.append(0)
                continue
            result.append(0)
            # between two zeros the second half of the gap mirrors the first half
            diff = (index - previous_zero_index) // 2
            result[index - diff: index] = reversed(result[previous_zero_index + 1: previous_zero_index + 1 + diff])
            previous_zero_index = index
            count = 0
            continue
        result.append(count)
    for i in result:
        print(i, end=" ")
find_neighbors()
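# Example session (hand-traced, assuming the task is "distance to the
# nearest zero"): for input
#   7
#   0 1 4 2 0 4 0
# the program prints: 0 1 2 1 0 1 0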
|
normal
|
{
"blob_id": "6d362b87b595fc59df31d1f0bb561dc83633a2ac",
"index": 9216,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_neighbors():\n previous_zero_index = -1\n count = 0\n result = []\n for index, value in enumerate(source):\n count += 1\n if value == 0:\n if index == 0:\n previous_zero_index = 0\n count = 0\n result.append(0)\n continue\n if previous_zero_index == -1:\n result[0:index] = reversed(result[0:index])\n previous_zero_index = index\n count = 0\n result.append(0)\n continue\n result.append(0)\n diff = (index - previous_zero_index) // 2\n result[index - diff:index] = reversed(result[\n previous_zero_index + 1:previous_zero_index + 1 + diff])\n previous_zero_index = index\n count = 0\n continue\n result.append(count)\n for i in result:\n print(i, end=' ')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_neighbors():\n previous_zero_index = -1\n count = 0\n result = []\n for index, value in enumerate(source):\n count += 1\n if value == 0:\n if index == 0:\n previous_zero_index = 0\n count = 0\n result.append(0)\n continue\n if previous_zero_index == -1:\n result[0:index] = reversed(result[0:index])\n previous_zero_index = index\n count = 0\n result.append(0)\n continue\n result.append(0)\n diff = (index - previous_zero_index) // 2\n result[index - diff:index] = reversed(result[\n previous_zero_index + 1:previous_zero_index + 1 + diff])\n previous_zero_index = index\n count = 0\n continue\n result.append(count)\n for i in result:\n print(i, end=' ')\n\n\nfind_neighbors()\n",
"step-4": "array_length = int(input())\nsource = [int(x) for x in input().split()]\n\n\ndef find_neighbors():\n previous_zero_index = -1\n count = 0\n result = []\n for index, value in enumerate(source):\n count += 1\n if value == 0:\n if index == 0:\n previous_zero_index = 0\n count = 0\n result.append(0)\n continue\n if previous_zero_index == -1:\n result[0:index] = reversed(result[0:index])\n previous_zero_index = index\n count = 0\n result.append(0)\n continue\n result.append(0)\n diff = (index - previous_zero_index) // 2\n result[index - diff:index] = reversed(result[\n previous_zero_index + 1:previous_zero_index + 1 + diff])\n previous_zero_index = index\n count = 0\n continue\n result.append(count)\n for i in result:\n print(i, end=' ')\n\n\nfind_neighbors()\n",
"step-5": "array_length = int(input())\nsource = [int(x) for x in input().split()]\n\ndef find_neighbors():\n previous_zero_index = -1\n count = 0\n result = []\n for index, value in enumerate(source):\n count += 1\n\n if value == 0:\n if index == 0:\n previous_zero_index = 0\n count = 0\n result.append(0)\n continue\n\n if previous_zero_index == -1:\n result[0: index] = reversed(result[0:index])\n previous_zero_index = index\n count = 0\n result.append(0)\n continue\n\n result.append(0)\n diff = (index - previous_zero_index) // 2\n result[index - diff: index] = reversed(result[previous_zero_index + 1: previous_zero_index + 1 + diff])\n\n previous_zero_index = index\n count = 0\n continue\n\n result.append(count)\n for i in result:\n print(i, end=\" \")\n\nfind_neighbors()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render
import datetime
from django.http import HttpResponse, JsonResponse, FileResponse
from django.core.files.storage import FileSystemStorage
import uuid
import os
import cv2
import numpy as np
from pathlib import Path
def index(request):
	print(request.session)
	today=datetime.datetime.now()
	return render(request,'index.html',{
	"today":today.strftime("%d-%m-%Y")})
def isFileOpen(request):
	stack=request.session['stack']
	if len(stack)>0 and request.session.get('name')!=None and request.session.get('email')!=None:
		return True
	else:
		return False
def getState(request):
	if isFileOpen(request):
		fileName=request.session['stack'][0]
		email=request.session['email']
		name=request.session['name']
		return JsonResponse({'state':'open','name':name,'email':email,'fileName':fileName})
	else:
		return JsonResponse({'state':None,'name':'','email':'','fileName':''})
def openFile(request):
if request.method=='POST' and request.FILES['fileName']:
imageFile=request.FILES['fileName']
fs=FileSystemStorage()
imageFileName=fs.save(imageFile.name,imageFile)
stack=[]
redostack=[]
imgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%imageFileName))
img=cv2.imread(imgpath)
(h, w) = img.shape[:2]
r = 500 / float(h)
dim = (int(w * r),500)
stdimg=cv2.resize(img,dim,interpolation=cv2.INTER_AREA)
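		# e.g. for a 1000x800 (h x w) upload: r = 500/1000 = 0.5 and
		# dim = (int(800*0.5), 500) = (400, 500); cv2.resize takes
		# (width, height), so uploads are normalized to 500 px tall
		# at their original aspect ratio.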
stdimgPath=str(Path(imgpath).with_suffix(''))+str(uuid.uuid4())[-3:]+'.png'
print(stdimgPath)
cv2.imwrite(stdimgPath,stdimg)
stdFileName=stdimgPath.split('/')[-1];
stack.append(stdFileName)
request.session['stack']=stack
print(img.shape)
request.session['size']=()
request.session['redo']=True
request.session['oriImg']=imageFileName
request.session['borderSize']=0;
request.session['email']=request.POST['email']
request.session['name']=request.POST.get('name')
request.session['redostack']=redostack
return JsonResponse({'fileName':imageFileName})
def getImage(request):
if request.method=="GET" and request.session.has_key('stack'):
stack=request.session['stack']
if len(stack)>0:
fileToServer=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]))
return FileResponse(open(fileToServer,'rb'))
return HttpResponse('')
def showOrignal(request):
if request.method=="GET" and request.session.has_key('oriImg'):
stack=request.session['stack']
for file in stack:
fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%file))
os.remove(fileDelete);
request.session.pop('stack')
stack=[]
stack.insert(0,request.session['oriImg'])
request.session['stack']=stack
return JsonResponse({'response':'orignal'})
else:
return HttpResponse('')
def closeFile(request):
if request.method=="GET" and request.session.has_key('stack'):
stack=request.session['stack']
for file in stack:
fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%file))
os.remove(fileDelete);
request.session.pop('stack')
request.session.pop('email')
request.session.pop('name')
return JsonResponse({'response':'closed'})
else:
return HttpResponse('');
def undo(request):
if request.method=="GET" and request.session.has_key('stack') and len(request.session['stack'])>1:
stack=request.session['stack']
fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack.pop(0)))
os.remove(fileDelete);
request.session['stack']=stack;
return JsonResponse({"response":"undid"})
else:
return HttpResponse('')
def redo(request):
if request.method=="GET" and request.session.has_key('redostack') and len(request.session['redostack'])>0:
redoStack=request.session['redostack']
request.session['redo']=False;
value=redoStack.pop()
if(value=='grayscale'):
toGrayscale(request)
if(value=='cool'):
cool(request)
if(value=='scaleIt'):
scaleit(request)
if(value=='setBorder'):
setBorder(request);
request.session['redostack']=redoStack;
return JsonResponse({'response':'redo'})
def toGrayscale(request):
if request.method=="GET" and request.session.has_key('stack'):
stack=request.session['stack']
redostack=request.session['redostack']
if len(stack)>0:
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
grayscalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......
grayImage=cv2.imread(fileAbsPath)
grayImage=cv2.cvtColor(grayImage,cv2.COLOR_BGR2GRAY)
cv2.imwrite(grayscalefilepath,grayImage)
gfilename=grayscalefilepath.split('/')[-1];
stack.insert(0,gfilename)
if request.session['redo']:
redostack.insert(0,'grayscale')
request.session['redo']=True
request.session['stack']=stack
request.session['redostack']=redostack
return JsonResponse({'response':'convertedToGrayscale'})
else:
return HttpResponse()
def scaleit(request):
if request.method=="POST" and request.session.has_key('stack'):
newX=int(request.POST['newX'])
newY=int(request.POST['newY'])
request.session['size']=(newX,newY)
stack=request.session['stack']
redostack=request.session['redostack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
scalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
newimg=cv2.resize(oriimg,(newX,newY),interpolation=cv2.INTER_AREA)
request.session['size']=newimg.shape;
cv2.imwrite(scalefilepath,newimg);
scalefilename=scalefilepath.split('/')[-1]
stack.insert(0,scalefilename)
redostack.insert(0,'scaleIt')
request.session['redostack']=redostack
request.session['stack']=stack;
return JsonResponse({'response':'scaled'})
if request.method=="GET" and request.session.has_key('size'):
newX=request.session['size'][0]
newY=request.session['size'][1]
stack=request.session['stack']
redostack=request.session['redostack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
scalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
newimg=cv2.resize(oriimg,(int(newX),int(newY)))
request.session['size']=newimg.shape;
cv2.imwrite(scalefilepath,newimg);
scalefilename=scalefilepath.split('/')[-1]
stack.insert(0,scalefilename)
		redostack.insert(0,'scaleIt')
request.session['redostack']=redostack
request.session['stack']=stack;
return JsonResponse({'response':'scaled'})
else:
return HttpResponse('')
def cropIt(request):
if request.method=="POST" and request.session.has_key('stack'):
x=int(request.POST['X']);
y=int(request.POST['Y']);
h=int(request.POST['h'])
w=int(request.POST['w'])
stack=request.session['stack']
redostack=request.session['redostack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
cropfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
crop_img = oriimg[y:h, x:w]
cv2.imwrite(cropfilepath,crop_img);
cropfilename=cropfilepath.split('/')[-1]
stack.insert(0,cropfilename)
request.session['redostack']=redostack;
request.session['stack']=stack;
return JsonResponse({'response':'croped'})
else:
return HttpResponse('')
def setBorder(request):
if request.method=="POST" and request.session.has_key('stack'):
bordersize=int(request.POST['size']);
stack=request.session['stack']
redostack=request.session['redostack']
request.session['borderSize']=bordersize
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
row,col=oriimg.shape[:2]
bottom=oriimg[row-2:row,0:col]
mean=cv2.mean(bottom)[0]
border=cv2.copyMakeBorder(oriimg, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean])
cv2.imwrite(borderfilepath,border);
borderfilename=borderfilepath.split('/')[-1]
stack.insert(0,borderfilename)
if request.session['redo']:
redostack.insert(0,'setBorder')
request.session['redo']=True
request.session['redostack']=redostack
request.session['stack']=stack;
return JsonResponse({'response':'croped'})
if request.method=="GET" and request.session.has_key('borderSize'):
bordersize=request.session['borderSize'];
stack=request.session['stack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
row,col=oriimg.shape[:2]
bottom=oriimg[row-2:row,0:col]
mean=cv2.mean(bottom)[0]
border=cv2.copyMakeBorder(oriimg, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean])
cv2.imwrite(borderfilepath,border);
borderfilename=borderfilepath.split('/')[-1]
stack.insert(0,borderfilename)
request.session['stack']=stack;
return JsonResponse({'response':'croped'})
else:
return HttpResponse('')
def cool(request):
if request.method=="GET" and request.session.has_key('stack'):
stack=request.session['stack']
redostack=request.session['redostack']
if len(stack)>0:
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
grayscalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......
grayImage=cv2.imread(fileAbsPath)
grayImage=cv2.applyColorMap(grayImage,cv2.COLORMAP_PARULA)
cv2.imwrite(grayscalefilepath,grayImage)
gfilename=grayscalefilepath.split('/')[-1];
stack.insert(0,gfilename)
if request.session['redo']:
redostack.insert(0,'cool')
request.session['redo']=True
request.session['stack']=stack
request.session['redostack']=redostack
return JsonResponse({'response':'convertedToGrayscale'})
else:
return HttpResponse()
def addWatermark(request):
if request.method=="POST" and request.session.has_key('stack'):
text=request.POST['t']
print(text);
stack=request.session['stack']
redostack=request.session['redostack']
request.session['text']=text
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
textimgPath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
overlay=oriimg.copy()
output=oriimg.copy()
		cv2.putText(overlay,text.format(0.5),(10,30),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
cv2.addWeighted(overlay,0.5,output,1-0.5,0,output)
cv2.imwrite(textimgPath,output);
textimgName=textimgPath.split('/')[-1]
stack.insert(0,textimgName)
if request.session['redo']:
redostack.insert(0,'addWatermark')
request.session['redo']=True
request.session['redostack']=redostack
request.session['stack']=stack;
return JsonResponse({'response':'croped'})
if request.method=="GET" and request.session.has_key('borderSize'):
bordersize=request.session['borderSize'];
stack=request.session['stack']
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...
oriimg=cv2.imread(fileAbsPath)
row,col=oriimg.shape[:2]
bottom=oriimg[row-2:row,0:col]
def rotateRight(request):
if request.method=="GET" and request.session.has_key('stack'):
stack=request.session['stack']
redostack=request.session['redostack']
if len(stack)>0:
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));
rotatefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......
rotateImage=cv2.imread(fileAbsPath)
(h,w)=rotateImage.shape[:2]
center=(w/2,h/2)
angle90=90
scale=1.0
M=cv2.getRotationMatrix2D(center,angle90,scale)
rotated180=cv2.warpAffine(rotateImage,M,(h,w))
cv2.imwrite(rotatefilepath,rotated180)
gfilename=rotatefilepath.split('/')[-1];
stack.insert(0,gfilename)
if request.session['redo']:
redostack.insert(0,'rotateRight')
request.session['redo']=True
request.session['stack']=stack
request.session['redostack']=redostack
return JsonResponse({'response':'rotated'})
else:
return HttpResponse()
def overlay(request):
if request.method=="POST" and request.session.has_key('stack'):
stack=request.session['stack']
if len(stack)>0:
imageFile=request.FILES['fileName']
fs=FileSystemStorage()
imageFileName=fs.save(imageFile.name,imageFile)
imgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%imageFileName))
img=cv2.imread(imgpath)
oriimgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]))
oriimg=cv2.imread(oriimgpath)
h,w=oriimg.shape[:2]
print(h,w);
tsa='large_white_square.png';
transImgPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%tsa))
tsa=cv2.imread(transImgPath);
tsa=cv2.resize(tsa,(w,h))
h,w=tsa.shape[:2]
print(h,w)
x_offset=y_offset=50
tsa[y_offset:y_offset+img.shape[0], x_offset:x_offset+img.shape[1]] = img
h,w=tsa.shape[:2]
print(h,w)
dst=cv2.addWeighted(oriimg,0.7,tsa,0.3,0);
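			# addWeighted computes dst = 0.7*oriimg + 0.3*tsa + 0,
			# i.e. a 70/30 alpha blend of the photo with the overlay canvas.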
uui=str(uuid.uuid4())
print(uui)
print(uui[-3:])
overlayfilepath=str(Path(oriimgpath).with_suffix(''))+uui[-3:]+'.png' #here dirty coding......
cv2.imwrite(overlayfilepath,dst);
overlayfilename=overlayfilepath.split('/')[-1]
stack.insert(0,overlayfilename)
print(stack[0]);
if request.session['redo']:
#redostack.insert(0,'overlayed')
request.session['redo']=True
request.session['stack']=stack
#request.session['redostack']=redostack
return JsonResponse({'response':'rotated'})
else:
return HttpResponse()
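# A minimal urls.py sketch for wiring these views (route names here are
# assumptions for illustration, not taken from the project):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.index),
#       path('open/', views.openFile),
#       path('image/', views.getImage),
#       path('undo/', views.undo),
#       path('redo/', views.redo),
#   ]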
|
normal
|
{
"blob_id": "3378ce72ae67d09258554048138b7f9023000922",
"index": 6619,
"step-1": "<mask token>\n\n\ndef index(request):\n print(request.session)\n today = datetime.datetime.now()\n return render(request, 'index.html', {'today': today.strftime('%d-%m=%Y')})\n\n\ndef isFileOpen(request):\n stack = request.session['stack']\n if stack > 0 and request.session.get('name'\n ) != None and request.session.get('email') != None:\n return true\n else:\n return false\n\n\n<mask token>\n\n\ndef openFile(request):\n if request.method == 'POST' and request.FILES['fileName']:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n stack = []\n redostack = []\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n h, w = img.shape[:2]\n r = 500 / float(h)\n dim = int(w * r), 500\n stdimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n stdimgPath = str(Path(imgpath).with_suffix('')) + str(uuid.uuid4())[-3:\n ] + '.png'\n print(stdimgPath)\n cv2.imwrite(stdimgPath, stdimg)\n stdFileName = stdimgPath.split('/')[-1]\n stack.append(stdFileName)\n request.session['stack'] = stack\n print(img.shape)\n request.session['size'] = ()\n request.session['redo'] = True\n request.session['oriImg'] = imageFileName\n request.session['borderSize'] = 0\n request.session['email'] = request.POST['email']\n request.session['name'] = request.POST.get('name')\n request.session['redostack'] = redostack\n return JsonResponse({'fileName': imageFileName})\n\n\ndef getImage(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n fileToServer = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n return FileResponse(open(fileToServer, 'rb'))\n return HttpResponse('')\n\n\n<mask token>\n\n\ndef closeFile(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response': 'closed'})\n else:\n return HttpResponse('')\n\n\ndef undo(request):\n if request.method == 'GET' and request.session.has_key('stack') and len(\n request.session['stack']) > 1:\n stack = request.session['stack']\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % stack.pop(0)))\n os.remove(fileDelete)\n request.session['stack'] = stack\n return JsonResponse({'response': 'undid'})\n else:\n return HttpResponse('')\n\n\ndef redo(request):\n if request.method == 'GET' and request.session.has_key('redostack'\n ) and len(request.session['redostack']) > 0:\n redoStack = request.session['redostack']\n request.session['redo'] = False\n value = redoStack.pop()\n if value == 'grayscale':\n toGrayscale(request)\n if value == 'cool':\n cool(request)\n if value == 'scaleIt':\n scaleit(request)\n if value == 'setBorder':\n setBorder(request)\n request.session['redostack'] = redoStack\n return JsonResponse({'response': 'redo'})\n\n\ndef toGrayscale(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % 
stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'grayscale')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef scaleit(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n newX = int(request.POST['newX'])\n newY = int(request.POST['newY'])\n request.session['size'] = newX, newY\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleIt')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n if request.method == 'GET' and request.session.has_key('size'):\n newX = request.session['size'][0]\n newY = request.session['size'][1]\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (int(newX), int(newY)))\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleit')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n else:\n return HttpResponse('')\n\n\ndef cropIt(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n x = int(request.POST['X'])\n y = int(request.POST['Y'])\n h = int(request.POST['h'])\n w = int(request.POST['w'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n cropfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath, crop_img)\n cropfilename = cropfilepath.split('/')[-1]\n stack.insert(0, cropfilename)\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\n<mask token>\n\n\ndef rotateRight(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + 
str(uuid.\n uuid4()) + '.png'\n rotateImage = cv2.imread(fileAbsPath)\n h, w = rotateImage.shape[:2]\n center = w / 2, h / 2\n angle90 = 90\n scale = 1.0\n M = cv2.getRotationMatrix2D(center, angle90, scale)\n rotated180 = cv2.warpAffine(rotateImage, M, (h, w))\n cv2.imwrite(rotatefilepath, rotated180)\n gfilename = rotatefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'rotateRight')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n\n\ndef overlay(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n oriimgpath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n oriimg = cv2.imread(oriimgpath)\n h, w = oriimg.shape[:2]\n print(h, w)\n tsa = 'large_white_square.png'\n transImgPath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % tsa))\n tsa = cv2.imread(transImgPath)\n tsa = cv2.resize(tsa, (w, h))\n h, w = tsa.shape[:2]\n print(h, w)\n x_offset = y_offset = 50\n tsa[y_offset:y_offset + img.shape[0], x_offset:x_offset + img.\n shape[1]] = img\n h, w = tsa.shape[:2]\n print(h, w)\n dst = cv2.addWeighted(oriimg, 0.7, tsa, 0.3, 0)\n uui = str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath = str(Path(oriimgpath).with_suffix('')) + uui[-3:\n ] + '.png'\n cv2.imwrite(overlayfilepath, dst)\n overlayfilename = overlayfilepath.split('/')[-1]\n stack.insert(0, overlayfilename)\n print(stack[0])\n if request.session['redo']:\n request.session['redo'] = True\n request.session['stack'] = stack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n",
"step-2": "<mask token>\n\n\ndef index(request):\n print(request.session)\n today = datetime.datetime.now()\n return render(request, 'index.html', {'today': today.strftime('%d-%m=%Y')})\n\n\ndef isFileOpen(request):\n stack = request.session['stack']\n if stack > 0 and request.session.get('name'\n ) != None and request.session.get('email') != None:\n return true\n else:\n return false\n\n\n<mask token>\n\n\ndef openFile(request):\n if request.method == 'POST' and request.FILES['fileName']:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n stack = []\n redostack = []\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n h, w = img.shape[:2]\n r = 500 / float(h)\n dim = int(w * r), 500\n stdimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n stdimgPath = str(Path(imgpath).with_suffix('')) + str(uuid.uuid4())[-3:\n ] + '.png'\n print(stdimgPath)\n cv2.imwrite(stdimgPath, stdimg)\n stdFileName = stdimgPath.split('/')[-1]\n stack.append(stdFileName)\n request.session['stack'] = stack\n print(img.shape)\n request.session['size'] = ()\n request.session['redo'] = True\n request.session['oriImg'] = imageFileName\n request.session['borderSize'] = 0\n request.session['email'] = request.POST['email']\n request.session['name'] = request.POST.get('name')\n request.session['redostack'] = redostack\n return JsonResponse({'fileName': imageFileName})\n\n\ndef getImage(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n fileToServer = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n return FileResponse(open(fileToServer, 'rb'))\n return HttpResponse('')\n\n\ndef showOrignal(request):\n if request.method == 'GET' and request.session.has_key('oriImg'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n stack = []\n stack.insert(0, request.session['oriImg'])\n request.session['stack'] = stack\n return JsonResponse({'response': 'orignal'})\n else:\n return HttpResponse('')\n\n\ndef closeFile(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response': 'closed'})\n else:\n return HttpResponse('')\n\n\ndef undo(request):\n if request.method == 'GET' and request.session.has_key('stack') and len(\n request.session['stack']) > 1:\n stack = request.session['stack']\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % stack.pop(0)))\n os.remove(fileDelete)\n request.session['stack'] = stack\n return JsonResponse({'response': 'undid'})\n else:\n return HttpResponse('')\n\n\ndef redo(request):\n if request.method == 'GET' and request.session.has_key('redostack'\n ) and len(request.session['redostack']) > 0:\n redoStack = request.session['redostack']\n request.session['redo'] = False\n value = redoStack.pop()\n if value == 'grayscale':\n toGrayscale(request)\n if value == 'cool':\n cool(request)\n if value == 
'scaleIt':\n scaleit(request)\n if value == 'setBorder':\n setBorder(request)\n request.session['redostack'] = redoStack\n return JsonResponse({'response': 'redo'})\n\n\ndef toGrayscale(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'grayscale')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef scaleit(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n newX = int(request.POST['newX'])\n newY = int(request.POST['newY'])\n request.session['size'] = newX, newY\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleIt')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n if request.method == 'GET' and request.session.has_key('size'):\n newX = request.session['size'][0]\n newY = request.session['size'][1]\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (int(newX), int(newY)))\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleit')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n else:\n return HttpResponse('')\n\n\ndef cropIt(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n x = int(request.POST['X'])\n y = int(request.POST['Y'])\n h = int(request.POST['h'])\n w = int(request.POST['w'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n cropfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath, crop_img)\n cropfilename = cropfilepath.split('/')[-1]\n stack.insert(0, cropfilename)\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n 
return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\n<mask token>\n\n\ndef cool(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.applyColorMap(grayImage, cv2.COLORMAP_PARULA)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'cool')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\n<mask token>\n\n\ndef rotateRight(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n rotateImage = cv2.imread(fileAbsPath)\n h, w = rotateImage.shape[:2]\n center = w / 2, h / 2\n angle90 = 90\n scale = 1.0\n M = cv2.getRotationMatrix2D(center, angle90, scale)\n rotated180 = cv2.warpAffine(rotateImage, M, (h, w))\n cv2.imwrite(rotatefilepath, rotated180)\n gfilename = rotatefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'rotateRight')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n\n\ndef overlay(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n oriimgpath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n oriimg = cv2.imread(oriimgpath)\n h, w = oriimg.shape[:2]\n print(h, w)\n tsa = 'large_white_square.png'\n transImgPath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % tsa))\n tsa = cv2.imread(transImgPath)\n tsa = cv2.resize(tsa, (w, h))\n h, w = tsa.shape[:2]\n print(h, w)\n x_offset = y_offset = 50\n tsa[y_offset:y_offset + img.shape[0], x_offset:x_offset + img.\n shape[1]] = img\n h, w = tsa.shape[:2]\n print(h, w)\n dst = cv2.addWeighted(oriimg, 0.7, tsa, 0.3, 0)\n uui = str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath = str(Path(oriimgpath).with_suffix('')) + uui[-3:\n ] + '.png'\n cv2.imwrite(overlayfilepath, dst)\n overlayfilename = overlayfilepath.split('/')[-1]\n stack.insert(0, overlayfilename)\n print(stack[0])\n if request.session['redo']:\n request.session['redo'] = True\n request.session['stack'] = stack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n",
"step-3": "<mask token>\n\n\ndef index(request):\n print(request.session)\n today = datetime.datetime.now()\n return render(request, 'index.html', {'today': today.strftime('%d-%m=%Y')})\n\n\ndef isFileOpen(request):\n stack = request.session['stack']\n if stack > 0 and request.session.get('name'\n ) != None and request.session.get('email') != None:\n return true\n else:\n return false\n\n\n<mask token>\n\n\ndef openFile(request):\n if request.method == 'POST' and request.FILES['fileName']:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n stack = []\n redostack = []\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n h, w = img.shape[:2]\n r = 500 / float(h)\n dim = int(w * r), 500\n stdimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n stdimgPath = str(Path(imgpath).with_suffix('')) + str(uuid.uuid4())[-3:\n ] + '.png'\n print(stdimgPath)\n cv2.imwrite(stdimgPath, stdimg)\n stdFileName = stdimgPath.split('/')[-1]\n stack.append(stdFileName)\n request.session['stack'] = stack\n print(img.shape)\n request.session['size'] = ()\n request.session['redo'] = True\n request.session['oriImg'] = imageFileName\n request.session['borderSize'] = 0\n request.session['email'] = request.POST['email']\n request.session['name'] = request.POST.get('name')\n request.session['redostack'] = redostack\n return JsonResponse({'fileName': imageFileName})\n\n\ndef getImage(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n fileToServer = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n return FileResponse(open(fileToServer, 'rb'))\n return HttpResponse('')\n\n\ndef showOrignal(request):\n if request.method == 'GET' and request.session.has_key('oriImg'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n stack = []\n stack.insert(0, request.session['oriImg'])\n request.session['stack'] = stack\n return JsonResponse({'response': 'orignal'})\n else:\n return HttpResponse('')\n\n\ndef closeFile(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response': 'closed'})\n else:\n return HttpResponse('')\n\n\ndef undo(request):\n if request.method == 'GET' and request.session.has_key('stack') and len(\n request.session['stack']) > 1:\n stack = request.session['stack']\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % stack.pop(0)))\n os.remove(fileDelete)\n request.session['stack'] = stack\n return JsonResponse({'response': 'undid'})\n else:\n return HttpResponse('')\n\n\ndef redo(request):\n if request.method == 'GET' and request.session.has_key('redostack'\n ) and len(request.session['redostack']) > 0:\n redoStack = request.session['redostack']\n request.session['redo'] = False\n value = redoStack.pop()\n if value == 'grayscale':\n toGrayscale(request)\n if value == 'cool':\n cool(request)\n if value == 
'scaleIt':\n scaleit(request)\n if value == 'setBorder':\n setBorder(request)\n request.session['redostack'] = redoStack\n return JsonResponse({'response': 'redo'})\n\n\ndef toGrayscale(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'grayscale')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef scaleit(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n newX = int(request.POST['newX'])\n newY = int(request.POST['newY'])\n request.session['size'] = newX, newY\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleIt')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n if request.method == 'GET' and request.session.has_key('size'):\n newX = request.session['size'][0]\n newY = request.session['size'][1]\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (int(newX), int(newY)))\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleit')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n else:\n return HttpResponse('')\n\n\ndef cropIt(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n x = int(request.POST['X'])\n y = int(request.POST['Y'])\n h = int(request.POST['h'])\n w = int(request.POST['w'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n cropfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath, crop_img)\n cropfilename = cropfilepath.split('/')[-1]\n stack.insert(0, cropfilename)\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n 
return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\ndef setBorder(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n bordersize = int(request.POST['size'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n request.session['borderSize'] = bordersize\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n mean = cv2.mean(bottom)[0]\n border = cv2.copyMakeBorder(oriimg, top=bordersize, bottom=\n bordersize, left=bordersize, right=bordersize, borderType=cv2.\n BORDER_CONSTANT, value=[mean, mean, mean])\n cv2.imwrite(borderfilepath, border)\n borderfilename = borderfilepath.split('/')[-1]\n stack.insert(0, borderfilename)\n if request.session['redo']:\n redostack.insert(0, 'setBorder')\n request.session['redo'] = True\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n if request.method == 'GET' and request.session.has_key('borderSize'):\n bordersize = request.session['borderSize']\n stack = request.session['stack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n mean = cv2.mean(bottom)[0]\n border = cv2.copyMakeBorder(oriimg, top=bordersize, bottom=\n bordersize, left=bordersize, right=bordersize, borderType=cv2.\n BORDER_CONSTANT, value=[mean, mean, mean])\n cv2.imwrite(borderfilepath, border)\n borderfilename = borderfilepath.split('/')[-1]\n stack.insert(0, borderfilename)\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\ndef cool(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.applyColorMap(grayImage, cv2.COLORMAP_PARULA)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'cool')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\n<mask token>\n\n\ndef rotateRight(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n rotateImage = cv2.imread(fileAbsPath)\n h, w = rotateImage.shape[:2]\n center = w / 2, h / 2\n angle90 = 90\n scale = 1.0\n M = cv2.getRotationMatrix2D(center, angle90, scale)\n 
rotated180 = cv2.warpAffine(rotateImage, M, (h, w))\n cv2.imwrite(rotatefilepath, rotated180)\n gfilename = rotatefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'rotateRight')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n\n\ndef overlay(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n oriimgpath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n oriimg = cv2.imread(oriimgpath)\n h, w = oriimg.shape[:2]\n print(h, w)\n tsa = 'large_white_square.png'\n transImgPath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % tsa))\n tsa = cv2.imread(transImgPath)\n tsa = cv2.resize(tsa, (w, h))\n h, w = tsa.shape[:2]\n print(h, w)\n x_offset = y_offset = 50\n tsa[y_offset:y_offset + img.shape[0], x_offset:x_offset + img.\n shape[1]] = img\n h, w = tsa.shape[:2]\n print(h, w)\n dst = cv2.addWeighted(oriimg, 0.7, tsa, 0.3, 0)\n uui = str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath = str(Path(oriimgpath).with_suffix('')) + uui[-3:\n ] + '.png'\n cv2.imwrite(overlayfilepath, dst)\n overlayfilename = overlayfilepath.split('/')[-1]\n stack.insert(0, overlayfilename)\n print(stack[0])\n if request.session['redo']:\n request.session['redo'] = True\n request.session['stack'] = stack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n",
"step-4": "<mask token>\n\n\ndef index(request):\n print(request.session)\n today = datetime.datetime.now()\n return render(request, 'index.html', {'today': today.strftime('%d-%m=%Y')})\n\n\ndef isFileOpen(request):\n stack = request.session['stack']\n if stack > 0 and request.session.get('name'\n ) != None and request.session.get('email') != None:\n return true\n else:\n return false\n\n\ndef getState(request):\n if isFileOpen:\n fileName = request.session['stack'][0]\n email = request.session['email']\n name = request.session['name']\n return JsonResponse({'state': 'open', 'name': name, 'email': email,\n 'fileName': fileName})\n else:\n return JsonResponse({'state': none, 'name': '', email: '',\n 'fileName': ''})\n\n\ndef openFile(request):\n if request.method == 'POST' and request.FILES['fileName']:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n stack = []\n redostack = []\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n h, w = img.shape[:2]\n r = 500 / float(h)\n dim = int(w * r), 500\n stdimg = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n stdimgPath = str(Path(imgpath).with_suffix('')) + str(uuid.uuid4())[-3:\n ] + '.png'\n print(stdimgPath)\n cv2.imwrite(stdimgPath, stdimg)\n stdFileName = stdimgPath.split('/')[-1]\n stack.append(stdFileName)\n request.session['stack'] = stack\n print(img.shape)\n request.session['size'] = ()\n request.session['redo'] = True\n request.session['oriImg'] = imageFileName\n request.session['borderSize'] = 0\n request.session['email'] = request.POST['email']\n request.session['name'] = request.POST.get('name')\n request.session['redostack'] = redostack\n return JsonResponse({'fileName': imageFileName})\n\n\ndef getImage(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n fileToServer = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n return FileResponse(open(fileToServer, 'rb'))\n return HttpResponse('')\n\n\ndef showOrignal(request):\n if request.method == 'GET' and request.session.has_key('oriImg'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n stack = []\n stack.insert(0, request.session['oriImg'])\n request.session['stack'] = stack\n return JsonResponse({'response': 'orignal'})\n else:\n return HttpResponse('')\n\n\ndef closeFile(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n for file in stack:\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % file))\n os.remove(fileDelete)\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response': 'closed'})\n else:\n return HttpResponse('')\n\n\ndef undo(request):\n if request.method == 'GET' and request.session.has_key('stack') and len(\n request.session['stack']) > 1:\n stack = request.session['stack']\n fileDelete = os.path.abspath(os.path.join(os.path.dirname(__file__),\n '..', 'filestore/%s' % stack.pop(0)))\n os.remove(fileDelete)\n request.session['stack'] = stack\n return JsonResponse({'response': 'undid'})\n else:\n return HttpResponse('')\n\n\ndef redo(request):\n 
if request.method == 'GET' and request.session.has_key('redostack'\n ) and len(request.session['redostack']) > 0:\n redoStack = request.session['redostack']\n request.session['redo'] = False\n value = redoStack.pop()\n if value == 'grayscale':\n toGrayscale(request)\n if value == 'cool':\n cool(request)\n if value == 'scaleIt':\n scaleit(request)\n if value == 'setBorder':\n setBorder(request)\n request.session['redostack'] = redoStack\n return JsonResponse({'response': 'redo'})\n\n\ndef toGrayscale(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.cvtColor(grayImage, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'grayscale')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef scaleit(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n newX = int(request.POST['newX'])\n newY = int(request.POST['newY'])\n request.session['size'] = newX, newY\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (newX, newY), interpolation=cv2.INTER_AREA)\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleIt')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n if request.method == 'GET' and request.session.has_key('size'):\n newX = request.session['size'][0]\n newY = request.session['size'][1]\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n scalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n newimg = cv2.resize(oriimg, (int(newX), int(newY)))\n request.session['size'] = newimg.shape\n cv2.imwrite(scalefilepath, newimg)\n scalefilename = scalefilepath.split('/')[-1]\n stack.insert(0, scalefilename)\n redostack.insert(0, 'scaleit')\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'scaled'})\n else:\n return HttpResponse('')\n\n\ndef cropIt(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n x = int(request.POST['X'])\n y = int(request.POST['Y'])\n h = int(request.POST['h'])\n w = int(request.POST['w'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n cropfilepath = 
str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath, crop_img)\n cropfilename = cropfilepath.split('/')[-1]\n stack.insert(0, cropfilename)\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\ndef setBorder(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n bordersize = int(request.POST['size'])\n stack = request.session['stack']\n redostack = request.session['redostack']\n request.session['borderSize'] = bordersize\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n mean = cv2.mean(bottom)[0]\n border = cv2.copyMakeBorder(oriimg, top=bordersize, bottom=\n bordersize, left=bordersize, right=bordersize, borderType=cv2.\n BORDER_CONSTANT, value=[mean, mean, mean])\n cv2.imwrite(borderfilepath, border)\n borderfilename = borderfilepath.split('/')[-1]\n stack.insert(0, borderfilename)\n if request.session['redo']:\n redostack.insert(0, 'setBorder')\n request.session['redo'] = True\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n if request.method == 'GET' and request.session.has_key('borderSize'):\n bordersize = request.session['borderSize']\n stack = request.session['stack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n mean = cv2.mean(bottom)[0]\n border = cv2.copyMakeBorder(oriimg, top=bordersize, bottom=\n bordersize, left=bordersize, right=bordersize, borderType=cv2.\n BORDER_CONSTANT, value=[mean, mean, mean])\n cv2.imwrite(borderfilepath, border)\n borderfilename = borderfilepath.split('/')[-1]\n stack.insert(0, borderfilename)\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n else:\n return HttpResponse('')\n\n\ndef cool(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n grayscalefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid\n .uuid4()) + '.png'\n grayImage = cv2.imread(fileAbsPath)\n grayImage = cv2.applyColorMap(grayImage, cv2.COLORMAP_PARULA)\n cv2.imwrite(grayscalefilepath, grayImage)\n gfilename = grayscalefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'cool')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\ndef addWatermark(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n text = request.POST['t']\n print(text)\n stack = request.session['stack']\n redostack = request.session['redostack']\n request.session['text'] = text\n fileAbsPath = 
os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n textimgPath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.uuid4()\n ) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n overlay = oriimg.copy()\n output = oriimg.copy()\n cv2.putText(overlay, text.format(0.5), (10, 30), cv2.cv2.\n FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)\n cv2.addWeighted(overlay, 0.5, output, 1 - 0.5, 0, output)\n cv2.imwrite(textimgPath, output)\n textimgName = textimgPath.split('/')[-1]\n stack.insert(0, textimgName)\n if request.session['redo']:\n redostack.insert(0, 'addWatermark')\n request.session['redo'] = True\n request.session['redostack'] = redostack\n request.session['stack'] = stack\n return JsonResponse({'response': 'croped'})\n if request.method == 'GET' and request.session.has_key('borderSize'):\n bordersize = request.session['borderSize']\n stack = request.session['stack']\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n borderfilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n oriimg = cv2.imread(fileAbsPath)\n row, col = oriimg.shape[:2]\n bottom = oriimg[row - 2:row, 0:col]\n\n\ndef rotateRight(request):\n if request.method == 'GET' and request.session.has_key('stack'):\n stack = request.session['stack']\n redostack = request.session['redostack']\n if len(stack) > 0:\n fileAbsPath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % stack[0]))\n rotatefilepath = str(Path(fileAbsPath).with_suffix('')) + str(uuid.\n uuid4()) + '.png'\n rotateImage = cv2.imread(fileAbsPath)\n h, w = rotateImage.shape[:2]\n center = w / 2, h / 2\n angle90 = 90\n scale = 1.0\n M = cv2.getRotationMatrix2D(center, angle90, scale)\n rotated180 = cv2.warpAffine(rotateImage, M, (h, w))\n cv2.imwrite(rotatefilepath, rotated180)\n gfilename = rotatefilepath.split('/')[-1]\n stack.insert(0, gfilename)\n if request.session['redo']:\n redostack.insert(0, 'rotateRight')\n request.session['redo'] = True\n request.session['stack'] = stack\n request.session['redostack'] = redostack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n\n\ndef overlay(request):\n if request.method == 'POST' and request.session.has_key('stack'):\n stack = request.session['stack']\n if len(stack) > 0:\n imageFile = request.FILES['fileName']\n fs = FileSystemStorage()\n imageFileName = fs.save(imageFile.name, imageFile)\n imgpath = os.path.abspath(os.path.join(os.path.dirname(__file__\n ), '..', 'filestore/%s' % imageFileName))\n img = cv2.imread(imgpath)\n oriimgpath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % stack[0]))\n oriimg = cv2.imread(oriimgpath)\n h, w = oriimg.shape[:2]\n print(h, w)\n tsa = 'large_white_square.png'\n transImgPath = os.path.abspath(os.path.join(os.path.dirname(\n __file__), '..', 'filestore/%s' % tsa))\n tsa = cv2.imread(transImgPath)\n tsa = cv2.resize(tsa, (w, h))\n h, w = tsa.shape[:2]\n print(h, w)\n x_offset = y_offset = 50\n tsa[y_offset:y_offset + img.shape[0], x_offset:x_offset + img.\n shape[1]] = img\n h, w = tsa.shape[:2]\n print(h, w)\n dst = cv2.addWeighted(oriimg, 0.7, tsa, 0.3, 0)\n uui = str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath = str(Path(oriimgpath).with_suffix('')) + uui[-3:\n ] + '.png'\n cv2.imwrite(overlayfilepath, dst)\n overlayfilename = overlayfilepath.split('/')[-1]\n stack.insert(0, overlayfilename)\n print(stack[0])\n if 
request.session['redo']:\n request.session['redo'] = True\n request.session['stack'] = stack\n return JsonResponse({'response': 'rotated'})\n else:\n return HttpResponse()\n",
"step-5": "from django.shortcuts import render\nimport datetime\nfrom django.http import*\nfrom django.core.files.storage import FileSystemStorage\nimport uuid \nimport os\nimport cv2\nimport numpy as np\nfrom pathlib import Path\n\ndef index(request):\n print(request.session);\n today=datetime.datetime.now()\n return render(request,'index.html',{\n \"today\":today.strftime(\"%d-%m=%Y\")})\n\ndef isFileOpen(request):\n stack=request.session['stack']\n if stack>0 and request.session.get('name')!=None and request.session.get('email')!=None:\n return true\n \n else:\n return false\n \n\t\n\ndef getState(request):\n if(isFileOpen):\n fileName=request.session['stack'][0]\n email=request.session['email']\n name=request.session['name']\n return JsonResponse({'state':'open','name':name,'email':email,'fileName':fileName})\n \n else:\n return JsonResponse({'state':none,'name':'',email:'','fileName':''})\t\n \n \n\ndef openFile(request):\n if request.method=='POST' and request.FILES['fileName']:\n imageFile=request.FILES['fileName']\n fs=FileSystemStorage()\n imageFileName=fs.save(imageFile.name,imageFile)\n stack=[]\n redostack=[]\n \n imgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%imageFileName))\n img=cv2.imread(imgpath)\n (h, w) = img.shape[:2]\n r = 500 / float(h)\n dim = (int(w * r),500)\n \n stdimg=cv2.resize(img,dim,interpolation=cv2.INTER_AREA)\n stdimgPath=str(Path(imgpath).with_suffix(''))+str(uuid.uuid4())[-3:]+'.png' \n print(stdimgPath)\n cv2.imwrite(stdimgPath,stdimg)\n stdFileName=stdimgPath.split('/')[-1];\n\n stack.append(stdFileName)\n request.session['stack']=stack\n print(img.shape)\n request.session['size']=()\n request.session['redo']=True\n request.session['oriImg']=imageFileName\n request.session['borderSize']=0;\n request.session['email']=request.POST['email']\n request.session['name']=request.POST.get('name')\n request.session['redostack']=redostack\n \t\n return JsonResponse({'fileName':imageFileName})\n\ndef getImage(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n if len(stack)>0:\n fileToServer=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]))\n \n return FileResponse(open(fileToServer,'rb'))\n return HttpResponse('')\n\n\ndef showOrignal(request):\n if request.method==\"GET\" and request.session.has_key('oriImg'):\n stack=request.session['stack']\n for file in stack:\n fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%file))\n os.remove(fileDelete);\n request.session.pop('stack')\n stack=[]\n stack.insert(0,request.session['oriImg'])\n request.session['stack']=stack\n return JsonResponse({'response':'orignal'})\n else:\n return HttpResponse('')\n \n \n\n\ndef closeFile(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n for file in stack:\n fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%file))\n os.remove(fileDelete);\n request.session.pop('stack')\n request.session.pop('email')\n request.session.pop('name')\n return JsonResponse({'response':'closed'})\n else:\n return HttpResponse('');\n\ndef undo(request):\n if request.method==\"GET\" and request.session.has_key('stack') and len(request.session['stack'])>1:\n stack=request.session['stack']\n fileDelete=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack.pop(0)))\n os.remove(fileDelete);\n request.session['stack']=stack;\n 
return JsonResponse({\"response\":\"undid\"})\n else:\n return HttpResponse('')\n\ndef redo(request):\n if request.method==\"GET\" and request.session.has_key('redostack') and len(request.session['redostack'])>0:\n redoStack=request.session['redostack']\n request.session['redo']=False;\n value=redoStack.pop()\n if(value=='grayscale'):\n toGrayscale(request)\n if(value=='cool'):\n cool(request)\n if(value=='scaleIt'):\n scaleit(request)\n if(value=='setBorder'):\n setBorder(request); \n request.session['redostack']=redoStack;\n return JsonResponse({'response':'redo'})\n\n\ndef toGrayscale(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n redostack=request.session['redostack']\n if len(stack)>0:\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n grayscalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......\n grayImage=cv2.imread(fileAbsPath)\n grayImage=cv2.cvtColor(grayImage,cv2.COLOR_BGR2GRAY)\n cv2.imwrite(grayscalefilepath,grayImage)\n gfilename=grayscalefilepath.split('/')[-1];\n stack.insert(0,gfilename)\n if request.session['redo']:\n redostack.insert(0,'grayscale')\n request.session['redo']=True\n request.session['stack']=stack\n request.session['redostack']=redostack\n return JsonResponse({'response':'convertedToGrayscale'}) \n else:\n return HttpResponse()\n\ndef scaleit(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n newX=int(request.POST['newX'])\n newY=int(request.POST['newY'])\n \n request.session['size']=(newX,newY)\n stack=request.session['stack']\n redostack=request.session['redostack']\n \n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n scalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n \n oriimg=cv2.imread(fileAbsPath)\n newimg=cv2.resize(oriimg,(newX,newY),interpolation=cv2.INTER_AREA)\n request.session['size']=newimg.shape;\n cv2.imwrite(scalefilepath,newimg);\n \n scalefilename=scalefilepath.split('/')[-1]\n stack.insert(0,scalefilename)\n redostack.insert(0,'scaleIt')\n request.session['redostack']=redostack\n request.session['stack']=stack;\n return JsonResponse({'response':'scaled'})\n if request.method==\"GET\" and request.session.has_key('size'):\n newX=request.session['size'][0]\n newY=request.session['size'][1]\n \n \n stack=request.session['stack']\n redostack=request.session['redostack']\n \n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n scalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n \n oriimg=cv2.imread(fileAbsPath)\n newimg=cv2.resize(oriimg,(int(newX),int(newY)))\n request.session['size']=newimg.shape;\n cv2.imwrite(scalefilepath,newimg);\n \n scalefilename=scalefilepath.split('/')[-1]\n stack.insert(0,scalefilename)\n redostack.insert(0,'scaleit')\n request.session['redostack']=redostack\n request.session['stack']=stack;\n return JsonResponse({'response':'scaled'})\n else:\n return HttpResponse('')\n \n\ndef cropIt(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n x=int(request.POST['X']);\n y=int(request.POST['Y']);\n h=int(request.POST['h'])\n w=int(request.POST['w'])\n stack=request.session['stack']\n redostack=request.session['redostack']\n \n 
fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n cropfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n\n oriimg=cv2.imread(fileAbsPath)\n\n \n crop_img = oriimg[y:h, x:w]\n cv2.imwrite(cropfilepath,crop_img);\n cropfilename=cropfilepath.split('/')[-1]\n stack.insert(0,cropfilename)\n \n request.session['redostack']=redostack;\n request.session['stack']=stack;\n\n return JsonResponse({'response':'croped'})\n else:\n return HttpResponse('') \n \ndef setBorder(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n bordersize=int(request.POST['size']);\n stack=request.session['stack']\n redostack=request.session['redostack']\n request.session['borderSize']=bordersize\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n\n oriimg=cv2.imread(fileAbsPath)\n\n row,col=oriimg.shape[:2]\n bottom=oriimg[row-2:row,0:col]\n mean=cv2.mean(bottom)[0]\n border=cv2.copyMakeBorder(oriimg, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean]) \n \n cv2.imwrite(borderfilepath,border);\n borderfilename=borderfilepath.split('/')[-1]\n stack.insert(0,borderfilename)\n if request.session['redo']:\n redostack.insert(0,'setBorder')\n request.session['redo']=True\n request.session['redostack']=redostack\n request.session['stack']=stack;\n return JsonResponse({'response':'croped'})\n if request.method==\"GET\" and request.session.has_key('borderSize'):\n bordersize=request.session['borderSize'];\n stack=request.session['stack']\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n oriimg=cv2.imread(fileAbsPath)\n row,col=oriimg.shape[:2]\n bottom=oriimg[row-2:row,0:col]\n mean=cv2.mean(bottom)[0]\n border=cv2.copyMakeBorder(oriimg, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[mean,mean,mean])\n cv2.imwrite(borderfilepath,border);\n borderfilename=borderfilepath.split('/')[-1]\n stack.insert(0,borderfilename)\n request.session['stack']=stack;\n return JsonResponse({'response':'croped'})\n\n else:\n return HttpResponse('')\n\n\ndef cool(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n redostack=request.session['redostack']\n if len(stack)>0:\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n grayscalefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......\n grayImage=cv2.imread(fileAbsPath)\n grayImage=cv2.applyColorMap(grayImage,cv2.COLORMAP_PARULA)\n cv2.imwrite(grayscalefilepath,grayImage)\n gfilename=grayscalefilepath.split('/')[-1];\n stack.insert(0,gfilename)\n if request.session['redo']:\n redostack.insert(0,'cool')\n request.session['redo']=True\n request.session['stack']=stack\n request.session['redostack']=redostack\n return JsonResponse({'response':'convertedToGrayscale'})\n else:\n return HttpResponse()\n\n\n\n\n\n\n\ndef addWatermark(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n text=request.POST['t']\n print(text);\n stack=request.session['stack']\n 
redostack=request.session['redostack']\n request.session['text']=text\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n textimgPath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n\n oriimg=cv2.imread(fileAbsPath)\n\n overlay=oriimg.copy()\n output=oriimg.copy()\n cv2.putText(overlay,text.format(0.5),(10,30),cv2. cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)\n \n\n cv2.addWeighted(overlay,0.5,output,1-0.5,0,output)\n \n cv2.imwrite(textimgPath,output);\n textimgName=textimgPath.split('/')[-1]\n stack.insert(0,textimgName)\n if request.session['redo']:\n redostack.insert(0,'addWatermark')\n request.session['redo']=True\n request.session['redostack']=redostack\n request.session['stack']=stack;\n return JsonResponse({'response':'croped'})\n if request.method==\"GET\" and request.session.has_key('borderSize'):\n bordersize=request.session['borderSize'];\n stack=request.session['stack']\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n borderfilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding...\n oriimg=cv2.imread(fileAbsPath)\n row,col=oriimg.shape[:2]\n bottom=oriimg[row-2:row,0:col]\n\ndef rotateRight(request):\n if request.method==\"GET\" and request.session.has_key('stack'):\n stack=request.session['stack']\n redostack=request.session['redostack']\n if len(stack)>0:\n fileAbsPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]));\n rotatefilepath=str(Path(fileAbsPath).with_suffix(''))+str(uuid.uuid4())+'.png' #here dirty coding......\n rotateImage=cv2.imread(fileAbsPath)\n (h,w)=rotateImage.shape[:2]\n center=(w/2,h/2)\n angle90=90\n scale=1.0\n M=cv2.getRotationMatrix2D(center,angle90,scale)\n rotated180=cv2.warpAffine(rotateImage,M,(h,w))\n\n cv2.imwrite(rotatefilepath,rotated180)\n gfilename=rotatefilepath.split('/')[-1];\n stack.insert(0,gfilename)\n if request.session['redo']:\n redostack.insert(0,'rotateRight')\n request.session['redo']=True\n request.session['stack']=stack\n request.session['redostack']=redostack\n return JsonResponse({'response':'rotated'})\n else:\n return HttpResponse()\n\ndef overlay(request):\n if request.method==\"POST\" and request.session.has_key('stack'):\n stack=request.session['stack']\n if len(stack)>0:\n imageFile=request.FILES['fileName']\n fs=FileSystemStorage()\n imageFileName=fs.save(imageFile.name,imageFile)\n imgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%imageFileName))\n img=cv2.imread(imgpath)\n oriimgpath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%stack[0]))\n oriimg=cv2.imread(oriimgpath)\n h,w=oriimg.shape[:2]\n print(h,w);\n\n tsa='large_white_square.png'; \n transImgPath=os.path.abspath(os.path.join(os.path.dirname(__file__),'..','filestore/%s'%tsa))\n tsa=cv2.imread(transImgPath);\n tsa=cv2.resize(tsa,(w,h))\n h,w=tsa.shape[:2]\n print(h,w)\n x_offset=y_offset=50\n tsa[y_offset:y_offset+img.shape[0], x_offset:x_offset+img.shape[1]] = img\n h,w=tsa.shape[:2]\n print(h,w)\n\n \n dst=cv2.addWeighted(oriimg,0.7,tsa,0.3,0);\n uui=str(uuid.uuid4())\n print(uui)\n print(uui[-3:])\n overlayfilepath=str(Path(oriimgpath).with_suffix(''))+uui[-3:]+'.png' #here dirty coding......\n cv2.imwrite(overlayfilepath,dst);\n overlayfilename=overlayfilepath.split('/')[-1]\n stack.insert(0,overlayfilename) \n print(stack[0]);\n if request.session['redo']:\n 
#redostack.insert(0,'overlayed')\n request.session['redo']=True\n request.session['stack']=stack\n #request.session['redostack']=redostack\n return JsonResponse({'response':'rotated'})\n else:\n return HttpResponse()\n\n \n\n",
"step-ids": [
12,
14,
15,
17,
19
]
}
|
[
12,
14,
15,
17,
19
] |
from setuptools import setup, find_packages
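# Packaging metadata for the qn utility library; the runtime dependency
# list below is kept commented out, so installs pull in nothing extra.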
setup(name='qn',
version='0.2.2',
      description='Handy functions I use every day.',
url='https://github.com/frlender/qn',
author='Qiaonan Duan',
author_email='geonann@gmail.com',
license='MIT',
packages=find_packages(),
# install_requires=[
# 'matplotlib',
# 'seaborn',
# 'numpy',
# 'scipy',
# 'pandas',
# 'PyYAML',
# 'matplotlib-venn',
# 'scikit-learn'
# ],
zip_safe=False)
|
normal
|
{
"blob_id": "3b307ae7f8b8b25c93eb2dc54b2603b1291b6232",
"index": 1789,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='qn', version='0.2.2', description=\n 'Handy functions I use everyday.', url='https://github.com/frlender/qn',\n author='Qiaonan Duan', author_email='geonann@gmail.com', license='MIT',\n packages=find_packages(), zip_safe=False)\n",
"step-3": "from setuptools import setup, find_packages\nsetup(name='qn', version='0.2.2', description=\n 'Handy functions I use everyday.', url='https://github.com/frlender/qn',\n author='Qiaonan Duan', author_email='geonann@gmail.com', license='MIT',\n packages=find_packages(), zip_safe=False)\n",
"step-4": "from setuptools import setup, find_packages\n\nsetup(name='qn',\n version='0.2.2',\n description='Handy functions I use everyday.',\n url='https://github.com/frlender/qn',\n author='Qiaonan Duan',\n author_email='geonann@gmail.com',\n license='MIT',\n packages=find_packages(),\n # install_requires=[\n # 'matplotlib',\n # 'seaborn',\n # 'numpy',\n # 'scipy',\n # 'pandas',\n # 'PyYAML',\n # 'matplotlib-venn',\n # 'scikit-learn'\n # ],\n zip_safe=False)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-15 18:46
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
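    # Auto-generated: adds the Aposta2 and Concurso2 models and alters
    # Aposta.dataAposta to a plain DateField.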
dependencies = [
('aposta', '0003_aposta_nome'),
]
operations = [
migrations.CreateModel(
name='Aposta2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('aposta_identificacao', models.CharField(max_length=200)),
('valor', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Concurso2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('concurso_edicao', models.CharField(max_length=20)),
('pub_data', models.DateTimeField(verbose_name='data de publicacao')),
],
),
migrations.AlterField(
model_name='aposta',
name='dataAposta',
field=models.DateField(),
),
migrations.AddField(
model_name='aposta2',
name='Concurso2_identificao',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aposta.Concurso2'),
),
]
|
normal
|
{
"blob_id": "a917dd6171a78142fefa8c8bfad0110729fc1bb0",
"index": 3190,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('aposta', '0003_aposta_nome')]\n operations = [migrations.CreateModel(name='Aposta2', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('aposta_identificacao', models.\n CharField(max_length=200)), ('valor', models.IntegerField(default=0\n ))]), migrations.CreateModel(name='Concurso2', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('concurso_edicao', models.CharField(\n max_length=20)), ('pub_data', models.DateTimeField(verbose_name=\n 'data de publicacao'))]), migrations.AlterField(model_name='aposta',\n name='dataAposta', field=models.DateField()), migrations.AddField(\n model_name='aposta2', name='Concurso2_identificao', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'aposta.Concurso2'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('aposta', '0003_aposta_nome')]\n operations = [migrations.CreateModel(name='Aposta2', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('aposta_identificacao', models.\n CharField(max_length=200)), ('valor', models.IntegerField(default=0\n ))]), migrations.CreateModel(name='Concurso2', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('concurso_edicao', models.CharField(\n max_length=20)), ('pub_data', models.DateTimeField(verbose_name=\n 'data de publicacao'))]), migrations.AlterField(model_name='aposta',\n name='dataAposta', field=models.DateField()), migrations.AddField(\n model_name='aposta2', name='Concurso2_identificao', field=models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'aposta.Concurso2'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-04-15 18:46\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('aposta', '0003_aposta_nome'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Aposta2',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('aposta_identificacao', models.CharField(max_length=200)),\n ('valor', models.IntegerField(default=0)),\n ],\n ),\n migrations.CreateModel(\n name='Concurso2',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('concurso_edicao', models.CharField(max_length=20)),\n ('pub_data', models.DateTimeField(verbose_name='data de publicacao')),\n ],\n ),\n migrations.AlterField(\n model_name='aposta',\n name='dataAposta',\n field=models.DateField(),\n ),\n migrations.AddField(\n model_name='aposta2',\n name='Concurso2_identificao',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aposta.Concurso2'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
    Factory for creating and running simulations against optimization tools
Author:
Matthew Barber <mfmbarber@gmail.com>
'''
from .strategy_annealer import StrategyAnnealer
from .strategy_deap import StrategyDeap
class CalulateStrategyWith:
@staticmethod
def Annealing(car, include_initial_tyre=False, iterations=100000):
'''
Use simulated annealing to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
iterations (int): Iteration limit
Returns:
Car
'''
sim = StrategyAnnealer(car)
sim.setIncludeInitialTyreInMove(include_initial_tyre)
sim.steps = iterations
state, e = sim.anneal()
return state
@staticmethod
def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):
'''
Use genetic evolution to determine the best strategy
Args:
car (Car): An initial car to test with
include_initial_tyre (bool): Include the initial tyre in moves
generations (int): Evolution generation limit
Returns:
Car
'''
return StrategyDeap(car, include_initial_tyre, generations).run()
|
normal
|
{
"blob_id": "1cab38721e6b96a9877bd67cbddaa4d6b4e53d1b",
"index": 8175,
"step-1": "<mask token>\n\n\nclass CalulateStrategyWith:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CalulateStrategyWith:\n <mask token>\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n",
"step-3": "<mask token>\n\n\nclass CalulateStrategyWith:\n\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n \"\"\"\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n \"\"\"\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n",
"step-4": "<mask token>\nfrom .strategy_annealer import StrategyAnnealer\nfrom .strategy_deap import StrategyDeap\n\n\nclass CalulateStrategyWith:\n\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n \"\"\"\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n \"\"\"\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n \"\"\"\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n \"\"\"\n return StrategyDeap(car, include_initial_tyre, generations).run()\n",
"step-5": "'''\n Factory for creating and running ssimulations against optimization tools\n\n Author:\n Matthew Barber <mfmbarber@gmail.com>\n'''\nfrom .strategy_annealer import StrategyAnnealer\nfrom .strategy_deap import StrategyDeap\n\n\nclass CalulateStrategyWith:\n @staticmethod\n def Annealing(car, include_initial_tyre=False, iterations=100000):\n '''\n Use simulated annealing to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n iterations (int): Iteration limit\n\n Returns:\n Car\n '''\n sim = StrategyAnnealer(car)\n sim.setIncludeInitialTyreInMove(include_initial_tyre)\n sim.steps = iterations\n state, e = sim.anneal()\n return state\n\n @staticmethod\n def geneticAlgorithm(car, include_initial_tyre=False, generations=1000):\n '''\n Use genetic evolution to determine the best strategy\n\n Args:\n car (Car): An initial car to test with\n include_initial_tyre (bool): Include the initial tyre in moves\n generations (int): Evolution generation limit\n\n Returns:\n Car\n '''\n return StrategyDeap(car, include_initial_tyre, generations).run()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
u"""Hellweg execution template.
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern import pkio
from pykern.pkdebug import pkdc, pkdp
from rslinac import solver
from sirepo import simulation_db
from sirepo.template import template_common, hellweg_dump_reader
import math
import numpy as np
import os.path
import py.path
import re
HELLWEG_DUMP_FILE = 'all-data.bin'
HELLWEG_SUMMARY_FILE = 'output.txt'
HELLWEG_INI_FILE = 'defaults.ini'
HELLWEG_INPUT_FILE = 'input.txt'
#: Simulation type
SIM_TYPE = 'hellweg'
WANT_BROWSER_FRAME_CACHE = True
# lattice element is required so make it very short and wide drift
_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + "\n"
_HELLWEG_PARSED_FILE = 'PARSED.TXT'
_REPORT_STYLE_FIELDS = ['colorMap', 'notes']
_SCHEMA = simulation_db.get_schema(SIM_TYPE)
def background_percent_complete(report, run_dir, is_running):
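    # Progress callback for the background run: while the solver is still
    # running only a placeholder is returned; once the dump file exists its
    # frame count and summary text are reported back to the client.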
if is_running:
return {
'percentComplete': 0,
'frameCount': 0,
}
dump_file = _dump_file(run_dir)
if os.path.exists(dump_file):
beam_header = hellweg_dump_reader.beam_header(dump_file)
last_update_time = int(os.path.getmtime(dump_file))
frame_count = beam_header.NPoints
return {
'lastUpdateTime': last_update_time,
'percentComplete': 100,
'frameCount': frame_count,
'summaryData': _summary_text(run_dir),
}
return {
'percentComplete': 100,
'frameCount': 0,
'error': _parse_error_message(run_dir)
}
def extract_beam_histrogram(report, run_dir, frame):
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
points = hellweg_dump_reader.get_points(beam_info, report.reportType)
hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))
return {
'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),
'x_range': [edges[0], edges[-1]],
'y_label': 'Number of Particles',
'x_label': hellweg_dump_reader.get_label(report.reportType),
'points': hist.T.tolist(),
}
def extract_beam_report(report, run_dir, frame):
data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))
model = data.models.beamAnimation
model.update(report)
beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)
x, y = report.reportType.split('-')
values = [
hellweg_dump_reader.get_points(beam_info, x),
hellweg_dump_reader.get_points(beam_info, y),
]
model['x'] = x
model['y'] = y
return template_common.heatmap(values, model, {
'x_label': hellweg_dump_reader.get_label(x),
'y_label': hellweg_dump_reader.get_label(y),
'title': _report_title(report.reportType, 'BeamReportType', beam_info),
'z_label': 'Number of Particles',
'summaryData': _summary_text(run_dir),
})
def extract_parameter_report(report, run_dir):
s = solver.BeamSolver(
os.path.join(str(run_dir), HELLWEG_INI_FILE),
os.path.join(str(run_dir), HELLWEG_INPUT_FILE))
s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))
y1_var, y2_var = report.reportType.split('-')
x_field = 'z'
x = s.get_structure_parameters(_parameter_index(x_field))
y1 = s.get_structure_parameters(_parameter_index(y1_var))
y1_extent = [np.min(y1), np.max(y1)]
y2 = s.get_structure_parameters(_parameter_index(y2_var))
y2_extent = [np.min(y2), np.max(y2)]
return {
'title': _enum_text('ParameterReportType', report.reportType),
'x_range': [x[0], x[-1]],
'y_label': hellweg_dump_reader.get_parameter_label(y1_var),
'x_label': hellweg_dump_reader.get_parameter_label(x_field),
'x_points': x,
'points': [
y1,
y2,
],
'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],
'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),
'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),
}
def extract_particle_report(report, run_dir):
x_field = 'z0'
particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))
x = particle_info['z_values']
return {
'title': _enum_text('ParticleReportType', report.reportType),
'x_range': [np.min(x), np.max(x)],
'y_label': hellweg_dump_reader.get_label(report.reportType),
'x_label': hellweg_dump_reader.get_label(x_field),
'x_points': x,
'points': particle_info['y_values'],
'y_range': particle_info['y_range'],
}
def fixup_old_data(data):
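    # Backfill models and fields introduced after older simulations were
    # saved, so legacy simulation data still loads cleanly.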
for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):
if m not in data.models:
data.models[m] = pkcollections.Dict({})
template_common.update_model_defaults(data.models[m], m, _SCHEMA)
if 'solenoidFile' not in data['models']['solenoid']:
data['models']['solenoid']['solenoidFile'] = ''
if 'beamDefinition' not in data['models']['beam']:
beam = data['models']['beam']
beam['beamDefinition'] = 'transverse_longitude'
beam['cstCompress'] = '0'
beam['transversalFile2d'] = ''
beam['transversalFile4d'] = ''
beam['longitudinalFile1d'] = ''
beam['longitudinalFile2d'] = ''
beam['cstFile'] = ''
template_common.organize_example(data)
def get_animation_name(data):
return 'animation'
def get_application_data(data):
if data['method'] == 'compute_particle_ranges':
return template_common.compute_field_range(data, _compute_range_across_files)
assert False, 'unknown application data method: {}'.format(data['method'])
def lib_files(data, source_lib):
return template_common.filename_to_path(_simulation_files(data), source_lib)
def get_simulation_frame(run_dir, data, model_data):
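    # Dispatch an animation frame request to the extractor matching the
    # requested model name; unknown model names raise.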
frame_index = int(data['frameIndex'])
if data['modelName'] == 'beamAnimation':
args = template_common.parse_animation_args(
data,
{
'1': ['reportType', 'histogramBins', 'startTime'],
'': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],
},
)
return extract_beam_report(args, run_dir, frame_index)
elif data['modelName'] == 'beamHistogramAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'histogramBins', 'startTime']},
)
return extract_beam_histrogram(args, run_dir, frame_index)
elif data['modelName'] == 'particleAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'renderCount', 'startTime']},
)
return extract_particle_report(args, run_dir)
elif data['modelName'] == 'parameterAnimation':
args = template_common.parse_animation_args(
data,
{'': ['reportType', 'startTime']},
)
return extract_parameter_report(args, run_dir)
raise RuntimeError('unknown animation model: {}'.format(data['modelName']))
def models_related_to_report(data):
"""What models are required for this data['report']
Args:
data (dict): simulation
Returns:
list: Named models, model fields or values (dict, list) that affect report
"""
r = data['report']
if r == 'animation':
return []
res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [
'beam',
'ellipticalDistribution',
'energyPhaseDistribution',
'solenoid',
'sphericalDistribution',
'twissDistribution',
]
for f in template_common.lib_files(data):
res.append(f.mtime())
return res
def python_source_for_model(data, model):
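    # Emit a standalone script that writes the generated input and ini
    # files, then runs the HELLWEG solver directly, outside of Sirepo.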
return '''
from rslinac import solver
{}
with open('input.txt', 'w') as f:
f.write(input_file)
with open('defaults.ini', 'w') as f:
f.write(ini_file)
s = solver.BeamSolver('defaults.ini', 'input.txt')
s.solve()
s.save_output('output.txt')
'''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))
def remove_last_frame(run_dir):
pass
def validate_delete_file(data, filename, file_type):
"""Returns True if the filename is in use by the simulation data."""
return filename in _simulation_files(data)
def write_parameters(data, run_dir, is_parallel):
"""Write the parameters file
Args:
data (dict): input
run_dir (py.path): where to write
is_parallel (bool): run in background?
"""
pkio.write_text(
run_dir.join(template_common.PARAMETERS_PYTHON_FILE),
_generate_parameters_file(
data,
run_dir,
is_parallel,
),
)
def _compute_range_across_files(run_dir, data):
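    # Walk every saved frame in the dump and accumulate a global
    # [min, max] range for each field used by the beam reports.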
res = {}
for v in _SCHEMA.enum.BeamReportType:
x, y = v[0].split('-')
res[x] = []
res[y] = []
dump_file = _dump_file(run_dir)
if not os.path.exists(dump_file):
return res
beam_header = hellweg_dump_reader.beam_header(dump_file)
    for frame in range(beam_header.NPoints):
beam_info = hellweg_dump_reader.beam_info(dump_file, frame)
for field in res:
values = hellweg_dump_reader.get_points(beam_info, field)
if not len(values):
pass
elif len(res[field]):
res[field][0] = min(min(values), res[field][0])
res[field][1] = max(max(values), res[field][1])
else:
res[field] = [min(values), max(values)]
return res
def _dump_file(run_dir):
return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)
def _enum_text(enum_name, v):
enum_values = _SCHEMA['enum'][enum_name]
for e in enum_values:
if e[0] == v:
return e[1]
raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))
def _generate_beam(models):
# BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180
beam_def = models.beam.beamDefinition
if beam_def == 'transverse_longitude':
return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))
if beam_def == 'cst_pit':
return 'BEAM CST_PIT {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
'COMPRESS' if models.beam.cstCompress else '',
)
if beam_def == 'cst_pid':
return 'BEAM CST_PID {} {}'.format(
template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
raise RuntimeError('invalid beam def: {}'.format(beam_def))
def _generate_cell_params(el):
#TODO(pjm): add an option field to select auto-calculate
if el.attenuation == 0 and el.aperture == 0:
return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)
return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)
def _generate_charge(models):
if models.beam.spaceCharge == 'none':
return ''
return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)
def _generate_current(models):
return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)
def _generate_energy_phase_distribution(dist):
return '{} {} {}'.format(
dist.meanPhase,
dist.phaseLength,
dist.phaseDeviation if dist.distributionType == 'gaussian' else '',
)
def _generate_lattice(models):
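    # Translate the beamline element models into HELLWEG lattice
    # commands, one input line per element.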
res = ''
for el in models.beamline:
if el.type == 'powerElement':
res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)
elif el.type == 'cellElement':
res += 'CELL {}'.format(_generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'cellsElement':
res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))
has_cell_or_drift = True
elif el.type == 'driftElement':
res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)
has_cell_or_drift = True
elif el.type == 'saveElement':
#TODO(pjm): implement this
pass
else:
raise RuntimeError('unknown element type: {}'.format(el.type))
res += "\n"
return res
def _generate_longitude_dist(models):
dist_type = models.beam.longitudinalDistribution
if dist_type == 'norm2d':
dist = models.energyPhaseDistribution
if dist.distributionType == 'uniform':
return 'NORM2D {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)
if dist.distributionType == 'gaussian':
return 'NORM2D {} {} {} {} {} {}'.format(
dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)
        raise RuntimeError('unknown longitudinal distribution type: {}'.format(dist.distributionType))
if dist_type == 'file1d':
return 'FILE1D {} {}'.format(
template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),
_generate_energy_phase_distribution(models.energyPhaseDistribution),
)
    if dist_type == 'file2d':
        return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'longitudinalFile2d', models.beam.longitudinalFile2d))
raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))
def _generate_options(models):
if models.simulationSettings.allowBackwardWaves == '1':
return 'OPTIONS REVERSE'
return ''
def _generate_parameters_file(data, run_dir=None, is_parallel=False):
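    # Flatten the model tree into template variables and render the
    # HELLWEG input file; single-frame (non-parallel) runs substitute a
    # stub drift element for the full lattice.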
template_common.validate_models(data, _SCHEMA)
v = template_common.flatten_data(data['models'], {})
v['optionsCommand'] = _generate_options(data['models'])
v['solenoidCommand'] = _generate_solenoid(data['models'])
v['beamCommand'] = _generate_beam(data['models'])
v['currentCommand'] = _generate_current(data['models'])
v['chargeCommand'] = _generate_charge(data['models'])
if is_parallel:
v['latticeCommands'] = _generate_lattice(data['models'])
else:
v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT
return template_common.render_jinja(SIM_TYPE, v)
def _generate_solenoid(models):
solenoid = models.solenoid
if solenoid.sourceDefinition == 'none':
return ''
if solenoid.sourceDefinition == 'values':
#TODO(pjm): latest version also has solenoid.fringeRegion
return 'SOLENOID {} {} {}'.format(
solenoid.fieldStrength, solenoid.length, solenoid.z0)
if solenoid.sourceDefinition == 'file':
return 'SOLENOID {}'.format(
template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.sourceDefinition))
def _generate_transverse_dist(models):
dist_type = models.beam.transversalDistribution
if dist_type == 'twiss4d':
dist = models.twissDistribution
return 'TWISS4D {} {} {} {} {} {}'.format(
dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,
dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)
if dist_type == 'sph2d':
dist = models.sphericalDistribution
if dist.curvature == 'flat':
dist.curvatureFactor = 0
return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)
if dist_type == 'ell2d':
dist = models.ellipticalDistribution
return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)
beam = models.beam
if dist_type == 'file2d':
return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
if dist_type == 'file4d':
return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))
def _parameter_index(name):
return hellweg_dump_reader.parameter_index(name)
def _parse_error_message(run_dir):
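    # Return the first "ERROR: ..." line from HELLWEG's parsed output,
    # falling back to a generic message when nothing useful is found.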
path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)
if not os.path.exists(path):
return 'No elements generated'
text = pkio.read_text(str(path))
for line in text.split("\n"):
        match = re.search(r'^ERROR:\s(.*)$', line)
if match:
return match.group(1)
return 'No output generated'
def _report_title(report_type, enum_name, beam_info):
return '{}, z={:.4f} cm'.format(
_enum_text(enum_name, report_type),
100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))
def _simulation_files(data):
res = []
solenoid = data.models.solenoid
if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:
res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))
beam = data.models.beam
if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':
res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))
if beam.beamDefinition == 'transverse_longitude':
if beam.transversalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))
elif beam.transversalDistribution == 'file4d':
res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))
if beam.longitudinalDistribution == 'file1d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))
if beam.longitudinalDistribution == 'file2d':
res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))
return res
def _summary_text(run_dir):
return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))
|
normal
|
{
"blob_id": "9e6fd6620b4ec6a574d7948fb0d14b0a2ad0d24e",
"index": 5240,
"step-1": "<mask token>\n\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {'percentComplete': 0, 'frameCount': 0}\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {'lastUpdateTime': last_update_time, 'percentComplete': 100,\n 'frameCount': frame_count, 'summaryData': _summary_text(run_dir)}\n return {'percentComplete': 100, 'frameCount': 0, 'error':\n _parse_error_message(run_dir)}\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(\n report.histogramBins))\n return {'title': _report_title(report.reportType,\n 'BeamHistogramReportType', beam_info), 'x_range': [edges[0], edges[\n -1]], 'y_label': 'Number of Particles', 'x_label':\n hellweg_dump_reader.get_label(report.reportType), 'points': hist.T.\n tolist()}\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.\n INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y)]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {'x_label':\n hellweg_dump_reader.get_label(x), 'y_label': hellweg_dump_reader.\n get_label(y), 'title': _report_title(report.reportType,\n 'BeamReportType', beam_info), 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir)})\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(os.path.join(str(run_dir), HELLWEG_INI_FILE), os.\n path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]], 'y_label': hellweg_dump_reader.\n get_parameter_label(y1_var), 'x_label': hellweg_dump_reader.\n get_parameter_label(x_field), 'x_points': x, 'points': [y1, y2],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1],\n y2_extent[1])], 'y1_title': hellweg_dump_reader.get_parameter_title\n (y1_var), 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var)}\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir),\n report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)], 'y_label': hellweg_dump_reader.\n get_label(report.reportType), 'x_label': hellweg_dump_reader.\n get_label(x_field), 'x_points': x, 'points': particle_info[\n 'y_values'], 'y_range': particle_info['y_range']}\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation',\n 
'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\n<mask token>\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(data, {'1': [\n 'reportType', 'histogramBins', 'startTime'], '': ['reportType',\n 'histogramBins', 'plotRangeType', 'horizontalSize',\n 'horizontalOffset', 'verticalSize', 'verticalOffset',\n 'isRunning', 'startTime']})\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'histogramBins', 'startTime']})\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'renderCount', 'startTime']})\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'startTime']})\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\n<mask token>\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data, run_dir, is_parallel))\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\n<mask token>\n\n\ndef _generate_cell_params(el):\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, 
el.\n acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.\n beam.spaceChargeCore)\n\n\n<mask token>\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(dist.meanPhase, dist.phaseLength, dist.\n phaseDeviation if dist.distributionType == 'gaussian' else '')\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.\n phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += '\\n'\n return res\n\n\n<mask token>\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\n<mask token>\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(dist.horizontalAlpha,\n dist.horizontalBeta, dist.horizontalEmittance, dist.\n verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.\n curvatureFactor, dist.thermalEmittance)\n if dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.\n rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\n<mask token>\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(_enum_text(enum_name, report_type), 100 *\n hellweg_dump_reader.get_parameter(beam_info, 'z'))\n\n\n<mask token>\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-2": "<mask token>\n\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {'percentComplete': 0, 'frameCount': 0}\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {'lastUpdateTime': last_update_time, 'percentComplete': 100,\n 'frameCount': frame_count, 'summaryData': _summary_text(run_dir)}\n return {'percentComplete': 100, 'frameCount': 0, 'error':\n _parse_error_message(run_dir)}\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(\n report.histogramBins))\n return {'title': _report_title(report.reportType,\n 'BeamHistogramReportType', beam_info), 'x_range': [edges[0], edges[\n -1]], 'y_label': 'Number of Particles', 'x_label':\n hellweg_dump_reader.get_label(report.reportType), 'points': hist.T.\n tolist()}\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.\n INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y)]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {'x_label':\n hellweg_dump_reader.get_label(x), 'y_label': hellweg_dump_reader.\n get_label(y), 'title': _report_title(report.reportType,\n 'BeamReportType', beam_info), 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir)})\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(os.path.join(str(run_dir), HELLWEG_INI_FILE), os.\n path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]], 'y_label': hellweg_dump_reader.\n get_parameter_label(y1_var), 'x_label': hellweg_dump_reader.\n get_parameter_label(x_field), 'x_points': x, 'points': [y1, y2],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1],\n y2_extent[1])], 'y1_title': hellweg_dump_reader.get_parameter_title\n (y1_var), 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var)}\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir),\n report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)], 'y_label': hellweg_dump_reader.\n get_label(report.reportType), 'x_label': hellweg_dump_reader.\n get_label(x_field), 'x_points': x, 'points': particle_info[\n 'y_values'], 'y_range': particle_info['y_range']}\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation',\n 
'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\n<mask token>\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_particle_ranges':\n return template_common.compute_field_range(data,\n _compute_range_across_files)\n assert False, 'unknown application data method: {}'.format(data['method'])\n\n\n<mask token>\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(data, {'1': [\n 'reportType', 'histogramBins', 'startTime'], '': ['reportType',\n 'histogramBins', 'plotRangeType', 'horizontalSize',\n 'horizontalOffset', 'verticalSize', 'verticalOffset',\n 'isRunning', 'startTime']})\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'histogramBins', 'startTime']})\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'renderCount', 'startTime']})\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'startTime']})\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\n<mask token>\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data, run_dir, is_parallel))\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if 
e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\ndef _generate_beam(models):\n beam_def = models.beam.beamDefinition\n if beam_def == 'transverse_longitude':\n return 'BEAM {} {}'.format(_generate_transverse_dist(models),\n _generate_longitude_dist(models))\n if beam_def == 'cst_pit':\n return 'BEAM CST_PIT {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile), 'COMPRESS' if models.\n beam.cstCompress else '')\n if beam_def == 'cst_pid':\n return 'BEAM CST_PID {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n raise RuntimeError('invalid beam def: {}'.format(beam_def))\n\n\ndef _generate_cell_params(el):\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.\n beam.spaceChargeCore)\n\n\n<mask token>\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(dist.meanPhase, dist.phaseLength, dist.\n phaseDeviation if dist.distributionType == 'gaussian' else '')\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.\n phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += '\\n'\n return res\n\n\ndef _generate_longitude_dist(models):\n dist_type = models.beam.longitudinalDistribution\n if dist_type == 'norm2d':\n dist = models.energyPhaseDistribution\n if dist.distributionType == 'uniform':\n return 'NORM2D {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.meanPhase, dist.phaseLength)\n if dist.distributionType == 'gaussian':\n return 'NORM2D {} {} {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.energyDeviation, dist.meanPhase, dist.\n phaseLength, dist.phaseDeviation)\n raise RuntimeError('unknown longitudinal distribution type: {}'.\n format(models.longitudinalDistribution.distributionType))\n if dist_type == 'file1d':\n return 'FILE1D {} {}'.format(template_common.lib_file_name('beam',\n 'longitudinalFile1d', models.beam.longitudinalFile1d),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n raise RuntimeError('unknown longitudinal distribution: {}'.format(\n models.beam.longitudinalDistribution))\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\n<mask token>\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 
'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(dist.horizontalAlpha,\n dist.horizontalBeta, dist.horizontalEmittance, dist.\n verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.\n curvatureFactor, dist.thermalEmittance)\n if dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.\n rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\n<mask token>\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(_enum_text(enum_name, report_type), 100 *\n hellweg_dump_reader.get_parameter(beam_info, 'z'))\n\n\ndef _simulation_files(data):\n res = []\n solenoid = data.models.solenoid\n if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:\n res.append(template_common.lib_file_name('solenoid', 'solenoidFile',\n solenoid.solenoidFile))\n beam = data.models.beam\n if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':\n res.append(template_common.lib_file_name('beam', 'cstFile', beam.\n cstFile))\n if beam.beamDefinition == 'transverse_longitude':\n if beam.transversalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n elif beam.transversalDistribution == 'file4d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n if beam.longitudinalDistribution == 'file1d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile1d', beam.longitudinalFile1d))\n if beam.longitudinalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile2d', beam.longitudinalFile2d))\n return res\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-3": "<mask token>\n\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {'percentComplete': 0, 'frameCount': 0}\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {'lastUpdateTime': last_update_time, 'percentComplete': 100,\n 'frameCount': frame_count, 'summaryData': _summary_text(run_dir)}\n return {'percentComplete': 100, 'frameCount': 0, 'error':\n _parse_error_message(run_dir)}\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(\n report.histogramBins))\n return {'title': _report_title(report.reportType,\n 'BeamHistogramReportType', beam_info), 'x_range': [edges[0], edges[\n -1]], 'y_label': 'Number of Particles', 'x_label':\n hellweg_dump_reader.get_label(report.reportType), 'points': hist.T.\n tolist()}\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.\n INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y)]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {'x_label':\n hellweg_dump_reader.get_label(x), 'y_label': hellweg_dump_reader.\n get_label(y), 'title': _report_title(report.reportType,\n 'BeamReportType', beam_info), 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir)})\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(os.path.join(str(run_dir), HELLWEG_INI_FILE), os.\n path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]], 'y_label': hellweg_dump_reader.\n get_parameter_label(y1_var), 'x_label': hellweg_dump_reader.\n get_parameter_label(x_field), 'x_points': x, 'points': [y1, y2],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1],\n y2_extent[1])], 'y1_title': hellweg_dump_reader.get_parameter_title\n (y1_var), 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var)}\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir),\n report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)], 'y_label': hellweg_dump_reader.\n get_label(report.reportType), 'x_label': hellweg_dump_reader.\n get_label(x_field), 'x_points': x, 'points': particle_info[\n 'y_values'], 'y_range': particle_info['y_range']}\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation',\n 
'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\ndef get_animation_name(data):\n return 'animation'\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_particle_ranges':\n return template_common.compute_field_range(data,\n _compute_range_across_files)\n assert False, 'unknown application data method: {}'.format(data['method'])\n\n\n<mask token>\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(data, {'1': [\n 'reportType', 'histogramBins', 'startTime'], '': ['reportType',\n 'histogramBins', 'plotRangeType', 'horizontalSize',\n 'horizontalOffset', 'verticalSize', 'verticalOffset',\n 'isRunning', 'startTime']})\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'histogramBins', 'startTime']})\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'renderCount', 'startTime']})\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'startTime']})\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\ndef models_related_to_report(data):\n \"\"\"What models are required for this data['report']\n\n Args:\n data (dict): simulation\n Returns:\n list: Named models, model fields or values (dict, list) that affect report\n \"\"\"\n r = data['report']\n if r == 'animation':\n return []\n res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [\n 'beam', 'ellipticalDistribution', 'energyPhaseDistribution',\n 'solenoid', 'sphericalDistribution', 'twissDistribution']\n for f in template_common.lib_files(data):\n res.append(f.mtime())\n return res\n\n\ndef python_source_for_model(data, model):\n return (\n \"\"\"\nfrom rslinac import solver\n\n{}\n\nwith open('input.txt', 'w') as f:\n f.write(input_file)\n\nwith open('defaults.ini', 'w') as f:\n f.write(ini_file)\n\ns = solver.BeamSolver('defaults.ini', 'input.txt')\ns.solve()\ns.save_output('output.txt')\n \"\"\"\n .format(_generate_parameters_file(data, is_parallel=len(data.models\n .beamline))))\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n 
pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data, run_dir, is_parallel))\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\ndef _generate_beam(models):\n beam_def = models.beam.beamDefinition\n if beam_def == 'transverse_longitude':\n return 'BEAM {} {}'.format(_generate_transverse_dist(models),\n _generate_longitude_dist(models))\n if beam_def == 'cst_pit':\n return 'BEAM CST_PIT {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile), 'COMPRESS' if models.\n beam.cstCompress else '')\n if beam_def == 'cst_pid':\n return 'BEAM CST_PID {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n raise RuntimeError('invalid beam def: {}'.format(beam_def))\n\n\ndef _generate_cell_params(el):\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.\n beam.spaceChargeCore)\n\n\ndef _generate_current(models):\n return 'CURRENT {} {}'.format(models.beam.current, models.beam.\n numberOfParticles)\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(dist.meanPhase, dist.phaseLength, dist.\n phaseDeviation if dist.distributionType == 'gaussian' else '')\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.\n phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += '\\n'\n return res\n\n\ndef _generate_longitude_dist(models):\n dist_type = models.beam.longitudinalDistribution\n if dist_type == 'norm2d':\n dist = models.energyPhaseDistribution\n if dist.distributionType == 'uniform':\n return 'NORM2D {} {} {} {}'.format(dist.meanEnergy, 
dist.\n energySpread, dist.meanPhase, dist.phaseLength)\n if dist.distributionType == 'gaussian':\n return 'NORM2D {} {} {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.energyDeviation, dist.meanPhase, dist.\n phaseLength, dist.phaseDeviation)\n raise RuntimeError('unknown longitudinal distribution type: {}'.\n format(models.longitudinalDistribution.distributionType))\n if dist_type == 'file1d':\n return 'FILE1D {} {}'.format(template_common.lib_file_name('beam',\n 'longitudinalFile1d', models.beam.longitudinalFile1d),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n raise RuntimeError('unknown longitudinal distribution: {}'.format(\n models.beam.longitudinalDistribution))\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\ndef _generate_parameters_file(data, run_dir=None, is_parallel=False):\n template_common.validate_models(data, _SCHEMA)\n v = template_common.flatten_data(data['models'], {})\n v['optionsCommand'] = _generate_options(data['models'])\n v['solenoidCommand'] = _generate_solenoid(data['models'])\n v['beamCommand'] = _generate_beam(data['models'])\n v['currentCommand'] = _generate_current(data['models'])\n v['chargeCommand'] = _generate_charge(data['models'])\n if is_parallel:\n v['latticeCommands'] = _generate_lattice(data['models'])\n else:\n v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT\n return template_common.render_jinja(SIM_TYPE, v)\n\n\ndef _generate_solenoid(models):\n solenoid = models.solenoid\n if solenoid.sourceDefinition == 'none':\n return ''\n if solenoid.sourceDefinition == 'values':\n return 'SOLENOID {} {} {}'.format(solenoid.fieldStrength, solenoid.\n length, solenoid.z0)\n if solenoid.sourceDefinition == 'file':\n return 'SOLENOID {}'.format(template_common.lib_file_name(\n 'solenoid', 'solenoidFile', solenoid.solenoidFile))\n raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.\n sourceDefinition))\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(dist.horizontalAlpha,\n dist.horizontalBeta, dist.horizontalEmittance, dist.\n verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.\n curvatureFactor, dist.thermalEmittance)\n if dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.\n rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\n<mask token>\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(_enum_text(enum_name, report_type), 100 *\n hellweg_dump_reader.get_parameter(beam_info, 
'z'))\n\n\ndef _simulation_files(data):\n res = []\n solenoid = data.models.solenoid\n if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:\n res.append(template_common.lib_file_name('solenoid', 'solenoidFile',\n solenoid.solenoidFile))\n beam = data.models.beam\n if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':\n res.append(template_common.lib_file_name('beam', 'cstFile', beam.\n cstFile))\n if beam.beamDefinition == 'transverse_longitude':\n if beam.transversalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n elif beam.transversalDistribution == 'file4d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n if beam.longitudinalDistribution == 'file1d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile1d', beam.longitudinalFile1d))\n if beam.longitudinalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile2d', beam.longitudinalFile2d))\n return res\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-4": "<mask token>\nfrom __future__ import absolute_import, division, print_function\nfrom pykern import pkcollections\nfrom pykern import pkio\nfrom pykern.pkdebug import pkdc, pkdp\nfrom rslinac import solver\nfrom sirepo import simulation_db\nfrom sirepo.template import template_common, hellweg_dump_reader\nimport math\nimport numpy as np\nimport os.path\nimport py.path\nimport re\nHELLWEG_DUMP_FILE = 'all-data.bin'\nHELLWEG_SUMMARY_FILE = 'output.txt'\nHELLWEG_INI_FILE = 'defaults.ini'\nHELLWEG_INPUT_FILE = 'input.txt'\nSIM_TYPE = 'hellweg'\nWANT_BROWSER_FRAME_CACHE = True\n_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + '\\n'\n_HELLWEG_PARSED_FILE = 'PARSED.TXT'\n_REPORT_STYLE_FIELDS = ['colorMap', 'notes']\n_SCHEMA = simulation_db.get_schema(SIM_TYPE)\n\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {'percentComplete': 0, 'frameCount': 0}\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {'lastUpdateTime': last_update_time, 'percentComplete': 100,\n 'frameCount': frame_count, 'summaryData': _summary_text(run_dir)}\n return {'percentComplete': 100, 'frameCount': 0, 'error':\n _parse_error_message(run_dir)}\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(\n report.histogramBins))\n return {'title': _report_title(report.reportType,\n 'BeamHistogramReportType', beam_info), 'x_range': [edges[0], edges[\n -1]], 'y_label': 'Number of Particles', 'x_label':\n hellweg_dump_reader.get_label(report.reportType), 'points': hist.T.\n tolist()}\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.\n INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y)]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {'x_label':\n hellweg_dump_reader.get_label(x), 'y_label': hellweg_dump_reader.\n get_label(y), 'title': _report_title(report.reportType,\n 'BeamReportType', beam_info), 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir)})\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(os.path.join(str(run_dir), HELLWEG_INI_FILE), os.\n path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]], 'y_label': hellweg_dump_reader.\n get_parameter_label(y1_var), 'x_label': hellweg_dump_reader.\n get_parameter_label(x_field), 'x_points': x, 'points': [y1, y2],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1],\n y2_extent[1])], 
'y1_title': hellweg_dump_reader.get_parameter_title\n (y1_var), 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var)}\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir),\n report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)], 'y_label': hellweg_dump_reader.\n get_label(report.reportType), 'x_label': hellweg_dump_reader.\n get_label(x_field), 'x_points': x, 'points': particle_info[\n 'y_values'], 'y_range': particle_info['y_range']}\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation',\n 'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\ndef get_animation_name(data):\n return 'animation'\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_particle_ranges':\n return template_common.compute_field_range(data,\n _compute_range_across_files)\n assert False, 'unknown application data method: {}'.format(data['method'])\n\n\ndef lib_files(data, source_lib):\n return template_common.filename_to_path(_simulation_files(data), source_lib\n )\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(data, {'1': [\n 'reportType', 'histogramBins', 'startTime'], '': ['reportType',\n 'histogramBins', 'plotRangeType', 'horizontalSize',\n 'horizontalOffset', 'verticalSize', 'verticalOffset',\n 'isRunning', 'startTime']})\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'histogramBins', 'startTime']})\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'renderCount', 'startTime']})\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(data, {'': [\n 'reportType', 'startTime']})\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\ndef models_related_to_report(data):\n \"\"\"What models are required for this data['report']\n\n Args:\n data (dict): simulation\n Returns:\n list: Named models, model fields or values (dict, list) that affect report\n \"\"\"\n r = data['report']\n if r == 'animation':\n return []\n res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [\n 'beam', 'ellipticalDistribution', 'energyPhaseDistribution',\n 'solenoid', 'sphericalDistribution', 'twissDistribution']\n for f in template_common.lib_files(data):\n res.append(f.mtime())\n return res\n\n\ndef 
python_source_for_model(data, model):\n return (\n \"\"\"\nfrom rslinac import solver\n\n{}\n\nwith open('input.txt', 'w') as f:\n f.write(input_file)\n\nwith open('defaults.ini', 'w') as f:\n f.write(ini_file)\n\ns = solver.BeamSolver('defaults.ini', 'input.txt')\ns.solve()\ns.save_output('output.txt')\n \"\"\"\n .format(_generate_parameters_file(data, is_parallel=len(data.models\n .beamline))))\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data, run_dir, is_parallel))\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\ndef _generate_beam(models):\n beam_def = models.beam.beamDefinition\n if beam_def == 'transverse_longitude':\n return 'BEAM {} {}'.format(_generate_transverse_dist(models),\n _generate_longitude_dist(models))\n if beam_def == 'cst_pit':\n return 'BEAM CST_PIT {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile), 'COMPRESS' if models.\n beam.cstCompress else '')\n if beam_def == 'cst_pid':\n return 'BEAM CST_PID {} {}'.format(template_common.lib_file_name(\n 'beam', 'cstFile', models.beam.cstFile),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n raise RuntimeError('invalid beam def: {}'.format(beam_def))\n\n\ndef _generate_cell_params(el):\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.\n acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} {}'.format(models.beam.spaceCharge.upper(), models.\n beam.spaceChargeCore)\n\n\ndef _generate_current(models):\n return 'CURRENT {} {}'.format(models.beam.current, models.beam.\n numberOfParticles)\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(dist.meanPhase, dist.phaseLength, dist.\n phaseDeviation if dist.distributionType == 'gaussian' else '')\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} 
{}'.format(el.inputPower, el.frequency, el.\n phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += '\\n'\n return res\n\n\ndef _generate_longitude_dist(models):\n dist_type = models.beam.longitudinalDistribution\n if dist_type == 'norm2d':\n dist = models.energyPhaseDistribution\n if dist.distributionType == 'uniform':\n return 'NORM2D {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.meanPhase, dist.phaseLength)\n if dist.distributionType == 'gaussian':\n return 'NORM2D {} {} {} {} {} {}'.format(dist.meanEnergy, dist.\n energySpread, dist.energyDeviation, dist.meanPhase, dist.\n phaseLength, dist.phaseDeviation)\n raise RuntimeError('unknown longitudinal distribution type: {}'.\n format(models.longitudinalDistribution.distributionType))\n if dist_type == 'file1d':\n return 'FILE1D {} {}'.format(template_common.lib_file_name('beam',\n 'longitudinalFile1d', models.beam.longitudinalFile1d),\n _generate_energy_phase_distribution(models.energyPhaseDistribution)\n )\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n raise RuntimeError('unknown longitudinal distribution: {}'.format(\n models.beam.longitudinalDistribution))\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\ndef _generate_parameters_file(data, run_dir=None, is_parallel=False):\n template_common.validate_models(data, _SCHEMA)\n v = template_common.flatten_data(data['models'], {})\n v['optionsCommand'] = _generate_options(data['models'])\n v['solenoidCommand'] = _generate_solenoid(data['models'])\n v['beamCommand'] = _generate_beam(data['models'])\n v['currentCommand'] = _generate_current(data['models'])\n v['chargeCommand'] = _generate_charge(data['models'])\n if is_parallel:\n v['latticeCommands'] = _generate_lattice(data['models'])\n else:\n v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT\n return template_common.render_jinja(SIM_TYPE, v)\n\n\ndef _generate_solenoid(models):\n solenoid = models.solenoid\n if solenoid.sourceDefinition == 'none':\n return ''\n if solenoid.sourceDefinition == 'values':\n return 'SOLENOID {} {} {}'.format(solenoid.fieldStrength, solenoid.\n length, solenoid.z0)\n if solenoid.sourceDefinition == 'file':\n return 'SOLENOID {}'.format(template_common.lib_file_name(\n 'solenoid', 'solenoidFile', solenoid.solenoidFile))\n raise RuntimeError('unknown solenoidDefinition: {}'.format(solenoid.\n sourceDefinition))\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(dist.horizontalAlpha,\n dist.horizontalBeta, dist.horizontalEmittance, dist.\n verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.\n curvatureFactor, dist.thermalEmittance)\n if 
dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.\n rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\ndef _parse_error_message(run_dir):\n path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)\n if not os.path.exists(path):\n return 'No elements generated'\n text = pkio.read_text(str(path))\n for line in text.split('\\n'):\n match = re.search('^ERROR:\\\\s(.*)$', line)\n if match:\n return match.group(1)\n return 'No output generated'\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(_enum_text(enum_name, report_type), 100 *\n hellweg_dump_reader.get_parameter(beam_info, 'z'))\n\n\ndef _simulation_files(data):\n res = []\n solenoid = data.models.solenoid\n if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:\n res.append(template_common.lib_file_name('solenoid', 'solenoidFile',\n solenoid.solenoidFile))\n beam = data.models.beam\n if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':\n res.append(template_common.lib_file_name('beam', 'cstFile', beam.\n cstFile))\n if beam.beamDefinition == 'transverse_longitude':\n if beam.transversalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile2d', beam.transversalFile2d))\n elif beam.transversalDistribution == 'file4d':\n res.append(template_common.lib_file_name('beam',\n 'transversalFile4d', beam.transversalFile4d))\n if beam.longitudinalDistribution == 'file1d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile1d', beam.longitudinalFile1d))\n if beam.longitudinalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam',\n 'longitudinalFile2d', beam.longitudinalFile2d))\n return res\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-5": "# -*- coding: utf-8 -*-\nu\"\"\"Hellweg execution template.\n\n:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom pykern import pkcollections\nfrom pykern import pkio\nfrom pykern.pkdebug import pkdc, pkdp\nfrom rslinac import solver\nfrom sirepo import simulation_db\nfrom sirepo.template import template_common, hellweg_dump_reader\nimport math\nimport numpy as np\nimport os.path\nimport py.path\nimport re\n\nHELLWEG_DUMP_FILE = 'all-data.bin'\n\nHELLWEG_SUMMARY_FILE = 'output.txt'\n\nHELLWEG_INI_FILE = 'defaults.ini'\n\nHELLWEG_INPUT_FILE = 'input.txt'\n\n#: Simulation type\nSIM_TYPE = 'hellweg'\n\nWANT_BROWSER_FRAME_CACHE = True\n\n# lattice element is required so make it very short and wide drift\n_DEFAULT_DRIFT_ELEMENT = 'DRIFT 1e-16 1e+16 2' + \"\\n\"\n\n_HELLWEG_PARSED_FILE = 'PARSED.TXT'\n\n_REPORT_STYLE_FIELDS = ['colorMap', 'notes']\n\n_SCHEMA = simulation_db.get_schema(SIM_TYPE)\n\ndef background_percent_complete(report, run_dir, is_running):\n if is_running:\n return {\n 'percentComplete': 0,\n 'frameCount': 0,\n }\n dump_file = _dump_file(run_dir)\n if os.path.exists(dump_file):\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n last_update_time = int(os.path.getmtime(dump_file))\n frame_count = beam_header.NPoints\n return {\n 'lastUpdateTime': last_update_time,\n 'percentComplete': 100,\n 'frameCount': frame_count,\n 'summaryData': _summary_text(run_dir),\n }\n return {\n 'percentComplete': 100,\n 'frameCount': 0,\n 'error': _parse_error_message(run_dir)\n }\n\n\ndef extract_beam_histrogram(report, run_dir, frame):\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n points = hellweg_dump_reader.get_points(beam_info, report.reportType)\n hist, edges = np.histogram(points, template_common.histogram_bins(report.histogramBins))\n return {\n 'title': _report_title(report.reportType, 'BeamHistogramReportType', beam_info),\n 'x_range': [edges[0], edges[-1]],\n 'y_label': 'Number of Particles',\n 'x_label': hellweg_dump_reader.get_label(report.reportType),\n 'points': hist.T.tolist(),\n }\n\n\ndef extract_beam_report(report, run_dir, frame):\n data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))\n model = data.models.beamAnimation\n model.update(report)\n beam_info = hellweg_dump_reader.beam_info(_dump_file(run_dir), frame)\n x, y = report.reportType.split('-')\n values = [\n hellweg_dump_reader.get_points(beam_info, x),\n hellweg_dump_reader.get_points(beam_info, y),\n ]\n model['x'] = x\n model['y'] = y\n return template_common.heatmap(values, model, {\n 'x_label': hellweg_dump_reader.get_label(x),\n 'y_label': hellweg_dump_reader.get_label(y),\n 'title': _report_title(report.reportType, 'BeamReportType', beam_info),\n 'z_label': 'Number of Particles',\n 'summaryData': _summary_text(run_dir),\n })\n\n\ndef extract_parameter_report(report, run_dir):\n s = solver.BeamSolver(\n os.path.join(str(run_dir), HELLWEG_INI_FILE),\n os.path.join(str(run_dir), HELLWEG_INPUT_FILE))\n s.load_bin(os.path.join(str(run_dir), HELLWEG_DUMP_FILE))\n y1_var, y2_var = report.reportType.split('-')\n x_field = 'z'\n x = s.get_structure_parameters(_parameter_index(x_field))\n y1 = s.get_structure_parameters(_parameter_index(y1_var))\n y1_extent = [np.min(y1), np.max(y1)]\n y2 = s.get_structure_parameters(_parameter_index(y2_var))\n y2_extent = [np.min(y2), np.max(y2)]\n return {\n 
'title': _enum_text('ParameterReportType', report.reportType),\n 'x_range': [x[0], x[-1]],\n 'y_label': hellweg_dump_reader.get_parameter_label(y1_var),\n 'x_label': hellweg_dump_reader.get_parameter_label(x_field),\n 'x_points': x,\n 'points': [\n y1,\n y2,\n ],\n 'y_range': [min(y1_extent[0], y2_extent[0]), max(y1_extent[1], y2_extent[1])],\n 'y1_title': hellweg_dump_reader.get_parameter_title(y1_var),\n 'y2_title': hellweg_dump_reader.get_parameter_title(y2_var),\n }\n\n\ndef extract_particle_report(report, run_dir):\n x_field = 'z0'\n particle_info = hellweg_dump_reader.particle_info(_dump_file(run_dir), report.reportType, int(report.renderCount))\n x = particle_info['z_values']\n return {\n 'title': _enum_text('ParticleReportType', report.reportType),\n 'x_range': [np.min(x), np.max(x)],\n 'y_label': hellweg_dump_reader.get_label(report.reportType),\n 'x_label': hellweg_dump_reader.get_label(x_field),\n 'x_points': x,\n 'points': particle_info['y_values'],\n 'y_range': particle_info['y_range'],\n }\n\n\ndef fixup_old_data(data):\n for m in ('beamAnimation', 'beamHistogramAnimation', 'parameterAnimation', 'particleAnimation'):\n if m not in data.models:\n data.models[m] = pkcollections.Dict({})\n template_common.update_model_defaults(data.models[m], m, _SCHEMA)\n if 'solenoidFile' not in data['models']['solenoid']:\n data['models']['solenoid']['solenoidFile'] = ''\n if 'beamDefinition' not in data['models']['beam']:\n beam = data['models']['beam']\n beam['beamDefinition'] = 'transverse_longitude'\n beam['cstCompress'] = '0'\n beam['transversalFile2d'] = ''\n beam['transversalFile4d'] = ''\n beam['longitudinalFile1d'] = ''\n beam['longitudinalFile2d'] = ''\n beam['cstFile'] = ''\n template_common.organize_example(data)\n\n\ndef get_animation_name(data):\n return 'animation'\n\n\ndef get_application_data(data):\n if data['method'] == 'compute_particle_ranges':\n return template_common.compute_field_range(data, _compute_range_across_files)\n assert False, 'unknown application data method: {}'.format(data['method'])\n\n\ndef lib_files(data, source_lib):\n return template_common.filename_to_path(_simulation_files(data), source_lib)\n\n\ndef get_simulation_frame(run_dir, data, model_data):\n frame_index = int(data['frameIndex'])\n if data['modelName'] == 'beamAnimation':\n args = template_common.parse_animation_args(\n data,\n {\n '1': ['reportType', 'histogramBins', 'startTime'],\n '': ['reportType', 'histogramBins', 'plotRangeType', 'horizontalSize', 'horizontalOffset', 'verticalSize', 'verticalOffset', 'isRunning', 'startTime'],\n },\n )\n return extract_beam_report(args, run_dir, frame_index)\n elif data['modelName'] == 'beamHistogramAnimation':\n args = template_common.parse_animation_args(\n data,\n {'': ['reportType', 'histogramBins', 'startTime']},\n )\n return extract_beam_histrogram(args, run_dir, frame_index)\n elif data['modelName'] == 'particleAnimation':\n args = template_common.parse_animation_args(\n data,\n {'': ['reportType', 'renderCount', 'startTime']},\n )\n return extract_particle_report(args, run_dir)\n elif data['modelName'] == 'parameterAnimation':\n args = template_common.parse_animation_args(\n data,\n {'': ['reportType', 'startTime']},\n )\n return extract_parameter_report(args, run_dir)\n raise RuntimeError('unknown animation model: {}'.format(data['modelName']))\n\n\ndef models_related_to_report(data):\n \"\"\"What models are required for this data['report']\n\n Args:\n data (dict): simulation\n Returns:\n list: Named models, model fields or values (dict, list) 
that affect report\n \"\"\"\n r = data['report']\n if r == 'animation':\n return []\n res = template_common.report_fields(data, r, _REPORT_STYLE_FIELDS) + [\n 'beam',\n 'ellipticalDistribution',\n 'energyPhaseDistribution',\n 'solenoid',\n 'sphericalDistribution',\n 'twissDistribution',\n ]\n for f in template_common.lib_files(data):\n res.append(f.mtime())\n return res\n\n\ndef python_source_for_model(data, model):\n return '''\nfrom rslinac import solver\n\n{}\n\nwith open('input.txt', 'w') as f:\n f.write(input_file)\n\nwith open('defaults.ini', 'w') as f:\n f.write(ini_file)\n\ns = solver.BeamSolver('defaults.ini', 'input.txt')\ns.solve()\ns.save_output('output.txt')\n '''.format(_generate_parameters_file(data, is_parallel=len(data.models.beamline)))\n\n\ndef remove_last_frame(run_dir):\n pass\n\n\ndef validate_delete_file(data, filename, file_type):\n \"\"\"Returns True if the filename is in use by the simulation data.\"\"\"\n return filename in _simulation_files(data)\n\n\ndef write_parameters(data, run_dir, is_parallel):\n \"\"\"Write the parameters file\n\n Args:\n data (dict): input\n run_dir (py.path): where to write\n is_parallel (bool): run in background?\n \"\"\"\n pkio.write_text(\n run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(\n data,\n run_dir,\n is_parallel,\n ),\n )\n\n\ndef _compute_range_across_files(run_dir, data):\n res = {}\n for v in _SCHEMA.enum.BeamReportType:\n x, y = v[0].split('-')\n res[x] = []\n res[y] = []\n dump_file = _dump_file(run_dir)\n if not os.path.exists(dump_file):\n return res\n beam_header = hellweg_dump_reader.beam_header(dump_file)\n for frame in xrange(beam_header.NPoints):\n beam_info = hellweg_dump_reader.beam_info(dump_file, frame)\n for field in res:\n values = hellweg_dump_reader.get_points(beam_info, field)\n if not len(values):\n pass\n elif len(res[field]):\n res[field][0] = min(min(values), res[field][0])\n res[field][1] = max(max(values), res[field][1])\n else:\n res[field] = [min(values), max(values)]\n return res\n\n\ndef _dump_file(run_dir):\n return os.path.join(str(run_dir), HELLWEG_DUMP_FILE)\n\n\ndef _enum_text(enum_name, v):\n enum_values = _SCHEMA['enum'][enum_name]\n for e in enum_values:\n if e[0] == v:\n return e[1]\n raise RuntimeError('invalid enum value: {}, {}'.format(enum_values, v))\n\n\ndef _generate_beam(models):\n # BEAM SPH2D 0.564 -15 5 NORM2D 0.30 0.0000001 90 180\n beam_def = models.beam.beamDefinition\n if beam_def == 'transverse_longitude':\n return 'BEAM {} {}'.format(_generate_transverse_dist(models), _generate_longitude_dist(models))\n if beam_def == 'cst_pit':\n return 'BEAM CST_PIT {} {}'.format(\n template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),\n 'COMPRESS' if models.beam.cstCompress else '',\n )\n if beam_def == 'cst_pid':\n return 'BEAM CST_PID {} {}'.format(\n template_common.lib_file_name('beam', 'cstFile', models.beam.cstFile),\n _generate_energy_phase_distribution(models.energyPhaseDistribution),\n )\n raise RuntimeError('invalid beam def: {}'.format(beam_def))\n\n\ndef _generate_cell_params(el):\n #TODO(pjm): add an option field to select auto-calculate\n if el.attenuation == 0 and el.aperture == 0:\n return '{} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant)\n return '{} {} {} {} {}'.format(el.phaseAdvance, el.phaseVelocity, el.acceleratingInvariant, el.attenuation, el.aperture)\n\n\ndef _generate_charge(models):\n if models.beam.spaceCharge == 'none':\n return ''\n return 'SPCHARGE {} 
{}'.format(models.beam.spaceCharge.upper(), models.beam.spaceChargeCore)\n\n\ndef _generate_current(models):\n return 'CURRENT {} {}'.format(models.beam.current, models.beam.numberOfParticles)\n\n\ndef _generate_energy_phase_distribution(dist):\n return '{} {} {}'.format(\n dist.meanPhase,\n dist.phaseLength,\n dist.phaseDeviation if dist.distributionType == 'gaussian' else '',\n )\n\n\ndef _generate_lattice(models):\n res = ''\n for el in models.beamline:\n if el.type == 'powerElement':\n res += 'POWER {} {} {}'.format(el.inputPower, el.frequency, el.phaseShift)\n elif el.type == 'cellElement':\n res += 'CELL {}'.format(_generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'cellsElement':\n res += 'CELLS {} {}'.format(el.repeat, _generate_cell_params(el))\n has_cell_or_drift = True\n elif el.type == 'driftElement':\n res += 'DRIFT {} {} {}'.format(el.length, el.radius, el.meshPoints)\n has_cell_or_drift = True\n elif el.type == 'saveElement':\n #TODO(pjm): implement this\n pass\n else:\n raise RuntimeError('unknown element type: {}'.format(el.type))\n res += \"\\n\"\n return res\n\n\ndef _generate_longitude_dist(models):\n dist_type = models.beam.longitudinalDistribution\n if dist_type == 'norm2d':\n dist = models.energyPhaseDistribution\n if dist.distributionType == 'uniform':\n return 'NORM2D {} {} {} {}'.format(\n dist.meanEnergy, dist.energySpread, dist.meanPhase, dist.phaseLength)\n if dist.distributionType == 'gaussian':\n return 'NORM2D {} {} {} {} {} {}'.format(\n dist.meanEnergy, dist.energySpread, dist.energyDeviation, dist.meanPhase, dist.phaseLength, dist.phaseDeviation)\n raise RuntimeError('unknown longitudinal distribution type: {}'.format(models.longitudinalDistribution.distributionType))\n if dist_type == 'file1d':\n return 'FILE1D {} {}'.format(\n template_common.lib_file_name('beam', 'longitudinalFile1d', models.beam.longitudinalFile1d),\n _generate_energy_phase_distribution(models.energyPhaseDistribution),\n )\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))\n\n raise RuntimeError('unknown longitudinal distribution: {}'.format(models.beam.longitudinalDistribution))\n\n\ndef _generate_options(models):\n if models.simulationSettings.allowBackwardWaves == '1':\n return 'OPTIONS REVERSE'\n return ''\n\n\ndef _generate_parameters_file(data, run_dir=None, is_parallel=False):\n template_common.validate_models(data, _SCHEMA)\n v = template_common.flatten_data(data['models'], {})\n v['optionsCommand'] = _generate_options(data['models'])\n v['solenoidCommand'] = _generate_solenoid(data['models'])\n v['beamCommand'] = _generate_beam(data['models'])\n v['currentCommand'] = _generate_current(data['models'])\n v['chargeCommand'] = _generate_charge(data['models'])\n if is_parallel:\n v['latticeCommands'] = _generate_lattice(data['models'])\n else:\n v['latticeCommands'] = _DEFAULT_DRIFT_ELEMENT\n return template_common.render_jinja(SIM_TYPE, v)\n\n\ndef _generate_solenoid(models):\n solenoid = models.solenoid\n if solenoid.sourceDefinition == 'none':\n return ''\n if solenoid.sourceDefinition == 'values':\n #TODO(pjm): latest version also has solenoid.fringeRegion\n return 'SOLENOID {} {} {}'.format(\n solenoid.fieldStrength, solenoid.length, solenoid.z0)\n if solenoid.sourceDefinition == 'file':\n return 'SOLENOID {}'.format(\n template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))\n raise RuntimeError('unknown solenoidDefinition: 
{}'.format(solenoid.sourceDefinition))\n\n\ndef _generate_transverse_dist(models):\n dist_type = models.beam.transversalDistribution\n if dist_type == 'twiss4d':\n dist = models.twissDistribution\n return 'TWISS4D {} {} {} {} {} {}'.format(\n dist.horizontalAlpha, dist.horizontalBeta, dist.horizontalEmittance,\n dist.verticalAlpha, dist.verticalBeta, dist.verticalEmittance)\n if dist_type == 'sph2d':\n dist = models.sphericalDistribution\n if dist.curvature == 'flat':\n dist.curvatureFactor = 0\n return 'SPH2D {} {} {}'.format(dist.radialLimit, dist.curvatureFactor, dist.thermalEmittance)\n if dist_type == 'ell2d':\n dist = models.ellipticalDistribution\n return 'ELL2D {} {} {} {}'.format(dist.aX, dist.bY, dist.rotationAngle, dist.rmsDeviationFactor)\n beam = models.beam\n if dist_type == 'file2d':\n return 'FILE2D {}'.format(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))\n if dist_type == 'file4d':\n return 'FILE4D {}'.format(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))\n raise RuntimeError('unknown transverse distribution: {}'.format(dist_type))\n\n\ndef _parameter_index(name):\n return hellweg_dump_reader.parameter_index(name)\n\n\ndef _parse_error_message(run_dir):\n path = os.path.join(str(run_dir), _HELLWEG_PARSED_FILE)\n if not os.path.exists(path):\n return 'No elements generated'\n text = pkio.read_text(str(path))\n for line in text.split(\"\\n\"):\n match = re.search('^ERROR:\\s(.*)$', line)\n if match:\n return match.group(1)\n return 'No output generated'\n\n\ndef _report_title(report_type, enum_name, beam_info):\n return '{}, z={:.4f} cm'.format(\n _enum_text(enum_name, report_type),\n 100 * hellweg_dump_reader.get_parameter(beam_info, 'z'))\n\n\ndef _simulation_files(data):\n res = []\n solenoid = data.models.solenoid\n if solenoid.sourceDefinition == 'file' and solenoid.solenoidFile:\n res.append(template_common.lib_file_name('solenoid', 'solenoidFile', solenoid.solenoidFile))\n beam = data.models.beam\n if beam.beamDefinition == 'cst_pit' or beam.beamDefinition == 'cst_pid':\n res.append(template_common.lib_file_name('beam', 'cstFile', beam.cstFile))\n if beam.beamDefinition == 'transverse_longitude':\n if beam.transversalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam', 'transversalFile2d', beam.transversalFile2d))\n elif beam.transversalDistribution == 'file4d':\n res.append(template_common.lib_file_name('beam', 'transversalFile4d', beam.transversalFile4d))\n if beam.longitudinalDistribution == 'file1d':\n res.append(template_common.lib_file_name('beam', 'longitudinalFile1d', beam.longitudinalFile1d))\n if beam.longitudinalDistribution == 'file2d':\n res.append(template_common.lib_file_name('beam', 'longitudinalFile2d', beam.longitudinalFile2d))\n return res\n\n\ndef _summary_text(run_dir):\n return pkio.read_text(os.path.join(str(run_dir), HELLWEG_SUMMARY_FILE))\n",
"step-ids": [
22,
26,
32,
36,
37
]
}
|
[
22,
26,
32,
36,
37
] |
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
charset = {"big5": ["big5_chinese_ci", "big5_bin"],
"dec8": ["dec8_swedish_ci", "dec8_bin"],
"cp850": ["cp850_general_ci", "cp850_bin"],
"hp8": ["hp8_english_ci", "hp8_bin"],
"koi8r": ["koi8r_general_ci", "koi8r_bin"],
"latin1": ["latin1_swedish_ci",
"latin1_german1_ci",
"latin1_danish_ci",
"latin1_german2_ci",
"latin1_bin",
"latin1_general_ci",
"latin1_general_cs",
"latin1_spanish_ci"],
"latin2": ["latin2_general_ci",
"latin2_czech_cs",
"latin2_hungarian_ci",
"latin2_croatian_ci",
"latin2_bin"],
"swe7": ["swe7_swedish_ci", "swe7_bin"],
"ascii": ["ascii_general_ci", "ascii_bin"],
"ujis": ["ujis_japanese_ci", "ujis_bin"],
"sjis": ["sjis_japanese_ci", "sjis_bin"],
"hebrew": ["hebrew_general_ci", "hebrew_bin"],
"tis620": ["tis620_thai_ci", "tis620_bin"],
"euckr": ["euckr_korean_ci", "euckr_bin"],
"koi8u": ["koi8u_general_ci", "koi8u_bin"],
"gb2312": ["gb2312_chinese_ci", "gb2312_bin"],
"greek": ["greek_general_ci", "greek_bin"],
"cp1250": ["cp1250_general_ci",
"cp1250_czech_cs",
"cp1250_croatian_ci",
"cp1250_bin",
"cp1250_polish_ci"],
"gbk": ["gbk_chinese_ci", "gbk_bin"],
"latin5": ["latin5_turkish_ci", "latin5_bin"],
"armscii8": ["armscii8_general_ci", "armscii8_bin"],
"utf8": ["utf8_general_ci",
"utf8_bin",
"utf8_unicode_ci",
"utf8_icelandic_ci",
"utf8_latvian_ci",
"utf8_romanian_ci",
"utf8_slovenian_ci",
"utf8_polish_ci",
"utf8_estonian_ci",
"utf8_spanish_ci",
"utf8_swedish_ci",
"utf8_turkish_ci",
"utf8_czech_ci",
"utf8_danish_ci",
"utf8_lithuanian_ci",
"utf8_slovak_ci",
"utf8_spanish2_ci",
"utf8_roman_ci",
"utf8_persian_ci",
"utf8_esperanto_ci",
"utf8_hungarian_ci",
"utf8_sinhala_ci",
"utf8_german2_ci",
"utf8_croatian_ci",
"utf8_unicode_520_ci",
"utf8_vietnamese_ci",
"utf8_general_mysql500_ci"
],
"utf8mb4": ["utf8mb4_0900_ai_ci"],
"utf8mb3": ["utf8mb3_general_ci"],
"ucs2": ["ucs2_general_ci",
"ucs2_bin",
"ucs2_unicode_ci",
"ucs2_icelandic_ci",
"ucs2_latvian_ci",
"ucs2_romanian_ci",
"ucs2_slovenian_ci",
"ucs2_polish_ci",
"ucs2_estonian_ci",
"ucs2_spanish_ci",
"ucs2_swedish_ci",
"ucs2_turkish_ci",
"ucs2_czech_ci",
"ucs2_danish_ci",
"ucs2_lithuanian_ci",
"ucs2_slovak_ci",
"ucs2_spanish2_ci",
"ucs2_roman_ci",
"ucs2_persian_ci",
"ucs2_esperanto_ci",
"ucs2_hungarian_ci",
"ucs2_sinhala_ci",
"ucs2_german2_ci",
"ucs2_croatian_ci",
"ucs2_unicode_520_ci",
"ucs2_vietnamese_ci",
"ucs2_general_mysql500_ci"
],
"cp866": ["cp866_general_ci", "cp866_bin"],
"keybcs2": ["keybcs2_general_ci", "keybcs2_bin"],
"macce": ["macce_general_ci", "macce_bin"],
"macroman": ["macroman_general_ci", "macroman_bin"],
"cp852": ["cp852_general_ci", "cp852_bin"],
"latin7": ["latin7_general_ci",
"latin7_estonian_cs",
"latin7_general_cs",
"latin7_bin"],
"utf8mb4": ["utf8mb4_general_ci",
"utf8mb4_bin",
"utf8mb4_unicode_ci",
"utf8mb4_icelandic_ci",
"utf8mb4_latvian_ci",
"utf8mb4_romanian_ci",
"utf8mb4_slovenian_ci",
"utf8mb4_polish_ci",
"utf8mb4_estonian_ci",
"utf8mb4_spanish_ci",
"utf8mb4_swedish_ci",
"utf8mb4_turkish_ci",
"utf8mb4_czech_ci",
"utf8mb4_danish_ci",
"utf8mb4_lithuanian_ci",
"utf8mb4_slovak_ci",
"utf8mb4_spanish2_ci",
"utf8mb4_roman_ci",
"utf8mb4_persian_ci",
"utf8mb4_esperanto_ci",
"utf8mb4_hungarian_ci",
"utf8mb4_sinhala_ci",
"utf8mb4_german2_ci",
"utf8mb4_croatian_ci",
"utf8mb4_unicode_520_ci",
"utf8mb4_vietnamese_ci"],
"cp1251": ["cp1251_general_ci",
"cp1251_bulgarian_ci",
"cp1251_ukrainian_ci",
"cp1251_bin",
"cp1251_general_cs"],
"utf16": ["utf16_general_ci",
"utf16_bin",
"utf16_unicode_ci",
"utf16_icelandic_ci",
"utf16_latvian_ci",
"utf16_romanian_ci",
"utf16_slovenian_ci",
"utf16_polish_ci",
"utf16_estonian_ci",
"utf16_spanish_ci",
"utf16_swedish_ci",
"utf16_turkish_ci",
"utf16_czech_ci",
"utf16_danish_ci",
"utf16_lithuanian_ci",
"utf16_slovak_ci",
"utf16_spanish2_ci",
"utf16_roman_ci",
"utf16_persian_ci",
"utf16_esperanto_ci",
"utf16_hungarian_ci",
"utf16_sinhala_ci",
"utf16_german2_ci",
"utf16_croatian_ci",
"utf16_unicode_520_ci",
"utf16_vietnamese_ci"],
"utf16le": ["utf16le_general_ci",
"utf16le_bin"],
"cp1256": ["cp1256_general_ci", "cp1256_bin"],
"cp1257": ["cp1257_general_ci",
"cp1257_lithuanian_ci",
"cp1257_bin"],
"utf32": ["utf32_general_ci",
"utf32_bin",
"utf32_unicode_ci",
"utf32_icelandic_ci",
"utf32_latvian_ci",
"utf32_romanian_ci",
"utf32_slovenian_ci",
"utf32_polish_ci",
"utf32_estonian_ci",
"utf32_spanish_ci",
"utf32_swedish_ci",
"utf32_turkish_ci",
"utf32_czech_ci",
"utf32_danish_ci",
"utf32_lithuanian_ci",
"utf32_slovak_ci",
"utf32_spanish2_ci",
"utf32_roman_ci",
"utf32_persian_ci",
"utf32_esperanto_ci",
"utf32_hungarian_ci",
"utf32_sinhala_ci",
"utf32_german2_ci",
"utf32_croatian_ci",
"utf32_unicode_520_ci",
"utf32_vietnamese_ci"],
"binary": ["binary"],
"geostd8": ["geostd8_general_ci", "geostd8_bin"],
"cp932": ["cp932_japanese_ci", "cp932_bin"],
"eucjpms": ["eucjpms_japanese_ci", "eucjpms_bin"],
"gb18030": ["gb18030_chinese_ci",
"gb18030_bin",
"gb18030_unicode_520_ci"]}
collation = {"big5_chinese_ci": "big5",
"big5_bin": "big5",
"dec8_swedish_ci": "dec8",
"dec8_bin": "dec8",
"cp850_general_ci": "cp850",
"cp850_bin": "cp850",
"hp8_english_ci": "hp8",
"hp8_bin": "hp8",
"koi8r_general_ci": "koi8r",
"koi8r_bin": "koi8r",
"latin1_german1_ci": "latin1",
"latin1_swedish_ci": "latin1",
"latin1_danish_ci": "latin1",
"latin1_german2_ci": "latin1",
"latin1_bin": "latin1",
"latin1_general_ci": "latin1",
"latin1_general_cs": "latin1",
"latin1_spanish_ci": "latin1",
"latin2_czech_cs": "latin2",
"latin2_general_ci": "latin2",
"latin2_hungarian_ci": "latin2",
"latin2_croatian_ci": "latin2",
"latin2_bin": "latin2",
"swe7_swedish_ci": "swe7",
"swe7_bin": "swe7",
"ascii_general_ci": "ascii",
"ascii_bin": "ascii",
"ujis_japanese_ci": "ujis",
"ujis_bin": "ujis",
"sjis_japanese_ci": "sjis",
"sjis_bin": "sjis",
"hebrew_general_ci": "hebrew",
"hebrew_bin": "hebrew",
"tis620_thai_ci": "tis620",
"tis620_bin": "tis620",
"euckr_korean_ci": "euckr",
"euckr_bin": "euckr",
"koi8u_general_ci": "koi8u",
"koi8u_bin": "koi8u",
"gb2312_chinese_ci": "gb2312",
"gb2312_bin": "gb2312",
"greek_general_ci": "greek",
"greek_bin": "greek",
"cp1250_general_ci": "cp1250",
"cp1250_czech_cs": "cp1250",
"cp1250_croatian_ci": "cp1250",
"cp1250_bin": "cp1250",
"cp1250_polish_ci": "cp1250",
"gbk_chinese_ci": "gbk",
"gbk_bin": "gbk",
"latin5_turkish_ci": "latin5",
"latin5_bin": "latin5",
"armscii8_general_ci": "armscii8",
"armscii8_bin": "armscii8",
"utf8_general_ci": "utf8",
"utf8mb3_general_ci": "utf8mb3",
"utf8_bin": "utf8",
"utf8_unicode_ci": "utf8",
"utf8_icelandic_ci": "utf8",
"utf8_latvian_ci": "utf8",
"utf8_romanian_ci": "utf8",
"utf8_slovenian_ci": "utf8",
"utf8_polish_ci": "utf8",
"utf8_estonian_ci": "utf8",
"utf8_spanish_ci": "utf8",
"utf8_swedish_ci": "utf8",
"utf8_turkish_ci": "utf8",
"utf8_czech_ci": "utf8",
"utf8_danish_ci": "utf8",
"utf8_lithuanian_ci": "utf8",
"utf8_slovak_ci": "utf8",
"utf8_spanish2_ci": "utf8",
"utf8_roman_ci": "utf8",
"utf8_persian_ci": "utf8",
"utf8_esperanto_ci": "utf8",
"utf8_hungarian_ci": "utf8",
"utf8_sinhala_ci": "utf8",
"utf8_german2_ci": "utf8",
"utf8_croatian_ci": "utf8",
"utf8_unicode_520_ci": "utf8",
"utf8_vietnamese_ci": "utf8",
"utf8_general_mysql500_ci": "utf8",
"utf8mb4_0900_ai_ci": "utf8mb4",
"ucs2_general_ci": "ucs2",
"ucs2_bin": "ucs2",
"ucs2_unicode_ci": "ucs2",
"ucs2_icelandic_ci": "ucs2",
"ucs2_latvian_ci": "ucs2",
"ucs2_romanian_ci": "ucs2",
"ucs2_slovenian_ci": "ucs2",
"ucs2_polish_ci": "ucs2",
"ucs2_estonian_ci": "ucs2",
"ucs2_spanish_ci": "ucs2",
"ucs2_swedish_ci": "ucs2",
"ucs2_turkish_ci": "ucs2",
"ucs2_czech_ci": "ucs2",
"ucs2_danish_ci": "ucs2",
"ucs2_lithuanian_ci": "ucs2",
"ucs2_slovak_ci": "ucs2",
"ucs2_spanish2_ci": "ucs2",
"ucs2_roman_ci": "ucs2",
"ucs2_persian_ci": "ucs2",
"ucs2_esperanto_ci": "ucs2",
"ucs2_hungarian_ci": "ucs2",
"ucs2_sinhala_ci": "ucs2",
"ucs2_german2_ci": "ucs2",
"ucs2_croatian_ci": "ucs2",
"ucs2_unicode_520_ci": "ucs2",
"ucs2_vietnamese_ci": "ucs2",
"ucs2_general_mysql500_ci": "ucs2",
"cp866_general_ci": "cp866",
"cp866_bin": "cp866",
"keybcs2_general_ci": "keybcs2",
"keybcs2_bin": "keybcs2",
"macce_general_ci": "macce",
"macce_bin": "macce",
"macroman_general_ci": "macroman",
"macroman_bin": "macroman",
"cp852_general_ci": "cp852",
"cp852_bin": "cp852",
"latin7_estonian_cs": "latin7",
"latin7_general_ci": "latin7",
"latin7_general_cs": "latin7",
"latin7_bin": "latin7",
"utf8mb4_general_ci": "utf8mb4",
"utf8mb4_bin": "utf8mb4",
"utf8mb4_unicode_ci": "utf8mb4",
"utf8mb4_icelandic_ci": "utf8mb4",
"utf8mb4_latvian_ci": "utf8mb4",
"utf8mb4_romanian_ci": "utf8mb4",
"utf8mb4_slovenian_ci": "utf8mb4",
"utf8mb4_polish_ci": "utf8mb4",
"utf8mb4_estonian_ci": "utf8mb4",
"utf8mb4_spanish_ci": "utf8mb4",
"utf8mb4_swedish_ci": "utf8mb4",
"utf8mb4_turkish_ci": "utf8mb4",
"utf8mb4_czech_ci": "utf8mb4",
"utf8mb4_danish_ci": "utf8mb4",
"utf8mb4_lithuanian_ci": "utf8mb4",
"utf8mb4_slovak_ci": "utf8mb4",
"utf8mb4_spanish2_ci": "utf8mb4",
"utf8mb4_roman_ci": "utf8mb4",
"utf8mb4_persian_ci": "utf8mb4",
"utf8mb4_esperanto_ci": "utf8mb4",
"utf8mb4_hungarian_ci": "utf8mb4",
"utf8mb4_sinhala_ci": "utf8mb4",
"utf8mb4_german2_ci": "utf8mb4",
"utf8mb4_croatian_ci": "utf8mb4",
"utf8mb4_unicode_520_ci": "utf8mb4",
"utf8mb4_vietnamese_ci": "utf8mb4",
"cp1251_bulgarian_ci": "cp1251",
"cp1251_ukrainian_ci": "cp1251",
"cp1251_bin": "cp1251",
"cp1251_general_ci": "cp1251",
"cp1251_general_cs": "cp1251",
"utf16_general_ci": "utf16",
"utf16_bin": "utf16",
"utf16_unicode_ci": "utf16",
"utf16_icelandic_ci": "utf16",
"utf16_latvian_ci": "utf16",
"utf16_romanian_ci": "utf16",
"utf16_slovenian_ci": "utf16",
"utf16_polish_ci": "utf16",
"utf16_estonian_ci": "utf16",
"utf16_spanish_ci": "utf16",
"utf16_swedish_ci": "utf16",
"utf16_turkish_ci": "utf16",
"utf16_czech_ci": "utf16",
"utf16_danish_ci": "utf16",
"utf16_lithuanian_ci": "utf16",
"utf16_slovak_ci": "utf16",
"utf16_spanish2_ci": "utf16",
"utf16_roman_ci": "utf16",
"utf16_persian_ci": "utf16",
"utf16_esperanto_ci": "utf16",
"utf16_hungarian_ci": "utf16",
"utf16_sinhala_ci": "utf16",
"utf16_german2_ci": "utf16",
"utf16_croatian_ci": "utf16",
"utf16_unicode_520_ci": "utf16",
"utf16_vietnamese_ci": "utf16",
"utf16le_general_ci": "utf16le",
"utf16le_bin": "utf16le",
"cp1256_general_ci": "cp1256",
"cp1256_bin": "cp1256",
"cp1257_lithuanian_ci": "cp1257",
"cp1257_bin": "cp1257",
"cp1257_general_ci": "cp1257",
"utf32_general_ci": "utf32",
"utf32_bin": "utf32",
"utf32_unicode_ci": "utf32",
"utf32_icelandic_ci": "utf32",
"utf32_latvian_ci": "utf32",
"utf32_romanian_ci": "utf32",
"utf32_slovenian_ci": "utf32",
"utf32_polish_ci": "utf32",
"utf32_estonian_ci": "utf32",
"utf32_spanish_ci": "utf32",
"utf32_swedish_ci": "utf32",
"utf32_turkish_ci": "utf32",
"utf32_czech_ci": "utf32",
"utf32_danish_ci": "utf32",
"utf32_lithuanian_ci": "utf32",
"utf32_slovak_ci": "utf32",
"utf32_spanish2_ci": "utf32",
"utf32_roman_ci": "utf32",
"utf32_persian_ci": "utf32",
"utf32_esperanto_ci": "utf32",
"utf32_hungarian_ci": "utf32",
"utf32_sinhala_ci": "utf32",
"utf32_german2_ci": "utf32",
"utf32_croatian_ci": "utf32",
"utf32_unicode_520_ci": "utf32",
"utf32_vietnamese_ci": "utf32",
"binary": "binary",
"geostd8_general_ci": "geostd8",
"geostd8_bin": "geostd8",
"cp932_japanese_ci": "cp932",
"cp932_bin": "cp932",
"eucjpms_japanese_ci": "eucjpms",
"eucjpms_bin": "eucjpms",
"gb18030_chinese_ci": "gb18030",
"gb18030_bin": "gb18030",
"gb18030_unicode_520_ci": "gb18030"}
|
normal
|
{
"blob_id": "5e29c6d1034f6612b0081037f8dc679b49f1dbef",
"index": 2855,
"step-1": "<mask token>\n",
"step-2": "charset = {'big5': ['big5_chinese_ci', 'big5_bin'], 'dec8': [\n 'dec8_swedish_ci', 'dec8_bin'], 'cp850': ['cp850_general_ci',\n 'cp850_bin'], 'hp8': ['hp8_english_ci', 'hp8_bin'], 'koi8r': [\n 'koi8r_general_ci', 'koi8r_bin'], 'latin1': ['latin1_swedish_ci',\n 'latin1_german1_ci', 'latin1_danish_ci', 'latin1_german2_ci',\n 'latin1_bin', 'latin1_general_ci', 'latin1_general_cs',\n 'latin1_spanish_ci'], 'latin2': ['latin2_general_ci', 'latin2_czech_cs',\n 'latin2_hungarian_ci', 'latin2_croatian_ci', 'latin2_bin'], 'swe7': [\n 'swe7_swedish_ci', 'swe7_bin'], 'ascii': ['ascii_general_ci',\n 'ascii_bin'], 'ujis': ['ujis_japanese_ci', 'ujis_bin'], 'sjis': [\n 'sjis_japanese_ci', 'sjis_bin'], 'hebrew': ['hebrew_general_ci',\n 'hebrew_bin'], 'tis620': ['tis620_thai_ci', 'tis620_bin'], 'euckr': [\n 'euckr_korean_ci', 'euckr_bin'], 'koi8u': ['koi8u_general_ci',\n 'koi8u_bin'], 'gb2312': ['gb2312_chinese_ci', 'gb2312_bin'], 'greek': [\n 'greek_general_ci', 'greek_bin'], 'cp1250': ['cp1250_general_ci',\n 'cp1250_czech_cs', 'cp1250_croatian_ci', 'cp1250_bin',\n 'cp1250_polish_ci'], 'gbk': ['gbk_chinese_ci', 'gbk_bin'], 'latin5': [\n 'latin5_turkish_ci', 'latin5_bin'], 'armscii8': ['armscii8_general_ci',\n 'armscii8_bin'], 'utf8': ['utf8_general_ci', 'utf8_bin',\n 'utf8_unicode_ci', 'utf8_icelandic_ci', 'utf8_latvian_ci',\n 'utf8_romanian_ci', 'utf8_slovenian_ci', 'utf8_polish_ci',\n 'utf8_estonian_ci', 'utf8_spanish_ci', 'utf8_swedish_ci',\n 'utf8_turkish_ci', 'utf8_czech_ci', 'utf8_danish_ci',\n 'utf8_lithuanian_ci', 'utf8_slovak_ci', 'utf8_spanish2_ci',\n 'utf8_roman_ci', 'utf8_persian_ci', 'utf8_esperanto_ci',\n 'utf8_hungarian_ci', 'utf8_sinhala_ci', 'utf8_german2_ci',\n 'utf8_croatian_ci', 'utf8_unicode_520_ci', 'utf8_vietnamese_ci',\n 'utf8_general_mysql500_ci'], 'utf8mb4': ['utf8mb4_0900_ai_ci'],\n 'utf8mb3': ['utf8mb3_general_ci'], 'ucs2': ['ucs2_general_ci',\n 'ucs2_bin', 'ucs2_unicode_ci', 'ucs2_icelandic_ci', 'ucs2_latvian_ci',\n 'ucs2_romanian_ci', 'ucs2_slovenian_ci', 'ucs2_polish_ci',\n 'ucs2_estonian_ci', 'ucs2_spanish_ci', 'ucs2_swedish_ci',\n 'ucs2_turkish_ci', 'ucs2_czech_ci', 'ucs2_danish_ci',\n 'ucs2_lithuanian_ci', 'ucs2_slovak_ci', 'ucs2_spanish2_ci',\n 'ucs2_roman_ci', 'ucs2_persian_ci', 'ucs2_esperanto_ci',\n 'ucs2_hungarian_ci', 'ucs2_sinhala_ci', 'ucs2_german2_ci',\n 'ucs2_croatian_ci', 'ucs2_unicode_520_ci', 'ucs2_vietnamese_ci',\n 'ucs2_general_mysql500_ci'], 'cp866': ['cp866_general_ci', 'cp866_bin'],\n 'keybcs2': ['keybcs2_general_ci', 'keybcs2_bin'], 'macce': [\n 'macce_general_ci', 'macce_bin'], 'macroman': ['macroman_general_ci',\n 'macroman_bin'], 'cp852': ['cp852_general_ci', 'cp852_bin'], 'latin7':\n ['latin7_general_ci', 'latin7_estonian_cs', 'latin7_general_cs',\n 'latin7_bin'], 'utf8mb4': ['utf8mb4_general_ci', 'utf8mb4_bin',\n 'utf8mb4_unicode_ci', 'utf8mb4_icelandic_ci', 'utf8mb4_latvian_ci',\n 'utf8mb4_romanian_ci', 'utf8mb4_slovenian_ci', 'utf8mb4_polish_ci',\n 'utf8mb4_estonian_ci', 'utf8mb4_spanish_ci', 'utf8mb4_swedish_ci',\n 'utf8mb4_turkish_ci', 'utf8mb4_czech_ci', 'utf8mb4_danish_ci',\n 'utf8mb4_lithuanian_ci', 'utf8mb4_slovak_ci', 'utf8mb4_spanish2_ci',\n 'utf8mb4_roman_ci', 'utf8mb4_persian_ci', 'utf8mb4_esperanto_ci',\n 'utf8mb4_hungarian_ci', 'utf8mb4_sinhala_ci', 'utf8mb4_german2_ci',\n 'utf8mb4_croatian_ci', 'utf8mb4_unicode_520_ci',\n 'utf8mb4_vietnamese_ci'], 'cp1251': ['cp1251_general_ci',\n 'cp1251_bulgarian_ci', 'cp1251_ukrainian_ci', 'cp1251_bin',\n 'cp1251_general_cs'], 'utf16': ['utf16_general_ci', 'utf16_bin',\n 
'utf16_unicode_ci', 'utf16_icelandic_ci', 'utf16_latvian_ci',\n 'utf16_romanian_ci', 'utf16_slovenian_ci', 'utf16_polish_ci',\n 'utf16_estonian_ci', 'utf16_spanish_ci', 'utf16_swedish_ci',\n 'utf16_turkish_ci', 'utf16_czech_ci', 'utf16_danish_ci',\n 'utf16_lithuanian_ci', 'utf16_slovak_ci', 'utf16_spanish2_ci',\n 'utf16_roman_ci', 'utf16_persian_ci', 'utf16_esperanto_ci',\n 'utf16_hungarian_ci', 'utf16_sinhala_ci', 'utf16_german2_ci',\n 'utf16_croatian_ci', 'utf16_unicode_520_ci', 'utf16_vietnamese_ci'],\n 'utf16le': ['utf16le_general_ci', 'utf16le_bin'], 'cp1256': [\n 'cp1256_general_ci', 'cp1256_bin'], 'cp1257': ['cp1257_general_ci',\n 'cp1257_lithuanian_ci', 'cp1257_bin'], 'utf32': ['utf32_general_ci',\n 'utf32_bin', 'utf32_unicode_ci', 'utf32_icelandic_ci',\n 'utf32_latvian_ci', 'utf32_romanian_ci', 'utf32_slovenian_ci',\n 'utf32_polish_ci', 'utf32_estonian_ci', 'utf32_spanish_ci',\n 'utf32_swedish_ci', 'utf32_turkish_ci', 'utf32_czech_ci',\n 'utf32_danish_ci', 'utf32_lithuanian_ci', 'utf32_slovak_ci',\n 'utf32_spanish2_ci', 'utf32_roman_ci', 'utf32_persian_ci',\n 'utf32_esperanto_ci', 'utf32_hungarian_ci', 'utf32_sinhala_ci',\n 'utf32_german2_ci', 'utf32_croatian_ci', 'utf32_unicode_520_ci',\n 'utf32_vietnamese_ci'], 'binary': ['binary'], 'geostd8': [\n 'geostd8_general_ci', 'geostd8_bin'], 'cp932': ['cp932_japanese_ci',\n 'cp932_bin'], 'eucjpms': ['eucjpms_japanese_ci', 'eucjpms_bin'],\n 'gb18030': ['gb18030_chinese_ci', 'gb18030_bin', 'gb18030_unicode_520_ci']}\ncollation = {'big5_chinese_ci': 'big5', 'big5_bin': 'big5',\n 'dec8_swedish_ci': 'dec8', 'dec8_bin': 'dec8', 'cp850_general_ci':\n 'cp850', 'cp850_bin': 'cp850', 'hp8_english_ci': 'hp8', 'hp8_bin':\n 'hp8', 'koi8r_general_ci': 'koi8r', 'koi8r_bin': 'koi8r',\n 'latin1_german1_ci': 'latin1', 'latin1_swedish_ci': 'latin1',\n 'latin1_danish_ci': 'latin1', 'latin1_german2_ci': 'latin1',\n 'latin1_bin': 'latin1', 'latin1_general_ci': 'latin1',\n 'latin1_general_cs': 'latin1', 'latin1_spanish_ci': 'latin1',\n 'latin2_czech_cs': 'latin2', 'latin2_general_ci': 'latin2',\n 'latin2_hungarian_ci': 'latin2', 'latin2_croatian_ci': 'latin2',\n 'latin2_bin': 'latin2', 'swe7_swedish_ci': 'swe7', 'swe7_bin': 'swe7',\n 'ascii_general_ci': 'ascii', 'ascii_bin': 'ascii', 'ujis_japanese_ci':\n 'ujis', 'ujis_bin': 'ujis', 'sjis_japanese_ci': 'sjis', 'sjis_bin':\n 'sjis', 'hebrew_general_ci': 'hebrew', 'hebrew_bin': 'hebrew',\n 'tis620_thai_ci': 'tis620', 'tis620_bin': 'tis620', 'euckr_korean_ci':\n 'euckr', 'euckr_bin': 'euckr', 'koi8u_general_ci': 'koi8u', 'koi8u_bin':\n 'koi8u', 'gb2312_chinese_ci': 'gb2312', 'gb2312_bin': 'gb2312',\n 'greek_general_ci': 'greek', 'greek_bin': 'greek', 'cp1250_general_ci':\n 'cp1250', 'cp1250_czech_cs': 'cp1250', 'cp1250_croatian_ci': 'cp1250',\n 'cp1250_bin': 'cp1250', 'cp1250_polish_ci': 'cp1250', 'gbk_chinese_ci':\n 'gbk', 'gbk_bin': 'gbk', 'latin5_turkish_ci': 'latin5', 'latin5_bin':\n 'latin5', 'armscii8_general_ci': 'armscii8', 'armscii8_bin': 'armscii8',\n 'utf8_general_ci': 'utf8', 'utf8mb3_general_ci': 'utf8mb3', 'utf8_bin':\n 'utf8', 'utf8_unicode_ci': 'utf8', 'utf8_icelandic_ci': 'utf8',\n 'utf8_latvian_ci': 'utf8', 'utf8_romanian_ci': 'utf8',\n 'utf8_slovenian_ci': 'utf8', 'utf8_polish_ci': 'utf8',\n 'utf8_estonian_ci': 'utf8', 'utf8_spanish_ci': 'utf8',\n 'utf8_swedish_ci': 'utf8', 'utf8_turkish_ci': 'utf8', 'utf8_czech_ci':\n 'utf8', 'utf8_danish_ci': 'utf8', 'utf8_lithuanian_ci': 'utf8',\n 'utf8_slovak_ci': 'utf8', 'utf8_spanish2_ci': 'utf8', 'utf8_roman_ci':\n 'utf8', 'utf8_persian_ci': 'utf8', 
'utf8_esperanto_ci': 'utf8',\n 'utf8_hungarian_ci': 'utf8', 'utf8_sinhala_ci': 'utf8',\n 'utf8_german2_ci': 'utf8', 'utf8_croatian_ci': 'utf8',\n 'utf8_unicode_520_ci': 'utf8', 'utf8_vietnamese_ci': 'utf8',\n 'utf8_general_mysql500_ci': 'utf8', 'utf8mb4_0900_ai_ci': 'utf8mb4',\n 'ucs2_general_ci': 'ucs2', 'ucs2_bin': 'ucs2', 'ucs2_unicode_ci':\n 'ucs2', 'ucs2_icelandic_ci': 'ucs2', 'ucs2_latvian_ci': 'ucs2',\n 'ucs2_romanian_ci': 'ucs2', 'ucs2_slovenian_ci': 'ucs2',\n 'ucs2_polish_ci': 'ucs2', 'ucs2_estonian_ci': 'ucs2', 'ucs2_spanish_ci':\n 'ucs2', 'ucs2_swedish_ci': 'ucs2', 'ucs2_turkish_ci': 'ucs2',\n 'ucs2_czech_ci': 'ucs2', 'ucs2_danish_ci': 'ucs2', 'ucs2_lithuanian_ci':\n 'ucs2', 'ucs2_slovak_ci': 'ucs2', 'ucs2_spanish2_ci': 'ucs2',\n 'ucs2_roman_ci': 'ucs2', 'ucs2_persian_ci': 'ucs2', 'ucs2_esperanto_ci':\n 'ucs2', 'ucs2_hungarian_ci': 'ucs2', 'ucs2_sinhala_ci': 'ucs2',\n 'ucs2_german2_ci': 'ucs2', 'ucs2_croatian_ci': 'ucs2',\n 'ucs2_unicode_520_ci': 'ucs2', 'ucs2_vietnamese_ci': 'ucs2',\n 'ucs2_general_mysql500_ci': 'ucs2', 'cp866_general_ci': 'cp866',\n 'cp866_bin': 'cp866', 'keybcs2_general_ci': 'keybcs2', 'keybcs2_bin':\n 'keybcs2', 'macce_general_ci': 'macce', 'macce_bin': 'macce',\n 'macroman_general_ci': 'macroman', 'macroman_bin': 'macroman',\n 'cp852_general_ci': 'cp852', 'cp852_bin': 'cp852', 'latin7_estonian_cs':\n 'latin7', 'latin7_general_ci': 'latin7', 'latin7_general_cs': 'latin7',\n 'latin7_bin': 'latin7', 'utf8mb4_general_ci': 'utf8mb4', 'utf8mb4_bin':\n 'utf8mb4', 'utf8mb4_unicode_ci': 'utf8mb4', 'utf8mb4_icelandic_ci':\n 'utf8mb4', 'utf8mb4_latvian_ci': 'utf8mb4', 'utf8mb4_romanian_ci':\n 'utf8mb4', 'utf8mb4_slovenian_ci': 'utf8mb4', 'utf8mb4_polish_ci':\n 'utf8mb4', 'utf8mb4_estonian_ci': 'utf8mb4', 'utf8mb4_spanish_ci':\n 'utf8mb4', 'utf8mb4_swedish_ci': 'utf8mb4', 'utf8mb4_turkish_ci':\n 'utf8mb4', 'utf8mb4_czech_ci': 'utf8mb4', 'utf8mb4_danish_ci':\n 'utf8mb4', 'utf8mb4_lithuanian_ci': 'utf8mb4', 'utf8mb4_slovak_ci':\n 'utf8mb4', 'utf8mb4_spanish2_ci': 'utf8mb4', 'utf8mb4_roman_ci':\n 'utf8mb4', 'utf8mb4_persian_ci': 'utf8mb4', 'utf8mb4_esperanto_ci':\n 'utf8mb4', 'utf8mb4_hungarian_ci': 'utf8mb4', 'utf8mb4_sinhala_ci':\n 'utf8mb4', 'utf8mb4_german2_ci': 'utf8mb4', 'utf8mb4_croatian_ci':\n 'utf8mb4', 'utf8mb4_unicode_520_ci': 'utf8mb4', 'utf8mb4_vietnamese_ci':\n 'utf8mb4', 'cp1251_bulgarian_ci': 'cp1251', 'cp1251_ukrainian_ci':\n 'cp1251', 'cp1251_bin': 'cp1251', 'cp1251_general_ci': 'cp1251',\n 'cp1251_general_cs': 'cp1251', 'utf16_general_ci': 'utf16', 'utf16_bin':\n 'utf16', 'utf16_unicode_ci': 'utf16', 'utf16_icelandic_ci': 'utf16',\n 'utf16_latvian_ci': 'utf16', 'utf16_romanian_ci': 'utf16',\n 'utf16_slovenian_ci': 'utf16', 'utf16_polish_ci': 'utf16',\n 'utf16_estonian_ci': 'utf16', 'utf16_spanish_ci': 'utf16',\n 'utf16_swedish_ci': 'utf16', 'utf16_turkish_ci': 'utf16',\n 'utf16_czech_ci': 'utf16', 'utf16_danish_ci': 'utf16',\n 'utf16_lithuanian_ci': 'utf16', 'utf16_slovak_ci': 'utf16',\n 'utf16_spanish2_ci': 'utf16', 'utf16_roman_ci': 'utf16',\n 'utf16_persian_ci': 'utf16', 'utf16_esperanto_ci': 'utf16',\n 'utf16_hungarian_ci': 'utf16', 'utf16_sinhala_ci': 'utf16',\n 'utf16_german2_ci': 'utf16', 'utf16_croatian_ci': 'utf16',\n 'utf16_unicode_520_ci': 'utf16', 'utf16_vietnamese_ci': 'utf16',\n 'utf16le_general_ci': 'utf16le', 'utf16le_bin': 'utf16le',\n 'cp1256_general_ci': 'cp1256', 'cp1256_bin': 'cp1256',\n 'cp1257_lithuanian_ci': 'cp1257', 'cp1257_bin': 'cp1257',\n 'cp1257_general_ci': 'cp1257', 'utf32_general_ci': 'utf32', 'utf32_bin':\n 'utf32', 
'utf32_unicode_ci': 'utf32', 'utf32_icelandic_ci': 'utf32',\n 'utf32_latvian_ci': 'utf32', 'utf32_romanian_ci': 'utf32',\n 'utf32_slovenian_ci': 'utf32', 'utf32_polish_ci': 'utf32',\n 'utf32_estonian_ci': 'utf32', 'utf32_spanish_ci': 'utf32',\n 'utf32_swedish_ci': 'utf32', 'utf32_turkish_ci': 'utf32',\n 'utf32_czech_ci': 'utf32', 'utf32_danish_ci': 'utf32',\n 'utf32_lithuanian_ci': 'utf32', 'utf32_slovak_ci': 'utf32',\n 'utf32_spanish2_ci': 'utf32', 'utf32_roman_ci': 'utf32',\n 'utf32_persian_ci': 'utf32', 'utf32_esperanto_ci': 'utf32',\n 'utf32_hungarian_ci': 'utf32', 'utf32_sinhala_ci': 'utf32',\n 'utf32_german2_ci': 'utf32', 'utf32_croatian_ci': 'utf32',\n 'utf32_unicode_520_ci': 'utf32', 'utf32_vietnamese_ci': 'utf32',\n 'binary': 'binary', 'geostd8_general_ci': 'geostd8', 'geostd8_bin':\n 'geostd8', 'cp932_japanese_ci': 'cp932', 'cp932_bin': 'cp932',\n 'eucjpms_japanese_ci': 'eucjpms', 'eucjpms_bin': 'eucjpms',\n 'gb18030_chinese_ci': 'gb18030', 'gb18030_bin': 'gb18030',\n 'gb18030_unicode_520_ci': 'gb18030'}\n",
"step-3": "# Copyright 2016 Tesora, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\ncharset = {\"big5\": [\"big5_chinese_ci\", \"big5_bin\"],\n \"dec8\": [\"dec8_swedish_ci\", \"dec8_bin\"],\n \"cp850\": [\"cp850_general_ci\", \"cp850_bin\"],\n \"hp8\": [\"hp8_english_ci\", \"hp8_bin\"],\n \"koi8r\": [\"koi8r_general_ci\", \"koi8r_bin\"],\n \"latin1\": [\"latin1_swedish_ci\",\n \"latin1_german1_ci\",\n \"latin1_danish_ci\",\n \"latin1_german2_ci\",\n \"latin1_bin\",\n \"latin1_general_ci\",\n \"latin1_general_cs\",\n \"latin1_spanish_ci\"],\n \"latin2\": [\"latin2_general_ci\",\n \"latin2_czech_cs\",\n \"latin2_hungarian_ci\",\n \"latin2_croatian_ci\",\n \"latin2_bin\"],\n \"swe7\": [\"swe7_swedish_ci\", \"swe7_bin\"],\n \"ascii\": [\"ascii_general_ci\", \"ascii_bin\"],\n \"ujis\": [\"ujis_japanese_ci\", \"ujis_bin\"],\n \"sjis\": [\"sjis_japanese_ci\", \"sjis_bin\"],\n \"hebrew\": [\"hebrew_general_ci\", \"hebrew_bin\"],\n \"tis620\": [\"tis620_thai_ci\", \"tis620_bin\"],\n \"euckr\": [\"euckr_korean_ci\", \"euckr_bin\"],\n \"koi8u\": [\"koi8u_general_ci\", \"koi8u_bin\"],\n \"gb2312\": [\"gb2312_chinese_ci\", \"gb2312_bin\"],\n \"greek\": [\"greek_general_ci\", \"greek_bin\"],\n \"cp1250\": [\"cp1250_general_ci\",\n \"cp1250_czech_cs\",\n \"cp1250_croatian_ci\",\n \"cp1250_bin\",\n \"cp1250_polish_ci\"],\n \"gbk\": [\"gbk_chinese_ci\", \"gbk_bin\"],\n \"latin5\": [\"latin5_turkish_ci\", \"latin5_bin\"],\n \"armscii8\": [\"armscii8_general_ci\", \"armscii8_bin\"],\n \"utf8\": [\"utf8_general_ci\",\n \"utf8_bin\",\n \"utf8_unicode_ci\",\n \"utf8_icelandic_ci\",\n \"utf8_latvian_ci\",\n \"utf8_romanian_ci\",\n \"utf8_slovenian_ci\",\n \"utf8_polish_ci\",\n \"utf8_estonian_ci\",\n \"utf8_spanish_ci\",\n \"utf8_swedish_ci\",\n \"utf8_turkish_ci\",\n \"utf8_czech_ci\",\n \"utf8_danish_ci\",\n \"utf8_lithuanian_ci\",\n \"utf8_slovak_ci\",\n \"utf8_spanish2_ci\",\n \"utf8_roman_ci\",\n \"utf8_persian_ci\",\n \"utf8_esperanto_ci\",\n \"utf8_hungarian_ci\",\n \"utf8_sinhala_ci\",\n \"utf8_german2_ci\",\n \"utf8_croatian_ci\",\n \"utf8_unicode_520_ci\",\n \"utf8_vietnamese_ci\",\n \"utf8_general_mysql500_ci\"\n ],\n \"utf8mb4\": [\"utf8mb4_0900_ai_ci\"],\n \"utf8mb3\": [\"utf8mb3_general_ci\"],\n \"ucs2\": [\"ucs2_general_ci\",\n \"ucs2_bin\",\n \"ucs2_unicode_ci\",\n \"ucs2_icelandic_ci\",\n \"ucs2_latvian_ci\",\n \"ucs2_romanian_ci\",\n \"ucs2_slovenian_ci\",\n \"ucs2_polish_ci\",\n \"ucs2_estonian_ci\",\n \"ucs2_spanish_ci\",\n \"ucs2_swedish_ci\",\n \"ucs2_turkish_ci\",\n \"ucs2_czech_ci\",\n \"ucs2_danish_ci\",\n \"ucs2_lithuanian_ci\",\n \"ucs2_slovak_ci\",\n \"ucs2_spanish2_ci\",\n \"ucs2_roman_ci\",\n \"ucs2_persian_ci\",\n \"ucs2_esperanto_ci\",\n \"ucs2_hungarian_ci\",\n \"ucs2_sinhala_ci\",\n \"ucs2_german2_ci\",\n \"ucs2_croatian_ci\",\n \"ucs2_unicode_520_ci\",\n \"ucs2_vietnamese_ci\",\n \"ucs2_general_mysql500_ci\"\n ],\n \"cp866\": [\"cp866_general_ci\", \"cp866_bin\"],\n \"keybcs2\": [\"keybcs2_general_ci\", \"keybcs2_bin\"],\n \"macce\": 
[\"macce_general_ci\", \"macce_bin\"],\n \"macroman\": [\"macroman_general_ci\", \"macroman_bin\"],\n \"cp852\": [\"cp852_general_ci\", \"cp852_bin\"],\n \"latin7\": [\"latin7_general_ci\",\n \"latin7_estonian_cs\",\n \"latin7_general_cs\",\n \"latin7_bin\"],\n \"utf8mb4\": [\"utf8mb4_general_ci\",\n \"utf8mb4_bin\",\n \"utf8mb4_unicode_ci\",\n \"utf8mb4_icelandic_ci\",\n \"utf8mb4_latvian_ci\",\n \"utf8mb4_romanian_ci\",\n \"utf8mb4_slovenian_ci\",\n \"utf8mb4_polish_ci\",\n \"utf8mb4_estonian_ci\",\n \"utf8mb4_spanish_ci\",\n \"utf8mb4_swedish_ci\",\n \"utf8mb4_turkish_ci\",\n \"utf8mb4_czech_ci\",\n \"utf8mb4_danish_ci\",\n \"utf8mb4_lithuanian_ci\",\n \"utf8mb4_slovak_ci\",\n \"utf8mb4_spanish2_ci\",\n \"utf8mb4_roman_ci\",\n \"utf8mb4_persian_ci\",\n \"utf8mb4_esperanto_ci\",\n \"utf8mb4_hungarian_ci\",\n \"utf8mb4_sinhala_ci\",\n \"utf8mb4_german2_ci\",\n \"utf8mb4_croatian_ci\",\n \"utf8mb4_unicode_520_ci\",\n \"utf8mb4_vietnamese_ci\"],\n \"cp1251\": [\"cp1251_general_ci\",\n \"cp1251_bulgarian_ci\",\n \"cp1251_ukrainian_ci\",\n \"cp1251_bin\",\n \"cp1251_general_cs\"],\n \"utf16\": [\"utf16_general_ci\",\n \"utf16_bin\",\n \"utf16_unicode_ci\",\n \"utf16_icelandic_ci\",\n \"utf16_latvian_ci\",\n \"utf16_romanian_ci\",\n \"utf16_slovenian_ci\",\n \"utf16_polish_ci\",\n \"utf16_estonian_ci\",\n \"utf16_spanish_ci\",\n \"utf16_swedish_ci\",\n \"utf16_turkish_ci\",\n \"utf16_czech_ci\",\n \"utf16_danish_ci\",\n \"utf16_lithuanian_ci\",\n \"utf16_slovak_ci\",\n \"utf16_spanish2_ci\",\n \"utf16_roman_ci\",\n \"utf16_persian_ci\",\n \"utf16_esperanto_ci\",\n \"utf16_hungarian_ci\",\n \"utf16_sinhala_ci\",\n \"utf16_german2_ci\",\n \"utf16_croatian_ci\",\n \"utf16_unicode_520_ci\",\n \"utf16_vietnamese_ci\"],\n \"utf16le\": [\"utf16le_general_ci\",\n \"utf16le_bin\"],\n \"cp1256\": [\"cp1256_general_ci\", \"cp1256_bin\"],\n \"cp1257\": [\"cp1257_general_ci\",\n \"cp1257_lithuanian_ci\",\n \"cp1257_bin\"],\n \"utf32\": [\"utf32_general_ci\",\n \"utf32_bin\",\n \"utf32_unicode_ci\",\n \"utf32_icelandic_ci\",\n \"utf32_latvian_ci\",\n \"utf32_romanian_ci\",\n \"utf32_slovenian_ci\",\n \"utf32_polish_ci\",\n \"utf32_estonian_ci\",\n \"utf32_spanish_ci\",\n \"utf32_swedish_ci\",\n \"utf32_turkish_ci\",\n \"utf32_czech_ci\",\n \"utf32_danish_ci\",\n \"utf32_lithuanian_ci\",\n \"utf32_slovak_ci\",\n \"utf32_spanish2_ci\",\n \"utf32_roman_ci\",\n \"utf32_persian_ci\",\n \"utf32_esperanto_ci\",\n \"utf32_hungarian_ci\",\n \"utf32_sinhala_ci\",\n \"utf32_german2_ci\",\n \"utf32_croatian_ci\",\n \"utf32_unicode_520_ci\",\n \"utf32_vietnamese_ci\"],\n \"binary\": [\"binary\"],\n \"geostd8\": [\"geostd8_general_ci\", \"geostd8_bin\"],\n \"cp932\": [\"cp932_japanese_ci\", \"cp932_bin\"],\n \"eucjpms\": [\"eucjpms_japanese_ci\", \"eucjpms_bin\"],\n \"gb18030\": [\"gb18030_chinese_ci\",\n \"gb18030_bin\",\n \"gb18030_unicode_520_ci\"]}\n\ncollation = {\"big5_chinese_ci\": \"big5\",\n \"big5_bin\": \"big5\",\n \"dec8_swedish_ci\": \"dec8\",\n \"dec8_bin\": \"dec8\",\n \"cp850_general_ci\": \"cp850\",\n \"cp850_bin\": \"cp850\",\n \"hp8_english_ci\": \"hp8\",\n \"hp8_bin\": \"hp8\",\n \"koi8r_general_ci\": \"koi8r\",\n \"koi8r_bin\": \"koi8r\",\n \"latin1_german1_ci\": \"latin1\",\n \"latin1_swedish_ci\": \"latin1\",\n \"latin1_danish_ci\": \"latin1\",\n \"latin1_german2_ci\": \"latin1\",\n \"latin1_bin\": \"latin1\",\n \"latin1_general_ci\": \"latin1\",\n \"latin1_general_cs\": \"latin1\",\n \"latin1_spanish_ci\": \"latin1\",\n \"latin2_czech_cs\": \"latin2\",\n \"latin2_general_ci\": \"latin2\",\n 
\"latin2_hungarian_ci\": \"latin2\",\n \"latin2_croatian_ci\": \"latin2\",\n \"latin2_bin\": \"latin2\",\n \"swe7_swedish_ci\": \"swe7\",\n \"swe7_bin\": \"swe7\",\n \"ascii_general_ci\": \"ascii\",\n \"ascii_bin\": \"ascii\",\n \"ujis_japanese_ci\": \"ujis\",\n \"ujis_bin\": \"ujis\",\n \"sjis_japanese_ci\": \"sjis\",\n \"sjis_bin\": \"sjis\",\n \"hebrew_general_ci\": \"hebrew\",\n \"hebrew_bin\": \"hebrew\",\n \"tis620_thai_ci\": \"tis620\",\n \"tis620_bin\": \"tis620\",\n \"euckr_korean_ci\": \"euckr\",\n \"euckr_bin\": \"euckr\",\n \"koi8u_general_ci\": \"koi8u\",\n \"koi8u_bin\": \"koi8u\",\n \"gb2312_chinese_ci\": \"gb2312\",\n \"gb2312_bin\": \"gb2312\",\n \"greek_general_ci\": \"greek\",\n \"greek_bin\": \"greek\",\n \"cp1250_general_ci\": \"cp1250\",\n \"cp1250_czech_cs\": \"cp1250\",\n \"cp1250_croatian_ci\": \"cp1250\",\n \"cp1250_bin\": \"cp1250\",\n \"cp1250_polish_ci\": \"cp1250\",\n \"gbk_chinese_ci\": \"gbk\",\n \"gbk_bin\": \"gbk\",\n \"latin5_turkish_ci\": \"latin5\",\n \"latin5_bin\": \"latin5\",\n \"armscii8_general_ci\": \"armscii8\",\n \"armscii8_bin\": \"armscii8\",\n \"utf8_general_ci\": \"utf8\",\n \"utf8mb3_general_ci\": \"utf8mb3\",\n \"utf8_bin\": \"utf8\",\n \"utf8_unicode_ci\": \"utf8\",\n \"utf8_icelandic_ci\": \"utf8\",\n \"utf8_latvian_ci\": \"utf8\",\n \"utf8_romanian_ci\": \"utf8\",\n \"utf8_slovenian_ci\": \"utf8\",\n \"utf8_polish_ci\": \"utf8\",\n \"utf8_estonian_ci\": \"utf8\",\n \"utf8_spanish_ci\": \"utf8\",\n \"utf8_swedish_ci\": \"utf8\",\n \"utf8_turkish_ci\": \"utf8\",\n \"utf8_czech_ci\": \"utf8\",\n \"utf8_danish_ci\": \"utf8\",\n \"utf8_lithuanian_ci\": \"utf8\",\n \"utf8_slovak_ci\": \"utf8\",\n \"utf8_spanish2_ci\": \"utf8\",\n \"utf8_roman_ci\": \"utf8\",\n \"utf8_persian_ci\": \"utf8\",\n \"utf8_esperanto_ci\": \"utf8\",\n \"utf8_hungarian_ci\": \"utf8\",\n \"utf8_sinhala_ci\": \"utf8\",\n \"utf8_german2_ci\": \"utf8\",\n \"utf8_croatian_ci\": \"utf8\",\n \"utf8_unicode_520_ci\": \"utf8\",\n \"utf8_vietnamese_ci\": \"utf8\",\n \"utf8_general_mysql500_ci\": \"utf8\",\n \"utf8mb4_0900_ai_ci\": \"utf8mb4\",\n \"ucs2_general_ci\": \"ucs2\",\n \"ucs2_bin\": \"ucs2\",\n \"ucs2_unicode_ci\": \"ucs2\",\n \"ucs2_icelandic_ci\": \"ucs2\",\n \"ucs2_latvian_ci\": \"ucs2\",\n \"ucs2_romanian_ci\": \"ucs2\",\n \"ucs2_slovenian_ci\": \"ucs2\",\n \"ucs2_polish_ci\": \"ucs2\",\n \"ucs2_estonian_ci\": \"ucs2\",\n \"ucs2_spanish_ci\": \"ucs2\",\n \"ucs2_swedish_ci\": \"ucs2\",\n \"ucs2_turkish_ci\": \"ucs2\",\n \"ucs2_czech_ci\": \"ucs2\",\n \"ucs2_danish_ci\": \"ucs2\",\n \"ucs2_lithuanian_ci\": \"ucs2\",\n \"ucs2_slovak_ci\": \"ucs2\",\n \"ucs2_spanish2_ci\": \"ucs2\",\n \"ucs2_roman_ci\": \"ucs2\",\n \"ucs2_persian_ci\": \"ucs2\",\n \"ucs2_esperanto_ci\": \"ucs2\",\n \"ucs2_hungarian_ci\": \"ucs2\",\n \"ucs2_sinhala_ci\": \"ucs2\",\n \"ucs2_german2_ci\": \"ucs2\",\n \"ucs2_croatian_ci\": \"ucs2\",\n \"ucs2_unicode_520_ci\": \"ucs2\",\n \"ucs2_vietnamese_ci\": \"ucs2\",\n \"ucs2_general_mysql500_ci\": \"ucs2\",\n \"cp866_general_ci\": \"cp866\",\n \"cp866_bin\": \"cp866\",\n \"keybcs2_general_ci\": \"keybcs2\",\n \"keybcs2_bin\": \"keybcs2\",\n \"macce_general_ci\": \"macce\",\n \"macce_bin\": \"macce\",\n \"macroman_general_ci\": \"macroman\",\n \"macroman_bin\": \"macroman\",\n \"cp852_general_ci\": \"cp852\",\n \"cp852_bin\": \"cp852\",\n \"latin7_estonian_cs\": \"latin7\",\n \"latin7_general_ci\": \"latin7\",\n \"latin7_general_cs\": \"latin7\",\n \"latin7_bin\": \"latin7\",\n \"utf8mb4_general_ci\": \"utf8mb4\",\n \"utf8mb4_bin\": \"utf8mb4\",\n 
\"utf8mb4_unicode_ci\": \"utf8mb4\",\n \"utf8mb4_icelandic_ci\": \"utf8mb4\",\n \"utf8mb4_latvian_ci\": \"utf8mb4\",\n \"utf8mb4_romanian_ci\": \"utf8mb4\",\n \"utf8mb4_slovenian_ci\": \"utf8mb4\",\n \"utf8mb4_polish_ci\": \"utf8mb4\",\n \"utf8mb4_estonian_ci\": \"utf8mb4\",\n \"utf8mb4_spanish_ci\": \"utf8mb4\",\n \"utf8mb4_swedish_ci\": \"utf8mb4\",\n \"utf8mb4_turkish_ci\": \"utf8mb4\",\n \"utf8mb4_czech_ci\": \"utf8mb4\",\n \"utf8mb4_danish_ci\": \"utf8mb4\",\n \"utf8mb4_lithuanian_ci\": \"utf8mb4\",\n \"utf8mb4_slovak_ci\": \"utf8mb4\",\n \"utf8mb4_spanish2_ci\": \"utf8mb4\",\n \"utf8mb4_roman_ci\": \"utf8mb4\",\n \"utf8mb4_persian_ci\": \"utf8mb4\",\n \"utf8mb4_esperanto_ci\": \"utf8mb4\",\n \"utf8mb4_hungarian_ci\": \"utf8mb4\",\n \"utf8mb4_sinhala_ci\": \"utf8mb4\",\n \"utf8mb4_german2_ci\": \"utf8mb4\",\n \"utf8mb4_croatian_ci\": \"utf8mb4\",\n \"utf8mb4_unicode_520_ci\": \"utf8mb4\",\n \"utf8mb4_vietnamese_ci\": \"utf8mb4\",\n \"cp1251_bulgarian_ci\": \"cp1251\",\n \"cp1251_ukrainian_ci\": \"cp1251\",\n \"cp1251_bin\": \"cp1251\",\n \"cp1251_general_ci\": \"cp1251\",\n \"cp1251_general_cs\": \"cp1251\",\n \"utf16_general_ci\": \"utf16\",\n \"utf16_bin\": \"utf16\",\n \"utf16_unicode_ci\": \"utf16\",\n \"utf16_icelandic_ci\": \"utf16\",\n \"utf16_latvian_ci\": \"utf16\",\n \"utf16_romanian_ci\": \"utf16\",\n \"utf16_slovenian_ci\": \"utf16\",\n \"utf16_polish_ci\": \"utf16\",\n \"utf16_estonian_ci\": \"utf16\",\n \"utf16_spanish_ci\": \"utf16\",\n \"utf16_swedish_ci\": \"utf16\",\n \"utf16_turkish_ci\": \"utf16\",\n \"utf16_czech_ci\": \"utf16\",\n \"utf16_danish_ci\": \"utf16\",\n \"utf16_lithuanian_ci\": \"utf16\",\n \"utf16_slovak_ci\": \"utf16\",\n \"utf16_spanish2_ci\": \"utf16\",\n \"utf16_roman_ci\": \"utf16\",\n \"utf16_persian_ci\": \"utf16\",\n \"utf16_esperanto_ci\": \"utf16\",\n \"utf16_hungarian_ci\": \"utf16\",\n \"utf16_sinhala_ci\": \"utf16\",\n \"utf16_german2_ci\": \"utf16\",\n \"utf16_croatian_ci\": \"utf16\",\n \"utf16_unicode_520_ci\": \"utf16\",\n \"utf16_vietnamese_ci\": \"utf16\",\n \"utf16le_general_ci\": \"utf16le\",\n \"utf16le_bin\": \"utf16le\",\n \"cp1256_general_ci\": \"cp1256\",\n \"cp1256_bin\": \"cp1256\",\n \"cp1257_lithuanian_ci\": \"cp1257\",\n \"cp1257_bin\": \"cp1257\",\n \"cp1257_general_ci\": \"cp1257\",\n \"utf32_general_ci\": \"utf32\",\n \"utf32_bin\": \"utf32\",\n \"utf32_unicode_ci\": \"utf32\",\n \"utf32_icelandic_ci\": \"utf32\",\n \"utf32_latvian_ci\": \"utf32\",\n \"utf32_romanian_ci\": \"utf32\",\n \"utf32_slovenian_ci\": \"utf32\",\n \"utf32_polish_ci\": \"utf32\",\n \"utf32_estonian_ci\": \"utf32\",\n \"utf32_spanish_ci\": \"utf32\",\n \"utf32_swedish_ci\": \"utf32\",\n \"utf32_turkish_ci\": \"utf32\",\n \"utf32_czech_ci\": \"utf32\",\n \"utf32_danish_ci\": \"utf32\",\n \"utf32_lithuanian_ci\": \"utf32\",\n \"utf32_slovak_ci\": \"utf32\",\n \"utf32_spanish2_ci\": \"utf32\",\n \"utf32_roman_ci\": \"utf32\",\n \"utf32_persian_ci\": \"utf32\",\n \"utf32_esperanto_ci\": \"utf32\",\n \"utf32_hungarian_ci\": \"utf32\",\n \"utf32_sinhala_ci\": \"utf32\",\n \"utf32_german2_ci\": \"utf32\",\n \"utf32_croatian_ci\": \"utf32\",\n \"utf32_unicode_520_ci\": \"utf32\",\n \"utf32_vietnamese_ci\": \"utf32\",\n \"binary\": \"binary\",\n \"geostd8_general_ci\": \"geostd8\",\n \"geostd8_bin\": \"geostd8\",\n \"cp932_japanese_ci\": \"cp932\",\n \"cp932_bin\": \"cp932\",\n \"eucjpms_japanese_ci\": \"eucjpms\",\n \"eucjpms_bin\": \"eucjpms\",\n \"gb18030_chinese_ci\": \"gb18030\",\n \"gb18030_bin\": \"gb18030\",\n \"gb18030_unicode_520_ci\": 
\"gb18030\"}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Parsing the raw.csv generated by running lis2dh_cluster.py
g = 9.806
def twos_complement(lsb, msb):
signBit = (msb & 0b10000000) >> 7
msb &= 0x7F # Strip off sign bit
if signBit:
x = (msb << 8) + lsb
x ^= 0x7FFF
x = -1 - x
else:
x = (msb << 8) + lsb
x = x>>6 # Remove left justification of data
return x
offset = 'not_set'
with open('raw.csv', 'r') as infile:
with open('parsed.csv', 'a') as outfile:
# Read the first line (the column headers)
headers = infile.readline().strip('\n\r')
headers = headers.split(';')
newheaders = []
for header in headers:
if header == 't': newheaders += ['t']
else: newheaders += [header+'x', header+'y', header+'z']
newheaders = ','.join(newheaders)
outfile.write(newheaders + '\n')
# Read and parse all sequential lines
line_in = infile.readline().strip('\n\r')
while line_in:
line_out = ''
data = line_in.split(';')
timestamp = eval(data[0])
if offset == 'not_set':
offset = timestamp
line_out += str(timestamp - offset)
for accel in data[1:]:
array = eval(accel) # Quick and dirty way of converting string to array
line_out += ','
line_out += str(twos_complement(array[0], array[1]))
line_out += ','
line_out += str(twos_complement(array[2], array[3]))
line_out += ','
line_out += str(twos_complement(array[4], array[5]))
line_out += '\n'
outfile.write(line_out)
            try:
                line_in = infile.readline().strip('\n\r')
            except Exception:
                break  # stop on a read error instead of re-processing the same line forever
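
The bit twiddling in twos_complement is easy to get wrong, so a few hand-computed cases make a useful sanity check. This is a sketch assuming the function above is in scope; the byte pairs are made-up values, not real sensor output:

# Hand-checked cases for the 10-bit, left-justified two's-complement decoder.
assert twos_complement(0x00, 0x40) == 256    # raw 0x4000 -> +16384, >> 6 -> 256
assert twos_complement(0x00, 0xC0) == -256   # raw 0xC000 -> -16384, >> 6 -> -256
assert twos_complement(0xC0, 0xFF) == -1     # raw 0xFFC0 -> -64, >> 6 -> -1
print('twos_complement sanity checks passed')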
|
normal
|
{
"blob_id": "a1b579494d20e8b8a26f7636ebd444252d2aa250",
"index": 4824,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef twos_complement(lsb, msb):\n signBit = (msb & 128) >> 7\n msb &= 127\n if signBit:\n x = (msb << 8) + lsb\n x ^= 32767\n x = -1 - x\n else:\n x = (msb << 8) + lsb\n x = x >> 6\n return x\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef twos_complement(lsb, msb):\n signBit = (msb & 128) >> 7\n msb &= 127\n if signBit:\n x = (msb << 8) + lsb\n x ^= 32767\n x = -1 - x\n else:\n x = (msb << 8) + lsb\n x = x >> 6\n return x\n\n\n<mask token>\nwith open('raw.csv', 'r') as infile:\n with open('parsed.csv', 'a') as outfile:\n headers = infile.readline().strip('\\n\\r')\n headers = headers.split(';')\n newheaders = []\n for header in headers:\n if header == 't':\n newheaders += ['t']\n else:\n newheaders += [header + 'x', header + 'y', header + 'z']\n newheaders = ','.join(newheaders)\n outfile.write(newheaders + '\\n')\n line_in = infile.readline().strip('\\n\\r')\n while line_in:\n line_out = ''\n data = line_in.split(';')\n timestamp = eval(data[0])\n if offset == 'not_set':\n offset = timestamp\n line_out += str(timestamp - offset)\n for accel in data[1:]:\n array = eval(accel)\n line_out += ','\n line_out += str(twos_complement(array[0], array[1]))\n line_out += ','\n line_out += str(twos_complement(array[2], array[3]))\n line_out += ','\n line_out += str(twos_complement(array[4], array[5]))\n line_out += '\\n'\n outfile.write(line_out)\n try:\n line_in = infile.readline().strip('\\n\\r')\n except:\n pass\n",
"step-4": "g = 9.806\n\n\ndef twos_complement(lsb, msb):\n signBit = (msb & 128) >> 7\n msb &= 127\n if signBit:\n x = (msb << 8) + lsb\n x ^= 32767\n x = -1 - x\n else:\n x = (msb << 8) + lsb\n x = x >> 6\n return x\n\n\noffset = 'not_set'\nwith open('raw.csv', 'r') as infile:\n with open('parsed.csv', 'a') as outfile:\n headers = infile.readline().strip('\\n\\r')\n headers = headers.split(';')\n newheaders = []\n for header in headers:\n if header == 't':\n newheaders += ['t']\n else:\n newheaders += [header + 'x', header + 'y', header + 'z']\n newheaders = ','.join(newheaders)\n outfile.write(newheaders + '\\n')\n line_in = infile.readline().strip('\\n\\r')\n while line_in:\n line_out = ''\n data = line_in.split(';')\n timestamp = eval(data[0])\n if offset == 'not_set':\n offset = timestamp\n line_out += str(timestamp - offset)\n for accel in data[1:]:\n array = eval(accel)\n line_out += ','\n line_out += str(twos_complement(array[0], array[1]))\n line_out += ','\n line_out += str(twos_complement(array[2], array[3]))\n line_out += ','\n line_out += str(twos_complement(array[4], array[5]))\n line_out += '\\n'\n outfile.write(line_out)\n try:\n line_in = infile.readline().strip('\\n\\r')\n except:\n pass\n",
"step-5": "# Parsing the raw.csv generated by running lis2dh_cluster.py\ng = 9.806\n\ndef twos_complement(lsb, msb):\n signBit = (msb & 0b10000000) >> 7\n msb &= 0x7F # Strip off sign bit\n if signBit:\n x = (msb << 8) + lsb\n x ^= 0x7FFF\n x = -1 - x\n else:\n x = (msb << 8) + lsb\n x = x>>6 # Remove left justification of data\n return x\n\n\noffset = 'not_set'\nwith open('raw.csv', 'r') as infile:\n with open('parsed.csv', 'a') as outfile:\n \n # Read the first line (the column headers)\n headers = infile.readline().strip('\\n\\r')\n headers = headers.split(';')\n newheaders = []\n for header in headers:\n if header == 't': newheaders += ['t']\n else: newheaders += [header+'x', header+'y', header+'z']\n newheaders = ','.join(newheaders)\n outfile.write(newheaders + '\\n')\n \n # Read and parse all sequential lines\n line_in = infile.readline().strip('\\n\\r')\n while line_in:\n line_out = ''\n data = line_in.split(';')\n timestamp = eval(data[0])\n if offset == 'not_set':\n offset = timestamp\n line_out += str(timestamp - offset)\n for accel in data[1:]:\n array = eval(accel) # Quick and dirty way of converting string to array\n line_out += ','\n line_out += str(twos_complement(array[0], array[1]))\n line_out += ','\n line_out += str(twos_complement(array[2], array[3]))\n line_out += ','\n line_out += str(twos_complement(array[4], array[5]))\n line_out += '\\n'\n outfile.write(line_out)\n try:\n line_in = infile.readline().strip('\\n\\r')\n except:\n pass",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import matplotlib.pyplot as plt
plt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5],
'go-', label='line 1', linewidth=2)
plt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25],
'rs--', label='line 2', linewidth=4)
plt.axis([0, 6, 0, 26])
plt.legend(loc="upper right")
plt.show()
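
One practical variant: plt.show() needs a display, so on a headless machine (CI, a remote server) the same figure can be written to disk with the non-interactive Agg backend instead. The filename lines.png is an arbitrary choice:

import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before importing pyplot
import matplotlib.pyplot as plt

plt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)
plt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2', linewidth=4)
plt.axis([0, 6, 0, 26])
plt.legend(loc='upper right')
plt.savefig('lines.png', dpi=150)  # write the figure to a file instead of opening a window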
|
normal
|
{
"blob_id": "7eeba06e78bd1e7139b1706574c4d040465d4566",
"index": 4178,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2',\n linewidth=4)\nplt.axis([0, 6, 0, 26])\nplt.legend(loc='upper right')\nplt.show()\n",
"step-3": "import matplotlib.pyplot as plt\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5], 'go-', label='line 1', linewidth=2)\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25], 'rs--', label='line 2',\n linewidth=4)\nplt.axis([0, 6, 0, 26])\nplt.legend(loc='upper right')\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\n\nplt.plot([1, 2, 3, 4, 5], [1, 2, 3, 4, 5],\n 'go-', label='line 1', linewidth=2)\n\nplt.plot([1, 2, 3, 4, 5], [1, 4, 9, 16, 25],\n 'rs--', label='line 2', linewidth=4)\n\nplt.axis([0, 6, 0, 26])\nplt.legend(loc=\"upper right\")\nplt.show()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from typing import List


class Solution:
def sumSubarrayMins(self, A: List[int]) ->int:
stack = []
prev = [None] * len(A)
for i in range(len(A)):
while stack and A[stack[-1]] >= A[i]:
stack.pop()
prev[i] = stack[-1] if stack else -1
stack.append(i)
stack = []
nex = [None] * len(A)
for i in range(len(A) - 1, -1, -1):
while stack and A[stack[-1]] > A[i]:
stack.pop()
nex[i] = stack[-1] if stack else len(A)
stack.append(i)
        return sum((i - prev[i]) * (nex[i] - i) * A[i]
                   for i in range(len(A))) % (10 ** 9 + 7)
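
A quick check against a small input confirms the monotonic-stack bookkeeping (popping on >= in the left pass but strict > in the right pass avoids double-counting equal minima). The sketch assumes the typing import added above:

# Subarray minimums of [3, 1, 2, 4]:
# 3+1+2+4 (length 1) + 1+1+2 (length 2) + 1+1 (length 3) + 1 (length 4) = 17
assert Solution().sumSubarrayMins([3, 1, 2, 4]) == 17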
|
normal
|
{
"blob_id": "97029ac9f05037bf9304dacf86c35f5534d887c4",
"index": 8303,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def sumSubarrayMins(self, A: List[int]) ->int:\n stack = []\n prev = [None] * len(A)\n for i in range(len(A)):\n while stack and A[stack[-1]] >= A[i]:\n stack.pop()\n prev[i] = stack[-1] if stack else -1\n stack.append(i)\n stack = []\n nex = [None] * len(A)\n for i in range(len(A) - 1, -1, -1):\n while stack and A[stack[-1]] > A[i]:\n stack.pop()\n nex[i] = stack[-1] if stack else len(A)\n stack.append(i)\n return sum((i - prev[i]) * (nex[i] - i) * A[i] for i in range(len(A))\n ) % (10 ** 9 + 7)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import torch


def DiceLoss(pred, target, smooth=2):
    # Soft Dice loss: 1 - (2*sum(pred*target) + smooth) / (sum(pred) + sum(target) + smooth).
    # The smoothing term keeps the ratio defined when both masks are empty.
    index = (2 * torch.sum(pred * target) + smooth) / (
        torch.sum(pred) + torch.sum(target) + smooth)
    return 1 - index
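
A minimal smoke test, assuming float masks where 1 marks foreground; the shapes are arbitrary:

pred = torch.ones(1, 4, 4)
print(DiceLoss(pred, torch.ones(1, 4, 4)).item())   # perfect overlap -> 0.0
print(DiceLoss(pred, torch.zeros(1, 4, 4)).item())  # disjoint masks -> 1 - 2/18 ~= 0.889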
|
normal
|
{
"blob_id": "0aa0fcbb0ec1272bea93574a9287de9f526539c8",
"index": 3119,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef DiceLoss(pred, target, smooth=2):\n index = (2 * torch.sum(pred * target) + smooth) / (torch.sum(pred) +\n torch.sum(target) + smooth)\n return 1 - index\n",
"step-3": "import torch\n\n\ndef DiceLoss(pred, target, smooth=2):\n index = (2 * torch.sum(pred * target) + smooth) / (torch.sum(pred) +\n torch.sum(target) + smooth)\n return 1 - index\n",
"step-4": "import torch\ndef DiceLoss(pred,target,smooth=2):\n # print(\"pred shape: \",pred.shape)\n # print(\"target shape: \",target.shape)\n index = (2*torch.sum(pred*target)+smooth)/(torch.sum(pred)+torch.sum(target)+smooth)\n #if torch.sum(target).item() == 0:\n #print(\"instersection: \",torch.sum(pred*target).item())\n # print(\"pred: \",torch.sum(pred).item())\n # print(\"target: \",torch.sum(target).item())\n #print(\"Index: \", index.item())\n return 1-index",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from functools import partial
import torch
from torch import nn
from src.backbone.layers.conv_block import ConvBNAct, MBConvConfig, MBConvSE, mobilenet_v2_init
from src.backbone.mobilenet_v2 import MobileNetV2
from src.backbone.utils import load_from_zoo
class MobileNetV3(MobileNetV2):
def __init__(self, residual_config, last_channel=1280, dropout=0.2, stochastic_depth=0.0,
block=MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d):
super(MobileNetV3, self).__init__(residual_config, dropout, stochastic_depth, block, act_layer, norm_layer)
in_ch = self.layer_infos[-1].in_ch
out_ch = in_ch * self.layer_infos[-1].expand_ratio
self.features[-1] = ConvBNAct(in_ch, out_ch, kernel_size=1, stride=1, norm_layer=self.norm_layer, act=self.act)
self.classifier = nn.Sequential(
nn.Linear(out_ch, last_channel),
act_layer(inplace=True),
)
self.out_channels = last_channel
def forward(self, x):
return self.dropout(self.classifier(torch.flatten(self.avg_pool(self.features(x)), 1)))
def get_mobilenet_v3(model_name:str, pretrained=True, **kwargs) -> nn.Module:
"""Get mobilenet_v3 large model
The changes from mobilenet_v3:
- change input channel to 16 and last stage structure to avoid redundancy
- change activation to nn.relu, nn.Hardsigmoid, nn.Hardswish to reduce computational cost
- apply se unit (larger hidden_dim than efficientnet)
"""
mbconfig = partial(MBConvConfig, depth_mult=1.0, width_mult=1.0, norm_layer=nn.BatchNorm2d,
se_act2=partial(nn.Hardsigmoid, inplace=True), se_reduction_ratio=4, se_reduce_mode='adjust')
if model_name == 'mobilenet_v3_large':
residual_config = [
# expand k s in out layers act
mbconfig(1, 3, 1, 16, 16, 1, act=nn.ReLU, use_se=False),
mbconfig(4, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),
mbconfig(3, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),
mbconfig(3, 5, 2, 24, 40, 1, act=nn.ReLU, use_se=True),
mbconfig(3, 5, 1, 40, 40, 2, act=nn.ReLU, use_se=True),
mbconfig(6, 3, 2, 40, 80, 1, act=nn.Hardswish, use_se=False),
mbconfig(2.5, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),
mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),
mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),
mbconfig(6, 3, 1, 80, 112, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 3, 1, 112, 112, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 5, 2, 112, 160, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),
]
last_channel = 1280
elif model_name == 'mobilenet_v3_small':
residual_config = [
# expand k s in out layers act
mbconfig(1, 3, 2, 16, 16, 1, act=nn.ReLU, use_se=True),
mbconfig(4.5, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),
mbconfig(3.5, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),
mbconfig(4, 5, 2, 24, 40, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),
mbconfig(3, 5, 1, 40, 48, 1, act=nn.Hardswish, use_se=True),
mbconfig(3, 5, 1, 48, 48, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 5, 2, 48, 96, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),
mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),
]
        last_channel = 1024
    else:
        raise ValueError('unknown mobilenet_v3 model name: {}'.format(model_name))
model = MobileNetV3(residual_config, last_channel=last_channel, block=MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d)
mobilenet_v2_init(model)
if pretrained:
load_from_zoo(model, model_name)
return model
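
A usage sketch, assuming the src.backbone package is importable; pretrained=False skips the weight download, and the dummy 224x224 batch is only for a shape check:

model = get_mobilenet_v3('mobilenet_v3_large', pretrained=False)
model.eval()  # dropout becomes a no-op in eval mode
with torch.no_grad():
    feats = model(torch.randn(1, 3, 224, 224))
print(feats.shape)  # expected torch.Size([1, 1280]) given last_channel=1280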
|
normal
|
{
"blob_id": "4a5185fac7d6c09daa76b5d0d5aee863028a6bce",
"index": 5328,
"step-1": "<mask token>\n\n\nclass MobileNetV3(MobileNetV2):\n <mask token>\n\n def forward(self, x):\n return self.dropout(self.classifier(torch.flatten(self.avg_pool(\n self.features(x)), 1)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MobileNetV3(MobileNetV2):\n\n def __init__(self, residual_config, last_channel=1280, dropout=0.2,\n stochastic_depth=0.0, block=MBConvSE, act_layer=nn.Hardswish,\n norm_layer=nn.BatchNorm2d):\n super(MobileNetV3, self).__init__(residual_config, dropout,\n stochastic_depth, block, act_layer, norm_layer)\n in_ch = self.layer_infos[-1].in_ch\n out_ch = in_ch * self.layer_infos[-1].expand_ratio\n self.features[-1] = ConvBNAct(in_ch, out_ch, kernel_size=1, stride=\n 1, norm_layer=self.norm_layer, act=self.act)\n self.classifier = nn.Sequential(nn.Linear(out_ch, last_channel),\n act_layer(inplace=True))\n self.out_channels = last_channel\n\n def forward(self, x):\n return self.dropout(self.classifier(torch.flatten(self.avg_pool(\n self.features(x)), 1)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MobileNetV3(MobileNetV2):\n\n def __init__(self, residual_config, last_channel=1280, dropout=0.2,\n stochastic_depth=0.0, block=MBConvSE, act_layer=nn.Hardswish,\n norm_layer=nn.BatchNorm2d):\n super(MobileNetV3, self).__init__(residual_config, dropout,\n stochastic_depth, block, act_layer, norm_layer)\n in_ch = self.layer_infos[-1].in_ch\n out_ch = in_ch * self.layer_infos[-1].expand_ratio\n self.features[-1] = ConvBNAct(in_ch, out_ch, kernel_size=1, stride=\n 1, norm_layer=self.norm_layer, act=self.act)\n self.classifier = nn.Sequential(nn.Linear(out_ch, last_channel),\n act_layer(inplace=True))\n self.out_channels = last_channel\n\n def forward(self, x):\n return self.dropout(self.classifier(torch.flatten(self.avg_pool(\n self.features(x)), 1)))\n\n\ndef get_mobilenet_v3(model_name: str, pretrained=True, **kwargs) ->nn.Module:\n \"\"\"Get mobilenet_v3 large model\n\n The changes from mobilenet_v3:\n - change input channel to 16 and last stage structure to avoid redundancy\n - change activation to nn.relu, nn.Hardsigmoid, nn.Hardswish to reduce computational cost\n - apply se unit (larger hidden_dim than efficientnet)\n \"\"\"\n mbconfig = partial(MBConvConfig, depth_mult=1.0, width_mult=1.0,\n norm_layer=nn.BatchNorm2d, se_act2=partial(nn.Hardsigmoid, inplace=\n True), se_reduction_ratio=4, se_reduce_mode='adjust')\n if model_name == 'mobilenet_v3_large':\n residual_config = [mbconfig(1, 3, 1, 16, 16, 1, act=nn.ReLU, use_se\n =False), mbconfig(4, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False\n ), mbconfig(3, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 5, 2, 24, 40, 1, act=nn.ReLU, use_se=True),\n mbconfig(3, 5, 1, 40, 40, 2, act=nn.ReLU, use_se=True),\n mbconfig(6, 3, 2, 40, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.5, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(6, 3, 1, 80, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 3, 1, 112, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 112, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True)]\n last_channel = 1280\n elif model_name == 'mobilenet_v3_small':\n residual_config = [mbconfig(1, 3, 2, 16, 16, 1, act=nn.ReLU, use_se\n =True), mbconfig(4.5, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=\n False), mbconfig(3.5, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=\n False), mbconfig(4, 5, 2, 24, 40, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=\n True), mbconfig(3, 5, 1, 40, 48, 1, act=nn.Hardswish, use_se=\n True), mbconfig(3, 5, 1, 48, 48, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 2, 48, 96, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True)]\n last_channel = 1024\n model = MobileNetV3(residual_config, last_channel=last_channel, block=\n MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d)\n mobilenet_v2_init(model)\n if pretrained:\n load_from_zoo(model, model_name)\n return model\n",
"step-4": "from functools import partial\nimport torch\nfrom torch import nn\nfrom src.backbone.layers.conv_block import ConvBNAct, MBConvConfig, MBConvSE, mobilenet_v2_init\nfrom src.backbone.mobilenet_v2 import MobileNetV2\nfrom src.backbone.utils import load_from_zoo\n\n\nclass MobileNetV3(MobileNetV2):\n\n def __init__(self, residual_config, last_channel=1280, dropout=0.2,\n stochastic_depth=0.0, block=MBConvSE, act_layer=nn.Hardswish,\n norm_layer=nn.BatchNorm2d):\n super(MobileNetV3, self).__init__(residual_config, dropout,\n stochastic_depth, block, act_layer, norm_layer)\n in_ch = self.layer_infos[-1].in_ch\n out_ch = in_ch * self.layer_infos[-1].expand_ratio\n self.features[-1] = ConvBNAct(in_ch, out_ch, kernel_size=1, stride=\n 1, norm_layer=self.norm_layer, act=self.act)\n self.classifier = nn.Sequential(nn.Linear(out_ch, last_channel),\n act_layer(inplace=True))\n self.out_channels = last_channel\n\n def forward(self, x):\n return self.dropout(self.classifier(torch.flatten(self.avg_pool(\n self.features(x)), 1)))\n\n\ndef get_mobilenet_v3(model_name: str, pretrained=True, **kwargs) ->nn.Module:\n \"\"\"Get mobilenet_v3 large model\n\n The changes from mobilenet_v3:\n - change input channel to 16 and last stage structure to avoid redundancy\n - change activation to nn.relu, nn.Hardsigmoid, nn.Hardswish to reduce computational cost\n - apply se unit (larger hidden_dim than efficientnet)\n \"\"\"\n mbconfig = partial(MBConvConfig, depth_mult=1.0, width_mult=1.0,\n norm_layer=nn.BatchNorm2d, se_act2=partial(nn.Hardsigmoid, inplace=\n True), se_reduction_ratio=4, se_reduce_mode='adjust')\n if model_name == 'mobilenet_v3_large':\n residual_config = [mbconfig(1, 3, 1, 16, 16, 1, act=nn.ReLU, use_se\n =False), mbconfig(4, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False\n ), mbconfig(3, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 5, 2, 24, 40, 1, act=nn.ReLU, use_se=True),\n mbconfig(3, 5, 1, 40, 40, 2, act=nn.ReLU, use_se=True),\n mbconfig(6, 3, 2, 40, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.5, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(6, 3, 1, 80, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 3, 1, 112, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 112, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True)]\n last_channel = 1280\n elif model_name == 'mobilenet_v3_small':\n residual_config = [mbconfig(1, 3, 2, 16, 16, 1, act=nn.ReLU, use_se\n =True), mbconfig(4.5, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=\n False), mbconfig(3.5, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=\n False), mbconfig(4, 5, 2, 24, 40, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=\n True), mbconfig(3, 5, 1, 40, 48, 1, act=nn.Hardswish, use_se=\n True), mbconfig(3, 5, 1, 48, 48, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 2, 48, 96, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=\n True), mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True)]\n last_channel = 1024\n model = MobileNetV3(residual_config, last_channel=last_channel, block=\n MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d)\n mobilenet_v2_init(model)\n if pretrained:\n 
load_from_zoo(model, model_name)\n return model\n",
"step-5": "from functools import partial\n\nimport torch\nfrom torch import nn\n\nfrom src.backbone.layers.conv_block import ConvBNAct, MBConvConfig, MBConvSE, mobilenet_v2_init\nfrom src.backbone.mobilenet_v2 import MobileNetV2\nfrom src.backbone.utils import load_from_zoo\n\n\nclass MobileNetV3(MobileNetV2):\n def __init__(self, residual_config, last_channel=1280, dropout=0.2, stochastic_depth=0.0,\n block=MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d):\n super(MobileNetV3, self).__init__(residual_config, dropout, stochastic_depth, block, act_layer, norm_layer)\n in_ch = self.layer_infos[-1].in_ch\n out_ch = in_ch * self.layer_infos[-1].expand_ratio\n self.features[-1] = ConvBNAct(in_ch, out_ch, kernel_size=1, stride=1, norm_layer=self.norm_layer, act=self.act)\n self.classifier = nn.Sequential(\n nn.Linear(out_ch, last_channel),\n act_layer(inplace=True),\n )\n self.out_channels = last_channel\n\n def forward(self, x):\n return self.dropout(self.classifier(torch.flatten(self.avg_pool(self.features(x)), 1)))\n\n\ndef get_mobilenet_v3(model_name:str, pretrained=True, **kwargs) -> nn.Module:\n \"\"\"Get mobilenet_v3 large model\n\n The changes from mobilenet_v3:\n - change input channel to 16 and last stage structure to avoid redundancy\n - change activation to nn.relu, nn.Hardsigmoid, nn.Hardswish to reduce computational cost\n - apply se unit (larger hidden_dim than efficientnet)\n \"\"\"\n\n mbconfig = partial(MBConvConfig, depth_mult=1.0, width_mult=1.0, norm_layer=nn.BatchNorm2d,\n se_act2=partial(nn.Hardsigmoid, inplace=True), se_reduction_ratio=4, se_reduce_mode='adjust')\n\n if model_name == 'mobilenet_v3_large':\n residual_config = [\n # expand k s in out layers act\n mbconfig(1, 3, 1, 16, 16, 1, act=nn.ReLU, use_se=False),\n mbconfig(4, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 5, 2, 24, 40, 1, act=nn.ReLU, use_se=True),\n mbconfig(3, 5, 1, 40, 40, 2, act=nn.ReLU, use_se=True),\n mbconfig(6, 3, 2, 40, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.5, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(6, 3, 1, 80, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 3, 1, 112, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 112, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n ]\n last_channel = 1280\n elif model_name == 'mobilenet_v3_small':\n residual_config = [\n # expand k s in out layers act\n mbconfig(1, 3, 2, 16, 16, 1, act=nn.ReLU, use_se=True),\n mbconfig(4.5, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3.5, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(4, 5, 2, 24, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(3, 5, 1, 40, 48, 1, act=nn.Hardswish, use_se=True),\n mbconfig(3, 5, 1, 48, 48, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 48, 96, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),\n ]\n last_channel = 1024\n\n model = MobileNetV3(residual_config, last_channel=last_channel, block=MBConvSE, act_layer=nn.Hardswish, 
norm_layer=nn.BatchNorm2d)\n\n mobilenet_v2_init(model)\n\n if pretrained:\n load_from_zoo(model, model_name)\n\n return model",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import io
import yaml
from collections import OrderedDict
from rich.console import Console
from malwarebazaar.platform import get_config_path, get_config_dir
class Config(OrderedDict):
instance = None
def __init__(self):
ec = Console(stderr=True, style="bold red")
Config.ensure_path(ec)
config_file = get_config_path()
if not os.path.exists(config_file) or os.path.getsize(config_file) == 0:
ec.print("Config does not exist, please run the init command.")
exit(-1)
with io.open(config_file, "r") as handle:
config_data = yaml.load(handle.read(), Loader=yaml.Loader)
super().__init__(**config_data)
@staticmethod
def get_instance():
        if not Config.instance:
            Config.instance = Config()
return Config.instance
@staticmethod
def ensure_path(ec: Console = Console(stderr=True, style="bold red")):
config_dir = get_config_dir()
if not os.path.exists(config_dir):
os.mkdir(config_dir)
if not os.path.isdir(config_dir):
ec.print(f"{config_dir} should be a dir, but is a file.")
exit(-1)
@staticmethod
def init_config(key: str):
Config.ensure_path()
with io.open(get_config_path(), "w") as handle:
            bytes_written = handle.write(yaml.dump(
{
"api_key": key,
"csv_columns": {
"md5": "md5_hash",
"sha1": "sha1_hash",
"sha256": "sha256_hash",
"imphash": "imphash",
"signature": "signature",
"tags": "tags"
}
},
Dumper=yaml.Dumper
))
        if bytes_written <= 0:
            raise IOError("Writing to config file failed.")
return True
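# Hedged usage sketch (not part of the original module; the API key below is
# a placeholder, and the file lands wherever get_config_path() points):
#     Config.init_config("0123456789abcdef")   # writes the YAML config file
#     cfg = Config.get_instance()              # loads and caches the singleton
#     cfg["api_key"]                           # -> "0123456789abcdef"
#     cfg["csv_columns"]["sha256"]             # -> "sha256_hash"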
|
normal
|
{
"blob_id": "5a9e0b220d2c94aea7e3d67338771cf48c3aec8f",
"index": 6439,
"step-1": "<mask token>\n\n\nclass Config(OrderedDict):\n <mask token>\n\n def __init__(self):\n ec = Console(stderr=True, style='bold red')\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file\n ) == 0:\n ec.print('Config does not exist, please run the init command.')\n exit(-1)\n with io.open(config_file, 'r') as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console=Console(stderr=True, style='bold red')):\n config_dir = get_config_dir()\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n if not os.path.isdir(config_dir):\n ec.print(f'{config_dir} should be a dir, but is a file.')\n exit(-1)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Config(OrderedDict):\n <mask token>\n\n def __init__(self):\n ec = Console(stderr=True, style='bold red')\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file\n ) == 0:\n ec.print('Config does not exist, please run the init command.')\n exit(-1)\n with io.open(config_file, 'r') as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console=Console(stderr=True, style='bold red')):\n config_dir = get_config_dir()\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n if not os.path.isdir(config_dir):\n ec.print(f'{config_dir} should be a dir, but is a file.')\n exit(-1)\n\n @staticmethod\n def init_config(key: str):\n Config.ensure_path()\n with io.open(get_config_path(), 'w') as handle:\n bytes = handle.write(yaml.dump({'api_key': key, 'csv_columns':\n {'md5': 'md5_hash', 'sha1': 'sha1_hash', 'sha256':\n 'sha256_hash', 'imphash': 'imphash', 'signature':\n 'signature', 'tags': 'tags'}}, Dumper=yaml.Dumper))\n if bytes <= 0:\n raise IOError(f'Writing to config file failed.')\n return True\n",
"step-3": "<mask token>\n\n\nclass Config(OrderedDict):\n instance = None\n\n def __init__(self):\n ec = Console(stderr=True, style='bold red')\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file\n ) == 0:\n ec.print('Config does not exist, please run the init command.')\n exit(-1)\n with io.open(config_file, 'r') as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console=Console(stderr=True, style='bold red')):\n config_dir = get_config_dir()\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n if not os.path.isdir(config_dir):\n ec.print(f'{config_dir} should be a dir, but is a file.')\n exit(-1)\n\n @staticmethod\n def init_config(key: str):\n Config.ensure_path()\n with io.open(get_config_path(), 'w') as handle:\n bytes = handle.write(yaml.dump({'api_key': key, 'csv_columns':\n {'md5': 'md5_hash', 'sha1': 'sha1_hash', 'sha256':\n 'sha256_hash', 'imphash': 'imphash', 'signature':\n 'signature', 'tags': 'tags'}}, Dumper=yaml.Dumper))\n if bytes <= 0:\n raise IOError(f'Writing to config file failed.')\n return True\n",
"step-4": "import os\nimport io\nimport yaml\nfrom collections import OrderedDict\nfrom rich.console import Console\nfrom malwarebazaar.platform import get_config_path, get_config_dir\n\n\nclass Config(OrderedDict):\n instance = None\n\n def __init__(self):\n ec = Console(stderr=True, style='bold red')\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file\n ) == 0:\n ec.print('Config does not exist, please run the init command.')\n exit(-1)\n with io.open(config_file, 'r') as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console=Console(stderr=True, style='bold red')):\n config_dir = get_config_dir()\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n if not os.path.isdir(config_dir):\n ec.print(f'{config_dir} should be a dir, but is a file.')\n exit(-1)\n\n @staticmethod\n def init_config(key: str):\n Config.ensure_path()\n with io.open(get_config_path(), 'w') as handle:\n bytes = handle.write(yaml.dump({'api_key': key, 'csv_columns':\n {'md5': 'md5_hash', 'sha1': 'sha1_hash', 'sha256':\n 'sha256_hash', 'imphash': 'imphash', 'signature':\n 'signature', 'tags': 'tags'}}, Dumper=yaml.Dumper))\n if bytes <= 0:\n raise IOError(f'Writing to config file failed.')\n return True\n",
"step-5": "import os\nimport io\nimport yaml\nfrom collections import OrderedDict\n\nfrom rich.console import Console\n\nfrom malwarebazaar.platform import get_config_path, get_config_dir\n\n\nclass Config(OrderedDict):\n instance = None\n\n def __init__(self):\n ec = Console(stderr=True, style=\"bold red\")\n Config.ensure_path(ec)\n config_file = get_config_path()\n if not os.path.exists(config_file) or os.path.getsize(config_file) == 0:\n ec.print(\"Config does not exist, please run the init command.\")\n exit(-1)\n\n with io.open(config_file, \"r\") as handle:\n config_data = yaml.load(handle.read(), Loader=yaml.Loader)\n\n super().__init__(**config_data)\n\n @staticmethod\n def get_instance():\n if not Config.instance:\n return Config()\n return Config.instance\n\n @staticmethod\n def ensure_path(ec: Console = Console(stderr=True, style=\"bold red\")):\n config_dir = get_config_dir()\n\n if not os.path.exists(config_dir):\n os.mkdir(config_dir)\n\n if not os.path.isdir(config_dir):\n ec.print(f\"{config_dir} should be a dir, but is a file.\")\n exit(-1)\n\n @staticmethod\n def init_config(key: str):\n Config.ensure_path()\n with io.open(get_config_path(), \"w\") as handle:\n bytes = handle.write(yaml.dump(\n {\n \"api_key\": key,\n \"csv_columns\": {\n \"md5\": \"md5_hash\",\n \"sha1\": \"sha1_hash\",\n \"sha256\": \"sha256_hash\",\n \"imphash\": \"imphash\",\n \"signature\": \"signature\",\n \"tags\": \"tags\"\n }\n },\n Dumper=yaml.Dumper\n ))\n\n if bytes <= 0:\n raise IOError(f\"Writing to config file failed.\")\n return True\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# @Time : 2019/12/12 15:54
# @Author : Libuda
# @FileName: 远程服务器文件监控.py ("remote server file monitoring")
# @Software: PyCharm
import itchat
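# Minimal WeChat echo bot: auto_login() shows a QR code to scan with the
# WeChat mobile app; once logged in, every incoming text message is answered
# with its own text.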
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
return msg.text
itchat.auto_login()
itchat.run()
|
normal
|
{
"blob_id": "2b87b8571664989e78790bd9df23eee9cbd44035",
"index": 1363,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n return msg.text\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n return msg.text\n\n\nitchat.auto_login()\nitchat.run()\n",
"step-4": "import itchat\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n return msg.text\n\n\nitchat.auto_login()\nitchat.run()\n",
"step-5": "# @Time : 2019/12/12 15:54\n# @Author : Libuda\n# @FileName: 远程服务器文件监控.py\n# @Software: PyCharm\nimport itchat\n\n\n@itchat.msg_register(itchat.content.TEXT)\ndef text_reply(msg):\n return msg.text\n\n\nitchat.auto_login()\nitchat.run()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import os
import random
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
def file_len(file):
initial = file.tell()
file.seek(0, os.SEEK_END)
size = file.tell()
file.seek(initial)
return size
def run():
rand_seed = None
stderr_filename = None
stdout_filename = None
if len(sys.argv) >= 4:
rand_seed = int(sys.argv[3])
if len(sys.argv) >= 3:
stderr_filename = sys.argv[2]
if len(sys.argv) >= 2:
stdout_filename = sys.argv[1]
stdout_file = None
stderr_file = None
if stdout_filename:
stdout_file = open(stdout_filename, 'r')
else:
stdout_file = StringIO()
if stderr_filename:
stderr_file = open(stderr_filename, 'r')
else:
stderr_file = StringIO()
if not rand_seed:
sys.stdout.write(stdout_file.read())
sys.stderr.write(stderr_file.read())
else:
random.seed(rand_seed)
        stdout_len = file_len(stdout_file)
        stderr_len = file_len(stderr_file)
        stdout_eof = False
        stderr_eof = False
        while not stdout_eof or not stderr_eof:
            if not stdout_eof:
                # randrange needs an int bound; a short read marks EOF
                r = random.randrange(1, max(2, stdout_len // 4))
                data = stdout_file.read(r)
                if len(data) < r:
                    stdout_eof = True
                sys.stdout.write(data)
            if not stderr_eof:
                # size stderr chunks from the stderr stream, not stdout's length
                r = random.randrange(1, max(2, stderr_len // 4))
                data = stderr_file.read(r)
                if len(data) < r:
                    stderr_eof = True
                sys.stderr.write(data)
if __name__ == '__main__':
run()
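# Hedged usage sketch (the script and file names are placeholders):
#     python replay.py out.log err.log 42
# replays out.log to stdout and err.log to stderr, interleaved in random
# chunks seeded by 42; without the seed each stream is copied through whole.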
|
normal
|
{
"blob_id": "b7db0d2f4bbbc2c7763b9d2e6bede74979b65161",
"index": 4283,
"step-1": "<mask token>\n\n\ndef run():\n rand_seed = None\n stderr_filename = None\n stdout_filename = None\n if len(sys.argv) >= 4:\n rand_seed = int(sys.argv[3])\n if len(sys.argv) >= 3:\n stderr_filename = sys.argv[2]\n if len(sys.argv) >= 2:\n stdout_filename = sys.argv[1]\n stdout_file = None\n stderr_file = None\n if stdout_filename:\n stdout_file = open(stdout_filename, 'r')\n else:\n stdout_file = StringIO()\n if stderr_filename:\n stderr_file = open(stderr_filename, 'r')\n else:\n stderr_file = StringIO()\n if not rand_seed:\n sys.stdout.write(stdout_file.read())\n sys.stderr.write(stderr_file.read())\n else:\n random.seed(rand_seed)\n stdout_len = file_len(stdout_file)\n stdout_eof = False\n stderr_eof = False\n while not stdout_eof or not stderr_eof:\n if not stdout_eof:\n r = random.randrange(stdout_len / 4)\n data = stdout_file.read(r)\n if len(data) < r:\n stdout_eof = True\n sys.stdout.write(data)\n if not stderr_eof:\n r = random.randrange(stdout_len / 4)\n data = stderr_file.read(r)\n if len(data) < r:\n stderr_eof = True\n sys.stderr.write(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef file_len(file):\n initial = file.tell()\n file.seek(0, os.SEEK_END)\n size = file.tell()\n file.seek(initial)\n return size\n\n\ndef run():\n rand_seed = None\n stderr_filename = None\n stdout_filename = None\n if len(sys.argv) >= 4:\n rand_seed = int(sys.argv[3])\n if len(sys.argv) >= 3:\n stderr_filename = sys.argv[2]\n if len(sys.argv) >= 2:\n stdout_filename = sys.argv[1]\n stdout_file = None\n stderr_file = None\n if stdout_filename:\n stdout_file = open(stdout_filename, 'r')\n else:\n stdout_file = StringIO()\n if stderr_filename:\n stderr_file = open(stderr_filename, 'r')\n else:\n stderr_file = StringIO()\n if not rand_seed:\n sys.stdout.write(stdout_file.read())\n sys.stderr.write(stderr_file.read())\n else:\n random.seed(rand_seed)\n stdout_len = file_len(stdout_file)\n stdout_eof = False\n stderr_eof = False\n while not stdout_eof or not stderr_eof:\n if not stdout_eof:\n r = random.randrange(stdout_len / 4)\n data = stdout_file.read(r)\n if len(data) < r:\n stdout_eof = True\n sys.stdout.write(data)\n if not stderr_eof:\n r = random.randrange(stdout_len / 4)\n data = stderr_file.read(r)\n if len(data) < r:\n stderr_eof = True\n sys.stderr.write(data)\n\n\n<mask token>\n",
"step-3": "<mask token>\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\n\ndef file_len(file):\n initial = file.tell()\n file.seek(0, os.SEEK_END)\n size = file.tell()\n file.seek(initial)\n return size\n\n\ndef run():\n rand_seed = None\n stderr_filename = None\n stdout_filename = None\n if len(sys.argv) >= 4:\n rand_seed = int(sys.argv[3])\n if len(sys.argv) >= 3:\n stderr_filename = sys.argv[2]\n if len(sys.argv) >= 2:\n stdout_filename = sys.argv[1]\n stdout_file = None\n stderr_file = None\n if stdout_filename:\n stdout_file = open(stdout_filename, 'r')\n else:\n stdout_file = StringIO()\n if stderr_filename:\n stderr_file = open(stderr_filename, 'r')\n else:\n stderr_file = StringIO()\n if not rand_seed:\n sys.stdout.write(stdout_file.read())\n sys.stderr.write(stderr_file.read())\n else:\n random.seed(rand_seed)\n stdout_len = file_len(stdout_file)\n stdout_eof = False\n stderr_eof = False\n while not stdout_eof or not stderr_eof:\n if not stdout_eof:\n r = random.randrange(stdout_len / 4)\n data = stdout_file.read(r)\n if len(data) < r:\n stdout_eof = True\n sys.stdout.write(data)\n if not stderr_eof:\n r = random.randrange(stdout_len / 4)\n data = stderr_file.read(r)\n if len(data) < r:\n stderr_eof = True\n sys.stderr.write(data)\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "import sys\nimport os\nimport random\nif sys.version_info[0] < 3:\n from StringIO import StringIO\nelse:\n from io import StringIO\n\n\ndef file_len(file):\n initial = file.tell()\n file.seek(0, os.SEEK_END)\n size = file.tell()\n file.seek(initial)\n return size\n\n\ndef run():\n rand_seed = None\n stderr_filename = None\n stdout_filename = None\n if len(sys.argv) >= 4:\n rand_seed = int(sys.argv[3])\n if len(sys.argv) >= 3:\n stderr_filename = sys.argv[2]\n if len(sys.argv) >= 2:\n stdout_filename = sys.argv[1]\n stdout_file = None\n stderr_file = None\n if stdout_filename:\n stdout_file = open(stdout_filename, 'r')\n else:\n stdout_file = StringIO()\n if stderr_filename:\n stderr_file = open(stderr_filename, 'r')\n else:\n stderr_file = StringIO()\n if not rand_seed:\n sys.stdout.write(stdout_file.read())\n sys.stderr.write(stderr_file.read())\n else:\n random.seed(rand_seed)\n stdout_len = file_len(stdout_file)\n stdout_eof = False\n stderr_eof = False\n while not stdout_eof or not stderr_eof:\n if not stdout_eof:\n r = random.randrange(stdout_len / 4)\n data = stdout_file.read(r)\n if len(data) < r:\n stdout_eof = True\n sys.stdout.write(data)\n if not stderr_eof:\n r = random.randrange(stdout_len / 4)\n data = stderr_file.read(r)\n if len(data) < r:\n stderr_eof = True\n sys.stderr.write(data)\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#!/usr/bin/python3
"""
Requests username and tasks from JSON Placeholder
based on userid (which is sys.argv[1])
"""
import json
import requests
import sys
if __name__ == "__main__":
url = "https://jsonplaceholder.typicode.com"
if len(sys.argv) > 1:
user_id = sys.argv[1]
name = requests.get("{}/users/{}".format(
url, user_id)).json().get("name")
r = requests.get("{}/todos?userId={}".format(
url, user_id)).json()
tasks_completed = []
for task in r:
if task.get("completed") is True:
tasks_completed.append(task)
print("Employee {} is done with tasks({:d}/{:d}):".format(
name, len(tasks_completed), len(r)))
if len(tasks_completed) > 0:
for task in tasks_completed:
print("\t {}".format(task.get("title")))
|
normal
|
{
"blob_id": "e1a2b33a1ec7aca21a157895d8c7c5b5f29ff49c",
"index": 5047,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n url = 'https://jsonplaceholder.typicode.com'\n if len(sys.argv) > 1:\n user_id = sys.argv[1]\n name = requests.get('{}/users/{}'.format(url, user_id)).json().get(\n 'name')\n r = requests.get('{}/todos?userId={}'.format(url, user_id)).json()\n tasks_completed = []\n for task in r:\n if task.get('completed') is True:\n tasks_completed.append(task)\n print('Employee {} is done with tasks({:d}/{:d}):'.format(name, len\n (tasks_completed), len(r)))\n if len(tasks_completed) > 0:\n for task in tasks_completed:\n print('\\t {}'.format(task.get('title')))\n",
"step-3": "<mask token>\nimport json\nimport requests\nimport sys\nif __name__ == '__main__':\n url = 'https://jsonplaceholder.typicode.com'\n if len(sys.argv) > 1:\n user_id = sys.argv[1]\n name = requests.get('{}/users/{}'.format(url, user_id)).json().get(\n 'name')\n r = requests.get('{}/todos?userId={}'.format(url, user_id)).json()\n tasks_completed = []\n for task in r:\n if task.get('completed') is True:\n tasks_completed.append(task)\n print('Employee {} is done with tasks({:d}/{:d}):'.format(name, len\n (tasks_completed), len(r)))\n if len(tasks_completed) > 0:\n for task in tasks_completed:\n print('\\t {}'.format(task.get('title')))\n",
"step-4": "#!/usr/bin/python3\n\"\"\"\nRequests username and tasks from JSON Placeholder\nbased on userid (which is sys.argv[1])\n\"\"\"\nimport json\nimport requests\nimport sys\n\n\nif __name__ == \"__main__\":\n url = \"https://jsonplaceholder.typicode.com\"\n if len(sys.argv) > 1:\n user_id = sys.argv[1]\n name = requests.get(\"{}/users/{}\".format(\n url, user_id)).json().get(\"name\")\n r = requests.get(\"{}/todos?userId={}\".format(\n url, user_id)).json()\n tasks_completed = []\n for task in r:\n if task.get(\"completed\") is True:\n tasks_completed.append(task)\n print(\"Employee {} is done with tasks({:d}/{:d}):\".format(\n name, len(tasks_completed), len(r)))\n if len(tasks_completed) > 0:\n for task in tasks_completed:\n print(\"\\t {}\".format(task.get(\"title\")))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from foods.fruits import *
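# The wildcard import pulls every public name from foods.fruits into this
# namespace; an explicit form covering the names used below would be:
#     from foods.fruits import orange, apple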
orange.eat()
apple.eat()
|
normal
|
{
"blob_id": "ad84a5bfcf82dff1f4a7e8f08f3c4243ad24de52",
"index": 7318,
"step-1": "<mask token>\n",
"step-2": "<mask token>\norange.eat()\napple.eat()\n",
"step-3": "from foods.fruits import *\norange.eat()\napple.eat()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from typing import List
import scrapy
from scrapy.selector import Selector  # response.css() yields scrapy (parsel) Selectors
class RwidSpider(scrapy.Spider):
name = 'rwid'
allowed_domains = ['0.0.0.0']
# REQUEST LOGIN DARI URLS
start_urls = ['http://0.0.0.0:9999/']
# LOGIN DISINI
    def parse(self, response):
        # what's the difference between yield & return?
        # yield {"title": response.css("title::text").get()}
        # check with inspect element: is a login needed?
        data = {
            "username": "user",
            "password": "user12345"
        }
        # check FormRequest for what it needs
        return scrapy.FormRequest(
            url="http://0.0.0.0:9999/login",
            formdata=data,
            callback=self.after_login  # to extract the data
        )
    def after_login(self, response):
        """
        There are 2 tasks here:
        1. Grab all the product data on the results page -> goes on to the detail page (parse_detail)
        2. Grab all the "next" links -> loops back to self.after_login (sketched in the comment below)
        :param response:
        :return:
        """
        # get each product's detail link
        detail_products: List[Selector] = response.css(".card .card-title a")
        for detail in detail_products:
            href = detail.attrib.get("href")  # to get the url
            yield response.follow(href, callback=self.parse_detail)  # push this url onto scrapy's queue
yield {"title": response.css("title::text").get()}
def parse_detail(self, response):
yield {"title": response.css("title::text").get()}
|
normal
|
{
"blob_id": "2185d332f7cd4cbf17d6b72a19297d156c2182a1",
"index": 2233,
"step-1": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n <mask token>\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-2": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-3": "<mask token>\n\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n start_urls = ['http://0.0.0.0:9999/']\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-4": "from typing import List\nimport scrapy\nfrom cssselect import Selector\n\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n start_urls = ['http://0.0.0.0:9999/']\n\n def parse(self, response):\n data = {'username': 'user', 'password': 'user12345'}\n return scrapy.FormRequest(url='http://0.0.0.0:9999/login', formdata\n =data, callback=self.after_login)\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n detail_products: List[Selector] = response.css('.card .card-title a')\n for detail in detail_products:\n href = detail.attrib.get('href')\n yield response.follow(href, callback=self.parse_detail)\n yield {'title': response.css('title::text').get()}\n\n def parse_detail(self, response):\n yield {'title': response.css('title::text').get()}\n",
"step-5": "from typing import List\n\nimport scrapy\nfrom cssselect import Selector\n\nclass RwidSpider(scrapy.Spider):\n name = 'rwid'\n allowed_domains = ['0.0.0.0']\n\n # REQUEST LOGIN DARI URLS\n start_urls = ['http://0.0.0.0:9999/']\n\n # LOGIN DISINI\n def parse(self, response):\n # apa bedanya yield & return\n # yield {\"title\": response.css(\"title::text\").get()}\n\n # cek di inspect element perlu login tidak?\n\n data = {\n \"username\": \"user\",\n \"password\": \"user12345\"\n }\n\n # cek di FormRequest butuhnya apa aja\n return scrapy.FormRequest(\n url=\"http://0.0.0.0:9999/login\",\n formdata=data,\n callback=self.after_login # untuk mengektraksi data\n )\n\n def after_login(self, response):\n \"\"\"\n Ada 2 Task disini :\n 1. Ambil semua data barang yang ada dihalaman hasil -> akan menuju detail (parsing detail)\n 2. Ambil semua link next -> akan balik ke self.after_login\n\n :param response:\n :return:\n \"\"\"\n\n # get detail product\n detail_products: List[Selector] = response.css(\".card .card-title a\")\n for detail in detail_products:\n href = detail.attrib.get(\"href\") # untuk mendapatkan urls\n yield response.follow(href, callback=self.parse_detail) # masukkan urls ini ke antrian scrapy\n\n yield {\"title\": response.css(\"title::text\").get()}\n\n def parse_detail(self, response):\n yield {\"title\": response.css(\"title::text\").get()}\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import sys
prop = float(sys.argv[1])
def kind(n):
    s = str(n)
    l = len(s)
    i = 0
    j = i + 1
    decr, incr = False, False
    while j < l:
        a = int(s[i])
        b = int(s[j])
        if a > b:
            decr = True
        elif a < b:
            incr = True
        i += 1
        j += 1
        if decr and incr:
            return True
    return False
def calc(prop):
currentProp = 0
i = 100
countBouncy = 0
while currentProp < prop:
if kind(i):
countBouncy += 1
currentProp = (countBouncy * 100) / i
if currentProp >= prop:
return i
i += 1
return "Proportion was not reached."
print(calc(prop))
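# Sanity checks from Project Euler 112 (the script name is a placeholder):
# the proportion of bouncy numbers first reaches exactly 50% at 538 and 90%
# at 21780, so
#     python bouncy.py 50     # prints 538
#     python bouncy.py 99     # prints 1587000, the well-known PE 112 answer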
|
normal
|
{
"blob_id": "0de27101675eb8328d9a2831ed468a969b03e7d3",
"index": 5741,
"step-1": "<mask token>\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-3": "<mask token>\nprop = float(sys.argv[1])\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-4": "import sys\nprop = float(sys.argv[1])\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-5": "import sys\n\nprop = float(sys.argv[1])\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = (countBouncy * 100) / i\n if currentProp >= prop:\n return i\n i += 1\n return \"Proportion was not reached.\"\n\ncalc(prop)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# coding: utf-8
import logging
from flask import request
from flask.ext.admin import expose
from cores.actions import action
from cores.adminweb import BaseHandler
from dao.bannerdao import banner
from extends import csrf
from libs.flask_login import login_required
from utils.function_data_flow import flow_tools
from utils.helpers import utf8
from utils.numbering import numbers
__author__ = 'bin wen'
_log = logging.getLogger("ADMIN")
_handler_log = logging.getLogger("HANDLER")
class BannerHandler(BaseHandler):
"""
    Carousel banner (focal image) list.
"""
column_list = ("banner_code", "name", "banner_type", "target", "image", 'validity',
"updated_time", "remark")
column_labels = {
"banner_code": u"编号",
"name": u"名称",
"banner_type": u"类型",
"target": u"跳转目标",
"image": u"图片",
"validity": u"状态",
"updated_time": u"变更时间",
"remark": u"备注"
}
column_widget_args = {
"image": {'class': "hidden-480"},
"remark": {'class': "hidden-480"}
}
tabs_list = (
{"query_type": -1, "name": u"全部"},
{"query_type": 1, "name": u"有效的"},
{"query_type": 0, "name": u"已作废"}
)
@expose('/')
@expose('/banner/list.html')
@login_required
def list_view(self):
page = request.args.get('page', 0, type=int)
name = request.args.get('name', "")
query_type = request.args.get('query_type', -1, type=int)
query_kwargs = dict(name=name, query_type=query_type)
def pager_url(p):
if p is None:
p = 0
return self._get_url('.list_view', p, **query_kwargs)
count = banner.get_total_count(**query_kwargs)
results = []
num_pages = 0
if count > 0:
num_pages = self.gen_total_pages(count)
if num_pages - 1 < page:
page -= 1
offset_value = page * self.page_size
results = banner.query_list(
query_type=query_type,
name=name,
limit=self.page_size,
offset=offset_value
)
actions, actions_confirmation = self.get_actions_list()
return_url = self.gen_return_url(".list_view", page=page, **query_kwargs)
return self.render(
template="banner/list.html",
actions=actions,
actions_confirmation=actions_confirmation,
count=count,
page=page,
num_pages=num_pages,
pager_url=pager_url,
data=results,
query_kwargs=query_kwargs,
return_url=return_url,
column_list=self.column_list,
column_labels=self.column_labels,
column_widget_args=self.column_widget_args,
tabs_list=self.tabs_list,
banner_types=flow_tools.gen_banner_type()
)
@expose('/banner/action.html', methods=('POST',))
@login_required
def action_view(self):
return_url = request.form.get("return_url", "")
return self.handle_action(return_view=return_url)
@action('disable', u"注销(下架)所选", u"你确定要注销(下架)所选的记录?")
def action_disable(self, ids):
try:
result = banner.set_validity(ids, validity=0)
_handler_log.info(u"[BannerListHandler] batch disable, id:{}, operator: {}".format(
utf8(ids), self.current_operator)
)
return result
        except Exception:
_log.exception(u"[BannerListHandler] batch disable error")
@action('activate', u"激活(上架)选择", u"你确定要激活所选的记录?")
def action_activate(self, ids):
try:
result = banner.set_validity(ids, validity=1)
_handler_log.info(u"[BannerListHandler] batch disable, id:{}, operator: {}".format(
utf8(ids), self.current_operator)
)
return result
        except Exception:
_log.exception(u"[BannerListHandler] batch disable error")
@action('delete', u"删除所选", u"你确定要删除所选的记录?")
def action_delete(self, ids):
try:
result = banner.delete(ids)
_handler_log.info(u"[BannerListHandler] batch delete, id:{}, operator: {}".format(
utf8(ids), self.current_operator)
)
return result
        except Exception:
_log.exception(u"[BannerListHandler] batch delete error")
@expose('/banner/create.html', methods=('GET', 'POST'))
@login_required
def create_view(self):
if request.method == "GET":
select_content_list = flow_tools.gen_bind_products()
result = {
"select_content_list": select_content_list,
"banner_types": flow_tools.gen_banner_type()
}
return self.render(template="banner/create.html", data=result)
else:
req_data = self.gen_arguments
name = req_data.get("name")
banner_type = int(req_data.get("banner_type", 0))
url_target = req_data.get("url_target", "") # 外部url
select_target = req_data.get("select_target", "") # 下拉内容
remark = req_data.get("remark", "")
picture_url_list = req_data.getlist("picture_url") # 图片url
if not picture_url_list:
return self.make_write(result_code=4002)
if banner_type == 2:
target = url_target
else:
target = select_target
result = banner.save(
banner_code=numbers.gen_banner_code(),
name=name,
banner_type=banner_type,
target=target,
image_url=picture_url_list[0],
remark=remark
)
return self.make_write(result_code=0, result_data=self.reverse_url(".list_view"))
@expose('/banner/edit.html', methods=('GET', 'POST'))
@login_required
def edit_view(self):
if request.method == "GET":
_id = request.args.get("id", "")
return_url = request.args.get("return_url", "")
result = banner.get_detail(_id)
banner_type = result.banner_type
select_content_list = []
if banner_type == 0:
select_content_list = flow_tools.gen_bind_products()
elif banner_type == 1:
select_content_list = flow_tools.gen_bind_tweets()
elif banner_type == 3:
select_content_list = flow_tools.gen_bind_groups()
result["banner_types"] = flow_tools.gen_banner_type()
result["select_content_list"] = select_content_list
return self.render(
template="banner/edit.html",
data=result,
return_url=return_url
)
else:
req_data = self.gen_arguments
return_url = req_data.get("return_url", "")
_id = req_data.get("id")
name = req_data.get("name")
banner_type = int(req_data.get("banner_type", 0))
url_target = req_data.get("url_target", "") # 外部url
select_target = req_data.get("select_target", "") # 下拉内容
remark = req_data.get("remark", "")
picture_url_list = req_data.getlist("picture_url") # 图片url
if not picture_url_list:
return self.make_write(result_code=4002)
if banner_type == 2:
target = url_target
else:
target = select_target
result = banner.update(
_id=_id,
name=name,
banner_type=banner_type,
target=target,
image_url=picture_url_list[0],
remark=remark
)
return self.make_write(result_code=0, result_data=self.decode_return_url(return_url))
@expose('/banner/delete.html', methods=('POST',))
@login_required
def delete_view(self):
req_data = self.gen_arguments
return_url = req_data.get("return_url", "")
_id = req_data.get("id")
result = banner.delete([_id])
_handler_log.exception(u"[AdminDeleteHandler] admin_id:{}, operator: {}".format(
utf8(_id), self.current_operator))
return self.make_write(result_code=0, result_data=self.decode_return_url(return_url))
@expose('/banner/detail.html', methods=('GET',))
@login_required
def detail_view(self):
pass
@csrf.exempt
@expose('/banner/ajax/check.html', methods=('POST',))
def check_view(self):
pass
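# Hedged wiring sketch (names are assumptions: BaseHandler is taken to be this
# project's Flask-Admin BaseView subclass, and `app` comes from the app factory):
#     from flask.ext.admin import Admin
#     admin = Admin(app, name=u"console")
#     admin.add_view(BannerHandler(name=u"Banner", endpoint="banner"))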
|
normal
|
{
"blob_id": "d80cb5ea57faa0f9e3a8dd5d40c9852c2f7f83e4",
"index": 4586,
"step-1": "<mask token>\n\n\nclass BannerHandler(BaseHandler):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @expose('/banner/action.html', methods=('POST',))\n @login_required\n def action_view(self):\n return_url = request.form.get('return_url', '')\n return self.handle_action(return_view=return_url)\n\n @action('disable', u'注销(下架)所选', u'你确定要注销(下架)所选的记录?')\n def action_disable(self, ids):\n try:\n result = banner.set_validity(ids, validity=0)\n _handler_log.info(\n u'[BannerListHandler] batch disable, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch disable error')\n\n @action('activate', u'激活(上架)选择', u'你确定要激活所选的记录?')\n def action_activate(self, ids):\n try:\n result = banner.set_validity(ids, validity=1)\n _handler_log.info(\n u'[BannerListHandler] batch disable, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch disable error')\n <mask token>\n <mask token>\n\n @expose('/banner/edit.html', methods=('GET', 'POST'))\n @login_required\n def edit_view(self):\n if request.method == 'GET':\n _id = request.args.get('id', '')\n return_url = request.args.get('return_url', '')\n result = banner.get_detail(_id)\n banner_type = result.banner_type\n select_content_list = []\n if banner_type == 0:\n select_content_list = flow_tools.gen_bind_products()\n elif banner_type == 1:\n select_content_list = flow_tools.gen_bind_tweets()\n elif banner_type == 3:\n select_content_list = flow_tools.gen_bind_groups()\n result['banner_types'] = flow_tools.gen_banner_type()\n result['select_content_list'] = select_content_list\n return self.render(template='banner/edit.html', data=result,\n return_url=return_url)\n else:\n req_data = self.gen_arguments\n return_url = req_data.get('return_url', '')\n _id = req_data.get('id')\n name = req_data.get('name')\n banner_type = int(req_data.get('banner_type', 0))\n url_target = req_data.get('url_target', '')\n select_target = req_data.get('select_target', '')\n remark = req_data.get('remark', '')\n picture_url_list = req_data.getlist('picture_url')\n if not picture_url_list:\n return self.make_write(result_code=4002)\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n result = banner.update(_id=_id, name=name, banner_type=\n banner_type, target=target, image_url=picture_url_list[0],\n remark=remark)\n return self.make_write(result_code=0, result_data=self.\n decode_return_url(return_url))\n <mask token>\n\n @expose('/banner/detail.html', methods=('GET',))\n @login_required\n def detail_view(self):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BannerHandler(BaseHandler):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @expose('/banner/action.html', methods=('POST',))\n @login_required\n def action_view(self):\n return_url = request.form.get('return_url', '')\n return self.handle_action(return_view=return_url)\n\n @action('disable', u'注销(下架)所选', u'你确定要注销(下架)所选的记录?')\n def action_disable(self, ids):\n try:\n result = banner.set_validity(ids, validity=0)\n _handler_log.info(\n u'[BannerListHandler] batch disable, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch disable error')\n\n @action('activate', u'激活(上架)选择', u'你确定要激活所选的记录?')\n def action_activate(self, ids):\n try:\n result = banner.set_validity(ids, validity=1)\n _handler_log.info(\n u'[BannerListHandler] batch disable, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch disable error')\n\n @action('delete', u'删除所选', u'你确定要删除所选的记录?')\n def action_delete(self, ids):\n try:\n result = banner.delete(ids)\n _handler_log.info(\n u'[BannerListHandler] batch delete, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch delete error')\n\n @expose('/banner/create.html', methods=('GET', 'POST'))\n @login_required\n def create_view(self):\n if request.method == 'GET':\n select_content_list = flow_tools.gen_bind_products()\n result = {'select_content_list': select_content_list,\n 'banner_types': flow_tools.gen_banner_type()}\n return self.render(template='banner/create.html', data=result)\n else:\n req_data = self.gen_arguments\n name = req_data.get('name')\n banner_type = int(req_data.get('banner_type', 0))\n url_target = req_data.get('url_target', '')\n select_target = req_data.get('select_target', '')\n remark = req_data.get('remark', '')\n picture_url_list = req_data.getlist('picture_url')\n if not picture_url_list:\n return self.make_write(result_code=4002)\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n result = banner.save(banner_code=numbers.gen_banner_code(),\n name=name, banner_type=banner_type, target=target,\n image_url=picture_url_list[0], remark=remark)\n return self.make_write(result_code=0, result_data=self.\n reverse_url('.list_view'))\n\n @expose('/banner/edit.html', methods=('GET', 'POST'))\n @login_required\n def edit_view(self):\n if request.method == 'GET':\n _id = request.args.get('id', '')\n return_url = request.args.get('return_url', '')\n result = banner.get_detail(_id)\n banner_type = result.banner_type\n select_content_list = []\n if banner_type == 0:\n select_content_list = flow_tools.gen_bind_products()\n elif banner_type == 1:\n select_content_list = flow_tools.gen_bind_tweets()\n elif banner_type == 3:\n select_content_list = flow_tools.gen_bind_groups()\n result['banner_types'] = flow_tools.gen_banner_type()\n result['select_content_list'] = select_content_list\n return self.render(template='banner/edit.html', data=result,\n return_url=return_url)\n else:\n req_data = self.gen_arguments\n return_url = req_data.get('return_url', '')\n _id = req_data.get('id')\n name = req_data.get('name')\n banner_type = int(req_data.get('banner_type', 0))\n url_target = req_data.get('url_target', '')\n select_target = req_data.get('select_target', '')\n remark = 
req_data.get('remark', '')\n picture_url_list = req_data.getlist('picture_url')\n if not picture_url_list:\n return self.make_write(result_code=4002)\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n result = banner.update(_id=_id, name=name, banner_type=\n banner_type, target=target, image_url=picture_url_list[0],\n remark=remark)\n return self.make_write(result_code=0, result_data=self.\n decode_return_url(return_url))\n <mask token>\n\n @expose('/banner/detail.html', methods=('GET',))\n @login_required\n def detail_view(self):\n pass\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BannerHandler(BaseHandler):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @expose('/banner/action.html', methods=('POST',))\n @login_required\n def action_view(self):\n return_url = request.form.get('return_url', '')\n return self.handle_action(return_view=return_url)\n\n @action('disable', u'注销(下架)所选', u'你确定要注销(下架)所选的记录?')\n def action_disable(self, ids):\n try:\n result = banner.set_validity(ids, validity=0)\n _handler_log.info(\n u'[BannerListHandler] batch disable, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch disable error')\n\n @action('activate', u'激活(上架)选择', u'你确定要激活所选的记录?')\n def action_activate(self, ids):\n try:\n result = banner.set_validity(ids, validity=1)\n _handler_log.info(\n u'[BannerListHandler] batch disable, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch disable error')\n\n @action('delete', u'删除所选', u'你确定要删除所选的记录?')\n def action_delete(self, ids):\n try:\n result = banner.delete(ids)\n _handler_log.info(\n u'[BannerListHandler] batch delete, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch delete error')\n\n @expose('/banner/create.html', methods=('GET', 'POST'))\n @login_required\n def create_view(self):\n if request.method == 'GET':\n select_content_list = flow_tools.gen_bind_products()\n result = {'select_content_list': select_content_list,\n 'banner_types': flow_tools.gen_banner_type()}\n return self.render(template='banner/create.html', data=result)\n else:\n req_data = self.gen_arguments\n name = req_data.get('name')\n banner_type = int(req_data.get('banner_type', 0))\n url_target = req_data.get('url_target', '')\n select_target = req_data.get('select_target', '')\n remark = req_data.get('remark', '')\n picture_url_list = req_data.getlist('picture_url')\n if not picture_url_list:\n return self.make_write(result_code=4002)\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n result = banner.save(banner_code=numbers.gen_banner_code(),\n name=name, banner_type=banner_type, target=target,\n image_url=picture_url_list[0], remark=remark)\n return self.make_write(result_code=0, result_data=self.\n reverse_url('.list_view'))\n\n @expose('/banner/edit.html', methods=('GET', 'POST'))\n @login_required\n def edit_view(self):\n if request.method == 'GET':\n _id = request.args.get('id', '')\n return_url = request.args.get('return_url', '')\n result = banner.get_detail(_id)\n banner_type = result.banner_type\n select_content_list = []\n if banner_type == 0:\n select_content_list = flow_tools.gen_bind_products()\n elif banner_type == 1:\n select_content_list = flow_tools.gen_bind_tweets()\n elif banner_type == 3:\n select_content_list = flow_tools.gen_bind_groups()\n result['banner_types'] = flow_tools.gen_banner_type()\n result['select_content_list'] = select_content_list\n return self.render(template='banner/edit.html', data=result,\n return_url=return_url)\n else:\n req_data = self.gen_arguments\n return_url = req_data.get('return_url', '')\n _id = req_data.get('id')\n name = req_data.get('name')\n banner_type = int(req_data.get('banner_type', 0))\n url_target = req_data.get('url_target', '')\n select_target = req_data.get('select_target', '')\n remark = 
req_data.get('remark', '')\n picture_url_list = req_data.getlist('picture_url')\n if not picture_url_list:\n return self.make_write(result_code=4002)\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n result = banner.update(_id=_id, name=name, banner_type=\n banner_type, target=target, image_url=picture_url_list[0],\n remark=remark)\n return self.make_write(result_code=0, result_data=self.\n decode_return_url(return_url))\n\n @expose('/banner/delete.html', methods=('POST',))\n @login_required\n def delete_view(self):\n req_data = self.gen_arguments\n return_url = req_data.get('return_url', '')\n _id = req_data.get('id')\n result = banner.delete([_id])\n _handler_log.exception(\n u'[AdminDeleteHandler] admin_id:{}, operator: {}'.format(utf8(\n _id), self.current_operator))\n return self.make_write(result_code=0, result_data=self.\n decode_return_url(return_url))\n\n @expose('/banner/detail.html', methods=('GET',))\n @login_required\n def detail_view(self):\n pass\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass BannerHandler(BaseHandler):\n \"\"\"\n 轮播焦点图列表\n \"\"\"\n column_list = ('banner_code', 'name', 'banner_type', 'target', 'image',\n 'validity', 'updated_time', 'remark')\n column_labels = {'banner_code': u'编号', 'name': u'名称', 'banner_type':\n u'类型', 'target': u'跳转目标', 'image': u'图片', 'validity': u'状态',\n 'updated_time': u'变更时间', 'remark': u'备注'}\n column_widget_args = {'image': {'class': 'hidden-480'}, 'remark': {\n 'class': 'hidden-480'}}\n tabs_list = {'query_type': -1, 'name': u'全部'}, {'query_type': 1, 'name':\n u'有效的'}, {'query_type': 0, 'name': u'已作废'}\n\n @expose('/')\n @expose('/banner/list.html')\n @login_required\n def list_view(self):\n page = request.args.get('page', 0, type=int)\n name = request.args.get('name', '')\n query_type = request.args.get('query_type', -1, type=int)\n query_kwargs = dict(name=name, query_type=query_type)\n\n def pager_url(p):\n if p is None:\n p = 0\n return self._get_url('.list_view', p, **query_kwargs)\n count = banner.get_total_count(**query_kwargs)\n results = []\n num_pages = 0\n if count > 0:\n num_pages = self.gen_total_pages(count)\n if num_pages - 1 < page:\n page -= 1\n offset_value = page * self.page_size\n results = banner.query_list(query_type=query_type, name=name,\n limit=self.page_size, offset=offset_value)\n actions, actions_confirmation = self.get_actions_list()\n return_url = self.gen_return_url('.list_view', page=page, **\n query_kwargs)\n return self.render(template='banner/list.html', actions=actions,\n actions_confirmation=actions_confirmation, count=count, page=\n page, num_pages=num_pages, pager_url=pager_url, data=results,\n query_kwargs=query_kwargs, return_url=return_url, column_list=\n self.column_list, column_labels=self.column_labels,\n column_widget_args=self.column_widget_args, tabs_list=self.\n tabs_list, banner_types=flow_tools.gen_banner_type())\n\n @expose('/banner/action.html', methods=('POST',))\n @login_required\n def action_view(self):\n return_url = request.form.get('return_url', '')\n return self.handle_action(return_view=return_url)\n\n @action('disable', u'注销(下架)所选', u'你确定要注销(下架)所选的记录?')\n def action_disable(self, ids):\n try:\n result = banner.set_validity(ids, validity=0)\n _handler_log.info(\n u'[BannerListHandler] batch disable, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch disable error')\n\n @action('activate', u'激活(上架)选择', u'你确定要激活所选的记录?')\n def action_activate(self, ids):\n try:\n result = banner.set_validity(ids, validity=1)\n _handler_log.info(\n u'[BannerListHandler] batch disable, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch disable error')\n\n @action('delete', u'删除所选', u'你确定要删除所选的记录?')\n def action_delete(self, ids):\n try:\n result = banner.delete(ids)\n _handler_log.info(\n u'[BannerListHandler] batch delete, id:{}, operator: {}'.\n format(utf8(ids), self.current_operator))\n return result\n except Exception as e:\n _log.exception(u'[BannerListHandler] batch delete error')\n\n @expose('/banner/create.html', methods=('GET', 'POST'))\n @login_required\n def create_view(self):\n if request.method == 'GET':\n select_content_list = flow_tools.gen_bind_products()\n result = {'select_content_list': select_content_list,\n 'banner_types': flow_tools.gen_banner_type()}\n return self.render(template='banner/create.html', data=result)\n else:\n req_data = 
self.gen_arguments\n name = req_data.get('name')\n banner_type = int(req_data.get('banner_type', 0))\n url_target = req_data.get('url_target', '')\n select_target = req_data.get('select_target', '')\n remark = req_data.get('remark', '')\n picture_url_list = req_data.getlist('picture_url')\n if not picture_url_list:\n return self.make_write(result_code=4002)\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n result = banner.save(banner_code=numbers.gen_banner_code(),\n name=name, banner_type=banner_type, target=target,\n image_url=picture_url_list[0], remark=remark)\n return self.make_write(result_code=0, result_data=self.\n reverse_url('.list_view'))\n\n @expose('/banner/edit.html', methods=('GET', 'POST'))\n @login_required\n def edit_view(self):\n if request.method == 'GET':\n _id = request.args.get('id', '')\n return_url = request.args.get('return_url', '')\n result = banner.get_detail(_id)\n banner_type = result.banner_type\n select_content_list = []\n if banner_type == 0:\n select_content_list = flow_tools.gen_bind_products()\n elif banner_type == 1:\n select_content_list = flow_tools.gen_bind_tweets()\n elif banner_type == 3:\n select_content_list = flow_tools.gen_bind_groups()\n result['banner_types'] = flow_tools.gen_banner_type()\n result['select_content_list'] = select_content_list\n return self.render(template='banner/edit.html', data=result,\n return_url=return_url)\n else:\n req_data = self.gen_arguments\n return_url = req_data.get('return_url', '')\n _id = req_data.get('id')\n name = req_data.get('name')\n banner_type = int(req_data.get('banner_type', 0))\n url_target = req_data.get('url_target', '')\n select_target = req_data.get('select_target', '')\n remark = req_data.get('remark', '')\n picture_url_list = req_data.getlist('picture_url')\n if not picture_url_list:\n return self.make_write(result_code=4002)\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n result = banner.update(_id=_id, name=name, banner_type=\n banner_type, target=target, image_url=picture_url_list[0],\n remark=remark)\n return self.make_write(result_code=0, result_data=self.\n decode_return_url(return_url))\n\n @expose('/banner/delete.html', methods=('POST',))\n @login_required\n def delete_view(self):\n req_data = self.gen_arguments\n return_url = req_data.get('return_url', '')\n _id = req_data.get('id')\n result = banner.delete([_id])\n _handler_log.exception(\n u'[AdminDeleteHandler] admin_id:{}, operator: {}'.format(utf8(\n _id), self.current_operator))\n return self.make_write(result_code=0, result_data=self.\n decode_return_url(return_url))\n\n @expose('/banner/detail.html', methods=('GET',))\n @login_required\n def detail_view(self):\n pass\n\n @csrf.exempt\n @expose('/banner/ajax/check.html', methods=('POST',))\n def check_view(self):\n pass\n",
"step-5": "# coding: utf-8\nimport logging\n\nfrom flask import request\nfrom flask.ext.admin import expose\n\nfrom cores.actions import action\nfrom cores.adminweb import BaseHandler\nfrom dao.bannerdao import banner\nfrom extends import csrf\nfrom libs.flask_login import login_required\nfrom utils.function_data_flow import flow_tools\nfrom utils.helpers import utf8\nfrom utils.numbering import numbers\n\n__author__ = 'bin wen'\n\n_log = logging.getLogger(\"ADMIN\")\n_handler_log = logging.getLogger(\"HANDLER\")\n\n\nclass BannerHandler(BaseHandler):\n \"\"\"\n 轮播焦点图列表\n \"\"\"\n column_list = (\"banner_code\", \"name\", \"banner_type\", \"target\", \"image\", 'validity',\n \"updated_time\", \"remark\")\n\n column_labels = {\n \"banner_code\": u\"编号\",\n \"name\": u\"名称\",\n \"banner_type\": u\"类型\",\n \"target\": u\"跳转目标\",\n \"image\": u\"图片\",\n \"validity\": u\"状态\",\n \"updated_time\": u\"变更时间\",\n \"remark\": u\"备注\"\n }\n column_widget_args = {\n \"image\": {'class': \"hidden-480\"},\n \"remark\": {'class': \"hidden-480\"}\n }\n tabs_list = (\n {\"query_type\": -1, \"name\": u\"全部\"},\n {\"query_type\": 1, \"name\": u\"有效的\"},\n {\"query_type\": 0, \"name\": u\"已作废\"}\n )\n\n @expose('/')\n @expose('/banner/list.html')\n @login_required\n def list_view(self):\n page = request.args.get('page', 0, type=int)\n name = request.args.get('name', \"\")\n query_type = request.args.get('query_type', -1, type=int)\n\n query_kwargs = dict(name=name, query_type=query_type)\n\n def pager_url(p):\n if p is None:\n p = 0\n\n return self._get_url('.list_view', p, **query_kwargs)\n\n count = banner.get_total_count(**query_kwargs)\n\n results = []\n num_pages = 0\n\n if count > 0:\n num_pages = self.gen_total_pages(count)\n if num_pages - 1 < page:\n page -= 1\n\n offset_value = page * self.page_size\n results = banner.query_list(\n query_type=query_type,\n name=name,\n limit=self.page_size,\n offset=offset_value\n )\n\n actions, actions_confirmation = self.get_actions_list()\n return_url = self.gen_return_url(\".list_view\", page=page, **query_kwargs)\n\n return self.render(\n template=\"banner/list.html\",\n actions=actions,\n actions_confirmation=actions_confirmation,\n count=count,\n page=page,\n num_pages=num_pages,\n pager_url=pager_url,\n data=results,\n query_kwargs=query_kwargs,\n return_url=return_url,\n column_list=self.column_list,\n column_labels=self.column_labels,\n column_widget_args=self.column_widget_args,\n tabs_list=self.tabs_list,\n banner_types=flow_tools.gen_banner_type()\n )\n\n @expose('/banner/action.html', methods=('POST',))\n @login_required\n def action_view(self):\n return_url = request.form.get(\"return_url\", \"\")\n return self.handle_action(return_view=return_url)\n\n @action('disable', u\"注销(下架)所选\", u\"你确定要注销(下架)所选的记录?\")\n def action_disable(self, ids):\n try:\n result = banner.set_validity(ids, validity=0)\n _handler_log.info(u\"[BannerListHandler] batch disable, id:{}, operator: {}\".format(\n utf8(ids), self.current_operator)\n )\n return result\n except Exception as e:\n _log.exception(u\"[BannerListHandler] batch disable error\")\n\n @action('activate', u\"激活(上架)选择\", u\"你确定要激活所选的记录?\")\n def action_activate(self, ids):\n try:\n result = banner.set_validity(ids, validity=1)\n _handler_log.info(u\"[BannerListHandler] batch disable, id:{}, operator: {}\".format(\n utf8(ids), self.current_operator)\n )\n return result\n except Exception as e:\n _log.exception(u\"[BannerListHandler] batch disable error\")\n\n @action('delete', u\"删除所选\", u\"你确定要删除所选的记录?\")\n def 
action_delete(self, ids):\n try:\n result = banner.delete(ids)\n _handler_log.info(u\"[BannerListHandler] batch delete, id:{}, operator: {}\".format(\n utf8(ids), self.current_operator)\n )\n return result\n except Exception as e:\n _log.exception(u\"[BannerListHandler] batch delete error\")\n\n @expose('/banner/create.html', methods=('GET', 'POST'))\n @login_required\n def create_view(self):\n if request.method == \"GET\":\n select_content_list = flow_tools.gen_bind_products()\n result = {\n \"select_content_list\": select_content_list,\n \"banner_types\": flow_tools.gen_banner_type()\n }\n return self.render(template=\"banner/create.html\", data=result)\n else:\n req_data = self.gen_arguments\n name = req_data.get(\"name\")\n banner_type = int(req_data.get(\"banner_type\", 0))\n url_target = req_data.get(\"url_target\", \"\") # 外部url\n select_target = req_data.get(\"select_target\", \"\") # 下拉内容\n remark = req_data.get(\"remark\", \"\")\n picture_url_list = req_data.getlist(\"picture_url\") # 图片url\n if not picture_url_list:\n return self.make_write(result_code=4002)\n\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n result = banner.save(\n banner_code=numbers.gen_banner_code(),\n name=name,\n banner_type=banner_type,\n target=target,\n image_url=picture_url_list[0],\n remark=remark\n )\n\n return self.make_write(result_code=0, result_data=self.reverse_url(\".list_view\"))\n\n @expose('/banner/edit.html', methods=('GET', 'POST'))\n @login_required\n def edit_view(self):\n if request.method == \"GET\":\n _id = request.args.get(\"id\", \"\")\n return_url = request.args.get(\"return_url\", \"\")\n result = banner.get_detail(_id)\n banner_type = result.banner_type\n select_content_list = []\n if banner_type == 0:\n select_content_list = flow_tools.gen_bind_products()\n elif banner_type == 1:\n select_content_list = flow_tools.gen_bind_tweets()\n elif banner_type == 3:\n select_content_list = flow_tools.gen_bind_groups()\n\n result[\"banner_types\"] = flow_tools.gen_banner_type()\n result[\"select_content_list\"] = select_content_list\n\n return self.render(\n template=\"banner/edit.html\",\n data=result,\n return_url=return_url\n )\n else:\n req_data = self.gen_arguments\n return_url = req_data.get(\"return_url\", \"\")\n\n _id = req_data.get(\"id\")\n name = req_data.get(\"name\")\n banner_type = int(req_data.get(\"banner_type\", 0))\n url_target = req_data.get(\"url_target\", \"\") # 外部url\n select_target = req_data.get(\"select_target\", \"\") # 下拉内容\n remark = req_data.get(\"remark\", \"\")\n picture_url_list = req_data.getlist(\"picture_url\") # 图片url\n if not picture_url_list:\n return self.make_write(result_code=4002)\n\n if banner_type == 2:\n target = url_target\n else:\n target = select_target\n\n result = banner.update(\n _id=_id,\n name=name,\n banner_type=banner_type,\n target=target,\n image_url=picture_url_list[0],\n remark=remark\n )\n\n return self.make_write(result_code=0, result_data=self.decode_return_url(return_url))\n\n @expose('/banner/delete.html', methods=('POST',))\n @login_required\n def delete_view(self):\n req_data = self.gen_arguments\n return_url = req_data.get(\"return_url\", \"\")\n _id = req_data.get(\"id\")\n result = banner.delete([_id])\n\n _handler_log.exception(u\"[AdminDeleteHandler] admin_id:{}, operator: {}\".format(\n utf8(_id), self.current_operator))\n\n return self.make_write(result_code=0, result_data=self.decode_return_url(return_url))\n\n @expose('/banner/detail.html', methods=('GET',))\n @login_required\n def 
detail_view(self):\n pass\n\n @csrf.exempt\n @expose('/banner/ajax/check.html', methods=('POST',))\n def check_view(self):\n pass\n\n",
"step-ids": [
6,
8,
9,
13,
16
]
}
|
[
6,
8,
9,
13,
16
] |
'''
Created on Dec 18, 2011
@author: ppa
'''
import unittest
from ultrafinance.pyTaLib.indicator import Sma
class testPyTaLib(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
    def testSma(self):
        sma = Sma(period=3)
        expectedAvgs = [1, 1.5, 2, 3, 4]
        for index, number in enumerate(range(1, 6)):
            self.assertEqual(expectedAvgs[index], sma(number))
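
# Convenience entry point (an addition, not in the original file) so the
# test module can be run directly with `python testPyTaLib.py`:
if __name__ == '__main__':
    unittest.main()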
|
normal
|
{
"blob_id": "fcd2bd91dff3193c661d71ade8039765f8498fd4",
"index": 8317,
"step-1": "<mask token>\n\n\nclass testPyTaLib(unittest.TestCase):\n\n def setUp(self):\n pass\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass testPyTaLib(unittest.TestCase):\n\n def setUp(self):\n pass\n <mask token>\n\n def testSma(self):\n sma = Sma(period=3)\n expectedAvgs = [1, 1.5, 2, 3, 4]\n for index, number in enumerate(range(1, 6)):\n self.assertEqual(expectedAvgs[index], sma(number))\n",
"step-3": "<mask token>\n\n\nclass testPyTaLib(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testSma(self):\n sma = Sma(period=3)\n expectedAvgs = [1, 1.5, 2, 3, 4]\n for index, number in enumerate(range(1, 6)):\n self.assertEqual(expectedAvgs[index], sma(number))\n",
"step-4": "<mask token>\nimport unittest\nfrom ultrafinance.pyTaLib.indicator import Sma\n\n\nclass testPyTaLib(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testSma(self):\n sma = Sma(period=3)\n expectedAvgs = [1, 1.5, 2, 3, 4]\n for index, number in enumerate(range(1, 6)):\n self.assertEqual(expectedAvgs[index], sma(number))\n",
"step-5": "'''\nCreated on Dec 18, 2011\n\n@author: ppa\n'''\nimport unittest\nfrom ultrafinance.pyTaLib.indicator import Sma\n\nclass testPyTaLib(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testSma(self):\n sma = Sma(period = 3)\n expectedAvgs = [1, 1.5, 2, 3, 4]\n for index, number in enumerate(range(1, 6) ):\n self.assertEqual(expectedAvgs[index], sma(number))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding:utf-8 -*-
'''
@author:oldwai
'''
# email: frankandrew@163.com
def multipliers():
    # Return one multiplier function per factor 0..3. Binding i as a
    # default argument (i=i) captures each value at definition time and
    # avoids the classic late-binding closure pitfall.
    return [lambda x, i=i: x * i for i in range(4)]


def lab1(x):
    # Eager equivalent of multipliers(): multiply x by each factor 0..3.
    list1 = []
    for i in range(4):
        list1.append(x * i)
    return list1

# print([m(2) for m in multipliers()])  # [0, 2, 4, 6]


def func1(x):
    list2 = []
    for m in multipliers():
        list2.append(m(x))
    return list2


print(func1(3))  # [0, 3, 6, 9]
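
# For contrast (illustrative only): without the i=i default binding, every
# lambda would close over the same loop variable and see its final value,
# so all four multipliers would multiply by 3:
# broken = [lambda x: x * i for i in range(4)]
# print([m(3) for m in broken])  # [9, 9, 9, 9]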
|
normal
|
{
"blob_id": "807e19f09f4a46b6c39457b8916714e2c54c3e8d",
"index": 5802,
"step-1": "<mask token>\n\n\ndef lab1(x):\n list1 = []\n for i in range(4):\n sum = x * i\n list1.append(sum)\n return list1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef lab1(x):\n list1 = []\n for i in range(4):\n sum = x * i\n list1.append(sum)\n return list1\n\n\ndef func1(x):\n list2 = []\n for m in multipliers():\n list2.append(m(x))\n return list2\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef multipliers():\n return lab1(x)\n\n\ndef lab1(x):\n list1 = []\n for i in range(4):\n sum = x * i\n list1.append(sum)\n return list1\n\n\ndef func1(x):\n list2 = []\n for m in multipliers():\n list2.append(m(x))\n return list2\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef multipliers():\n return lab1(x)\n\n\ndef lab1(x):\n list1 = []\n for i in range(4):\n sum = x * i\n list1.append(sum)\n return list1\n\n\ndef func1(x):\n list2 = []\n for m in multipliers():\n list2.append(m(x))\n return list2\n\n\nprint(func1(3))\n",
"step-5": "# -*- coding:utf-8 -*-\r\n'''\r\n@author:oldwai\r\n'''\r\n# email: frankandrew@163.com\r\n\r\n\r\ndef multipliers():\r\n return lab1(x)\r\n\r\n\r\ndef lab1(x):\r\n list1 = []\r\n for i in range(4):\r\n sum = x*i\r\n list1.append(sum)\r\n return list1\r\n\r\n#print ([m(2) for m in multipliers()])\r\ndef func1(x):\r\n list2 = []\r\n for m in multipliers():\r\n list2.append(m(x))\r\n return list2\r\n\r\nprint(func1(3))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
# -*- coding:utf-8 -*-
################################################################
# Server program
################################################################
import json
import time
import traceback
from flask import Flask, abort, render_template, redirect, send_from_directory, request, make_response
from flask.ext.bootstrap import Bootstrap
from tools.http_tools import WeiboHandler
from tools.db_operation.db_tools import save_user_log_info, get_user_log_info, batch_put_info, CONTENT_INFO, SCRAP_INFO, put_info, get_info, put_scrap_info, get_scraped_weibo_info
from tools.__init__ import debug_flag
from tools.scrap_tools import scrap_user
from multiprocessing import Process
global log_handler
global search_user_list
log_handler = {}
search_user_list = {}
process_list = []
server = Flask(__name__)
bootstrap = Bootstrap(server)
def read_wh(username):
if log_handler.get(username) is None:
log_handler[username] = WeiboHandler(username, '', 'flask_server/static/png/')
return log_handler[username]
def read_cookie():
username = request.cookies.get('username')
if username is None:
user_list = []
else:
user_list = [{'username': username}]
return user_list
@server.route('/')
def index():
user_list = read_cookie()
return render_template('index.html', user_list=user_list)
@server.route('/signup')
def sign_up():
return redirect('http://weibo.com/signup/signup.php')
@server.route('/login', methods=['POST'])
def log_in():
username = request.form['id']
wh = read_wh(username)
wh.passwd = request.form['passwd']
vercode = request.form['vercode']
log_flag = request.form['logflag']
if log_flag == '1':
resp = make_response(json.dumps({'stat': '200', 'furl': request.form['ip']}))
resp.set_cookie('username', username)
return resp
# log_handler.prelog_data = get_user_log_info(username)
data2, replace_url = wh.do_log_req(vercode)
if int(data2['retcode'][0]) == 0:
wh.final_log_req(replace_url)
resp = make_response(json.dumps({'stat': '200', 'furl': request.form['ip']}))
resp.set_cookie('username', username)
return resp
print 'Log in failed ... retcode:', data2['retcode'][0], ', reason:', data2['reason'][0].decode('gbk')
no = wh.get_vercode()
return json.dumps({'stat': '502', 'reason': data2['reason'][0].decode('gbk'), 'vercode_no': no})
@server.route('/check_log', methods=['POST'])
def check_log():
username = request.form['id']
wh = read_wh(username)
wh.check_log_status(wh.open_weibo_page())
if wh.log_flag:
return json.dumps({'stat': '200'})
prelog = wh.prelog()
# save_user_log_info(username, prelog)
try:
if prelog['showpin'] == 1:
no = wh.get_vercode()
return json.dumps({'stat': '502', 'vercode_no': no})
return json.dumps({'stat': '501'})
except Exception, e:
return json.dumps({'stat': '501'})
@server.route('/logout')
def log_out():
resp = make_response(redirect('/'))
resp.set_cookie('username', '', expires=0)
return resp
@server.route('/static/<path:path>')
def send_static_file(path):
return send_from_directory('static', path)
@server.route('/search_user/<word>')
def search_user(word):
username = request.cookies.get('username')
wh = read_wh(username)
if username is None:
return {'stat': '404'}
search_user_list[username] = wh.get_user_list(word)
if debug_flag:
print search_user_list
return json.dumps({'stat': '200', 'result': search_user_list[username]})
@server.route('/scrap/<user_no>')
def to_scrap(user_no):
username = request.cookies.get('username')
if username is None:
return render_template('index.html')
user = search_user_list[username][int(user_no)]
last_record = get_info(SCRAP_INFO, cond=' 1=1 order by id desc limit 1')
scrap_id = 0 if len(last_record) == 0 else (int(last_record[0]['id']) + 1)
    put_scrap_info(scrap_id, username, user['user_id'], '开始爬取%s的所有微博内容...' % user['title'])  # message: "Start scraping all of %s's Weibo posts..."
sp = Process(target=scrap_process, name='%s_%s_%s' % (username, user['user_id'], scrap_id), args=(username, user, scrap_id))
sp.start()
process_list.append(sp)
return redirect('/scrap_listen?d=%s' % scrap_id)
@server.route('/scrap_listen', methods=['GET'])
def scrap_listen():
scrap_id = request.args.get('d')
if debug_flag:
print scrap_id
user_list = read_cookie()
return render_template('scrap_listen.html', scrap_id=scrap_id, user_list=user_list)
@server.route('/read_scrap/<scrap_id>/<last_message_id>')
def read_scrap(scrap_id, last_message_id):
data = get_info(SCRAP_INFO, cond=' scrap_id=%s and id > %s ' % (scrap_id, last_message_id))
return json.dumps(data)
def scrap_process(username, user, scrap_id):
try:
wh = read_wh(username)
data_list = scrap_user(wh, user, scrap_id, 0)
batch_put_info(CONTENT_INFO, data_list)
        put_scrap_info(scrap_id, username, user['user_id'], '爬取完毕!共爬取%s%s条微博.保存至数据库....' % (user['title'], len(data_list)), 1)  # message: "Scraping finished! Scraped <count> Weibo posts of <title>; saving to database..."
except Exception, e:
traceback.print_exc()
        put_scrap_info(scrap_id, username, user['user_id'], '出现异常,数据未保存,请重新爬取数据!', -1)  # message: "Exception occurred, data not saved, please scrape again!"
@server.route('/search')
def search_scrap_result():
user_list = read_cookie()
return render_template('/search.html', user_list=user_list)
@server.route('/search_scraped_weibo/<username>', methods=['GET'])
def search_scraped_weibo(username):
print 'here'
keyword = request.args.get('keyword')
print 'there'
if keyword is None:
weibo_list = get_scraped_weibo_info(username)
else:
weibo_list = get_scraped_weibo_info(username, keyword)
return json.dumps({'stat': '200', 'result': weibo_list})
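
# Hypothetical entry point (not in the original module; the Flask app is
# presumably launched elsewhere). A direct run could look like:
#
# if __name__ == '__main__':
#     server.run(host='0.0.0.0', port=5000, debug=debug_flag)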
|
normal
|
{
"blob_id": "2c89f12d633da8da4d500dca910662d351b0958f",
"index": 4509,
"step-1": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n################################################################\n# 服务器程序\n################################################################\nimport json\nimport time\nimport traceback\nfrom flask import Flask, abort, render_template, redirect, send_from_directory, request, make_response\nfrom flask.ext.bootstrap import Bootstrap\nfrom tools.http_tools import WeiboHandler\nfrom tools.db_operation.db_tools import save_user_log_info, get_user_log_info, batch_put_info, CONTENT_INFO, SCRAP_INFO, put_info, get_info, put_scrap_info, get_scraped_weibo_info\nfrom tools.__init__ import debug_flag\nfrom tools.scrap_tools import scrap_user\nfrom multiprocessing import Process\nglobal log_handler\nglobal search_user_list\nlog_handler = {}\nsearch_user_list = {}\nprocess_list = []\n\n\nserver = Flask(__name__)\nbootstrap = Bootstrap(server)\n\n\ndef read_wh(username):\n if log_handler.get(username) is None:\n log_handler[username] = WeiboHandler(username, '', 'flask_server/static/png/')\n return log_handler[username]\n\n\ndef read_cookie():\n username = request.cookies.get('username')\n if username is None:\n user_list = []\n else:\n user_list = [{'username': username}]\n return user_list\n\n\n@server.route('/')\ndef index():\n user_list = read_cookie()\n return render_template('index.html', user_list=user_list)\n\n\n@server.route('/signup')\ndef sign_up():\n return redirect('http://weibo.com/signup/signup.php')\n\n\n@server.route('/login', methods=['POST'])\ndef log_in():\n username = request.form['id']\n wh = read_wh(username)\n wh.passwd = request.form['passwd']\n vercode = request.form['vercode']\n log_flag = request.form['logflag']\n if log_flag == '1':\n resp = make_response(json.dumps({'stat': '200', 'furl': request.form['ip']}))\n resp.set_cookie('username', username)\n return resp\n # log_handler.prelog_data = get_user_log_info(username)\n data2, replace_url = wh.do_log_req(vercode)\n if int(data2['retcode'][0]) == 0:\n wh.final_log_req(replace_url)\n resp = make_response(json.dumps({'stat': '200', 'furl': request.form['ip']}))\n resp.set_cookie('username', username)\n return resp\n print 'Log in failed ... 
retcode:', data2['retcode'][0], ', reason:', data2['reason'][0].decode('gbk')\n no = wh.get_vercode()\n return json.dumps({'stat': '502', 'reason': data2['reason'][0].decode('gbk'), 'vercode_no': no})\n\n\n@server.route('/check_log', methods=['POST'])\ndef check_log():\n username = request.form['id']\n wh = read_wh(username)\n wh.check_log_status(wh.open_weibo_page())\n if wh.log_flag:\n return json.dumps({'stat': '200'})\n prelog = wh.prelog()\n # save_user_log_info(username, prelog)\n try:\n if prelog['showpin'] == 1:\n no = wh.get_vercode()\n return json.dumps({'stat': '502', 'vercode_no': no})\n return json.dumps({'stat': '501'})\n except Exception, e:\n return json.dumps({'stat': '501'})\n\n\n@server.route('/logout')\ndef log_out():\n resp = make_response(redirect('/'))\n resp.set_cookie('username', '', expires=0)\n return resp\n\n\n@server.route('/static/<path:path>')\ndef send_static_file(path):\n return send_from_directory('static', path)\n\n\n@server.route('/search_user/<word>')\ndef search_user(word):\n username = request.cookies.get('username')\n wh = read_wh(username)\n if username is None:\n return {'stat': '404'}\n search_user_list[username] = wh.get_user_list(word)\n if debug_flag:\n print search_user_list\n return json.dumps({'stat': '200', 'result': search_user_list[username]})\n\n\n@server.route('/scrap/<user_no>')\ndef to_scrap(user_no):\n username = request.cookies.get('username')\n if username is None:\n return render_template('index.html')\n user = search_user_list[username][int(user_no)]\n last_record = get_info(SCRAP_INFO, cond=' 1=1 order by id desc limit 1')\n scrap_id = 0 if len(last_record) == 0 else (int(last_record[0]['id']) + 1)\n put_scrap_info(scrap_id, username, user['user_id'], '开始爬取%s的所有微博内容...' % user['title'])\n sp = Process(target=scrap_process, name='%s_%s_%s' % (username, user['user_id'], scrap_id), args=(username, user, scrap_id))\n sp.start()\n process_list.append(sp)\n return redirect('/scrap_listen?d=%s' % scrap_id)\n\n\n@server.route('/scrap_listen', methods=['GET'])\ndef scrap_listen():\n scrap_id = request.args.get('d')\n if debug_flag:\n print scrap_id\n user_list = read_cookie()\n return render_template('scrap_listen.html', scrap_id=scrap_id, user_list=user_list)\n\n\n@server.route('/read_scrap/<scrap_id>/<last_message_id>')\ndef read_scrap(scrap_id, last_message_id):\n data = get_info(SCRAP_INFO, cond=' scrap_id=%s and id > %s ' % (scrap_id, last_message_id))\n return json.dumps(data)\n\n\ndef scrap_process(username, user, scrap_id):\n try:\n wh = read_wh(username)\n data_list = scrap_user(wh, user, scrap_id, 0)\n batch_put_info(CONTENT_INFO, data_list)\n put_scrap_info(scrap_id, username, user['user_id'], '爬取完毕!共爬取%s%s条微博.保存至数据库....' % (user['title'], len(data_list)), 1)\n except Exception, e:\n traceback.print_exc()\n put_scrap_info(scrap_id, username, user['user_id'], '出现异常,数据未保存,请重新爬取数据!', -1)\n\n\n@server.route('/search')\ndef search_scrap_result():\n user_list = read_cookie()\n return render_template('/search.html', user_list=user_list)\n\n\n@server.route('/search_scraped_weibo/<username>', methods=['GET'])\ndef search_scraped_weibo(username):\n print 'here'\n keyword = request.args.get('keyword')\n print 'there'\n if keyword is None:\n weibo_list = get_scraped_weibo_info(username)\n else:\n weibo_list = get_scraped_weibo_info(username, keyword)\n return json.dumps({'stat': '200', 'result': weibo_list})\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# This file is used to run a program to perform Active measurements
import commands
import SocketServer
import sys
#Class to handle Socket request
class Handler(SocketServer.BaseRequestHandler):
def handle(self):
# Get the IP of the client
IP = self.request.recv(1024)
#print 'IP=' + IP
latency = ''
try:
# Use Scamper to determine the latency of the Requesting Client identified by the IP
scamperCommand = "scamper -c 'ping -c 1' -i "+IP
# Get the output of the system command
output = commands.getoutput(scamperCommand)
print "Output=" + output
#Parse and get the Latency
latency = output.split("\n")[1].split("time=")[1].split(" ")[0]
except Exception:
latency = 'Error'
#print latency
# Send latency to requester
self.request.sendall(latency)
return
def main(argv):
port = int(argv[1])
addr = ('', port)
# Start an active measurement system which listenes to a given port
    server = SocketServer.TCPServer(addr, Handler)
print 'Active Measurement Server Listening at ' + str(port) + "..."
server.serve_forever()
if __name__ == '__main__':
main(sys.argv)
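
# Minimal hypothetical client (not part of this file) showing how a
# requester would ask the server for its measured latency; host and port
# are placeholders:
#
# import socket
# sock = socket.create_connection(('measurement-host', 9999))
# sock.sendall(my_ip)
# print sock.recv(1024)  # latency string, or 'Error'
# sock.close()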
|
normal
|
{
"blob_id": "c853f922d1e4369df9816d150e5c0abc729b325c",
"index": 4902,
"step-1": "# This file is used to run a program to perform Active measuremnts\n\n\nimport commands\nimport SocketServer\nimport sys\n\n#Class to handle Socket request\nclass Handler(SocketServer.BaseRequestHandler):\n\n def handle(self):\n\n # Get the IP of the client\n IP = self.request.recv(1024)\n\n #print 'IP=' + IP\n\n latency = ''\n\n try:\n\n # Use Scamper to determine the latency of the Requesting Client identified by the IP\n scamperCommand = \"scamper -c 'ping -c 1' -i \"+IP\n\n # Get the output of the system command\n output = commands.getoutput(scamperCommand)\n print \"Output=\" + output\n #Parse and get the Latency\n latency = output.split(\"\\n\")[1].split(\"time=\")[1].split(\" \")[0]\n\n except Exception:\n latency = 'Error'\n\n #print latency\n\n # Send latency to requester\n self.request.sendall(latency)\n\n return\n\ndef main(argv):\n\n port = int(argv[1])\n addr = ('', port)\n\n # Start an active measurement system which listenes to a given port\n server = SocketServer.TCPServer(addr, Handler);\n\n\n print 'Active Measurement Server Listening at ' + str(port) + \"...\"\n\n\n server.serve_forever()\n\nif __name__ == '__main__':\n main(sys.argv)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from functools import wraps
import maya.cmds as mc
import maya.mel as mel
import pymel.core as pm
from PySide2 import QtCore, QtGui, QtWidgets
import adb_core.Class__multi_skin as ms
import adbrower
from CollDict import pysideColorDic as pyQtDic
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
import adb_tools.adb_pyQt.Class__rightClickCustom as adbRC
from maya_script import Adbrower
adb = adbrower.Adbrower()
VERSION = 1.0
PATH_WINDOW = Adbrower.PATH_WINDOW_INIT + 'AppData/Roaming'
PATH_LINUX = Adbrower.PATH_LINUX_INIT
FOLDER_NAME = Adbrower.FOLDER_NAME_INIT
ICONS_FOLDER = Adbrower.ICONS_FOLDER_INIT
YELLOW = '#ffe100'
ORANGE = '#fd651d'
GREEN = '#597A59'
DARKRED = '#745a54'
def undo(func):
    '''
    Puts the wrapped `func` into a single Maya undo chunk: the chunk is
    opened before the call and closed in the finally: block, so everything
    `func` does can be undone in one step.
    from schworer Github
    '''
@wraps(func)
def _undofunc(*args, **kwargs):
try:
# start an undo chunk
mc.undoInfo(ock=True)
return func(*args, **kwargs)
finally:
# after calling the func, end the undo chunk
mc.undoInfo(cck=True)
return _undofunc
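
# Illustrative usage (not from the original file): wrapping a tool function
# so that all of its scene edits collapse into a single Maya undo step.
#
# @undo
# def rename_selection(prefix):
#     for node in pm.selected():
#         node.rename(prefix + node.name())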
def flatList(ori_list=''):
"""
Flatten a list
"""
flat_list = []
for item in ori_list:
if isinstance(item, list):
for sub_item in item:
flat_list.append(sub_item)
else:
flat_list.append(item)
return flat_list
#-----------------------------------
# CLASS
#-----------------------------------
class MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
__dialog = None
@classmethod
def show_dialog(cls):
if cls.__dialog is None:
cls.__dialog = cls()
else:
cls.__dialog.raise_()
cls.__dialog.show()
    def __init__(self, parent=None):
        super(MultiSkin_UI, self).__init__(parent=parent)
        self.meshTreeWidget = QtWidgets.QTreeWidget()
self.setObjectName('multi skin ui')
self.starting_height = 500
self.starting_width = 390
self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))
self.setWindowFlags(QtCore.Qt.Tool)
self.setMinimumWidth(self.starting_width)
self.resize(self.starting_width, self.starting_height)
# -----------------------------
# --- Create scrollArea
self.mainBox = QtWidgets.QVBoxLayout()
self.mainBox.setContentsMargins(0, 0, 0, 0)
self.scroll_layout = QtWidgets.QScrollArea()
self.mainBox.addWidget(self.scroll_layout)
self.setLayout(self.mainBox)
self.scroll_layout.setContentsMargins(0, 0, 0, 0)
self.scroll_layout.setWidgetResizable(True)
self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)
self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)
self.scroll_widget = QtWidgets.QWidget()
self.scroll_layout.setWidget(self.scroll_widget)
# -----------------------------
# --- Main Layout
self.main_layout = QtWidgets.QVBoxLayout()
self.main_layout.setContentsMargins(*[5] * 4)
self.main_layout.setSpacing(2)
self.setLayout(self.main_layout)
self.scroll_widget.setLayout(self.main_layout)
self.widgetsAndLayouts()
self.create_Button()
self.buildMainLayout()
def widgetsAndLayouts(self):
# --------- Predefine widgets
def addLine():
            line = QtWidgets.QFrame()
line.setFrameShape(QtWidgets.QFrame.HLine)
return line
        def addText(message, alignment=QtCore.Qt.AlignCenter, height=30, bold=False):
            myFont = QtGui.QFont()
            myFont.setBold(bold)
            text = QtWidgets.QLabel(message)
            text.setAlignment(alignment)
            text.setFixedHeight(height)
            text.setFont(myFont)
            return text
# ------------------------------
#--------- Layouts
self.vLayoutAndFunctions = [
# name, margins
['treeWidget', [1, 1, 1, 1]],
]
self.vlayout = {}
        for layoutName, margins in self.vLayoutAndFunctions:
            self.vlayout[layoutName] = QtWidgets.QVBoxLayout()
            self.vlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3])
self.hLayoutAndFunctions = [
# name, margins
['filterOptions', [1, 1, 1, 1]],
['buttonsOptions', [1, 1, 1, 1]],
['searchBarWidget', [1, 1, 1, 1]],
]
self.hlayout = {}
        for layoutName, margins in self.hLayoutAndFunctions:
            self.hlayout[layoutName] = QtWidgets.QHBoxLayout()
            self.hlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3])
# ------------------------------
# --------- QLINE EDIT WIDGET
self.searchBar = QtWidgets.QLineEdit()
self.searchBar.setPlaceholderText('Search...')
self.searchBar.textEdited.connect(self.searchBarEdited)
self.hlayout['searchBarWidget'].addWidget(self.searchBar)
# ------------------------------
# --------- CHECKBOX WIDGET
self.matchCaseChx = QtWidgets.QCheckBox()
self.matchCaseChx.setChecked(False)
self.matchCaseChx.setText('Match Case')
self.matchCaseChx.stateChanged.connect(self.searchBarEdited)
# ------------------------------
# --------- RADIO BUTTON WIDGET
self.allFilter = QtWidgets.QRadioButton('All', self)
self.allFilter.setChecked(True)
self.allFilter.toggled.connect(self.refreshQtree)
self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)
self.skinClusterFilter.setChecked(True)
self.skinClusterFilter.toggled.connect(self.refreshQtree)
# ------------------------------
# --------- TREE LIST WIDGET
        self.meshTreeWidget = QtWidgets.QTreeWidget()
self.meshTreeWidget.setHeaderLabel('Cloth Tree View')
self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.ExtendedSelection)
self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)
header = QtWidgets.QTreeWidgetItem(["Geometries"])
self.meshTreeWidget.setHeaderItem(header)
self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)
        self.meshTreeWidget.itemSelectionChanged.connect(self.singleClickedAction)
self.refreshQtree()
def create_Button(self):
""" Create the buttons """
self.buttonAndFunctions = [
# name, function , group number, labelColor, backgroundColor, layout, layout_coordinate width
['Show Selected', self.showSelected, 0, pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'], '', 30],
['Refresh', self.refreshQtree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Clear', self.meshTreeWidget.clear, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],
['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],
['Close All', self.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],
]
# Build Buttons
self.buttons = {}
        for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:
self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)
self.buttons[buttonName].clicked.connect(buttonFunction)
try:
layout.addWidget(self.buttons[buttonName], int(layout_coord.split(',')[0]), int(layout_coord.split(',')[1]))
except ValueError:
layout.addWidget(self.buttons[buttonName])
# add Right Clicked Options
_optionsExpandAll = self.buttons['Expand All'].addButtonActions(['Shapes', 'Skin Clusters'])
_optionsExpandAll['Shapes'].triggered.connect(lambda:self.expandTree('shape'))
_optionsExpandAll['Skin Clusters'].triggered.connect(lambda:self.expandTree('skin cluster'))
_optionsCloseAll = self.buttons['Close All'].addButtonActions(['Shapes', 'Skin Clusters'])
_optionsCloseAll['Shapes'].triggered.connect(lambda:self.closeTree('shape'))
_optionsCloseAll['Skin Clusters'].triggered.connect(lambda:self.closeTree('skin cluster'))
def buildMainLayout(self):
# ------------------------------
# --------- BUILD MAIN LAYOUT
self.main_layout.addLayout(self.hlayout['filterOptions'])
self.hlayout['filterOptions'].addWidget(self.allFilter)
self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)
self.hlayout['filterOptions'].addStretch()
self.main_layout.addLayout(self.hlayout['searchBarWidget'])
self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)
self.main_layout.addLayout(self.hlayout['buttonsOptions'])
self.main_layout.addLayout(self.vlayout['treeWidget'])
# ==================================
# SLOTS
# ==================================
def refreshQtree(self):
self.meshTreeWidget.clear()
all_status = self.allFilter.isChecked()
if all_status:
_filter = 'all'
else:
_filter = 'skinClusters'
self.filterList = self.filterMeshes(filter=_filter)
self.populateQTree(self.filterList)
def getSearchBarText(self):
searchBarText = self.searchBar.text()
return searchBarText
def searchBarEdited(self):
        matchCase = bool(self.matchCaseChx.checkState())
query = self.searchBar.text()
if matchCase:
query_words = str(query).split(" ")
else:
query_words = str(query).lower().split(" ")
query_words = filter(None, query_words)
scoreList = {}
for item in [str(x) for x in self.filterList]:
score = 0
for query_word in query_words:
if matchCase:
if query_word in item:
score += 1
else:
if query_word in item.lower():
score += 1
scoreList[item] = score
# If user enter more than one words, get only result with a score at least equal to the number of words in the query
sorted_matches = [i for i in scoreList.items() if i[1] >= len(query_words)]
# Sort matches by score
sorted_matches = sorted(sorted_matches, key=lambda x: x[0])
sorted_matches_string = [name for name, index in sorted_matches]
self.meshTreeWidget.clear()
self.populateQTree(sorted_matches_string)
def populateQTree(self, filterList):
# Meshes
# ----------------------
self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(item)]) for item in filterList]
[root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]
[root.setExpanded(True) for root in self.roots]
# Shapes
# ----------------------
self.QtShapes = []
shape_dic = self.getAllShapes(self.getAllMeshes())
QTroots_dic = {} # Keys are Qtree object
for root in self.roots:
try:
QTroots_dic.update({root:shape_dic[root.text(0)]})
except KeyError:
pass
# added the shapes under there mesh
for QTroot, shapesList in QTroots_dic.items():
[QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in shapesList]
# changed their color
            child_count = QTroot.childCount()
            children = [QTroot.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for child in children]
[child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in children]
[child.setExpanded(True) for child in children]
[self.QtShapes.append(child) for child in children]
# skinClusters
# ----------------------
self.QTClusters = []
cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))
QTshape_dic = {}
for shape in self.QtShapes:
QTshape_dic.update({shape:cluster_dic[shape.text(0)]})
# added the skinCluster under there shape
for QTshape, clusterList in QTshape_dic.items():
if clusterList == 'None':
pass
else:
QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])
# changed their color
            child_count = QTshape.childCount()
            children = [QTshape.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for child in children]
[child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in children]
[self.QTClusters.append(child) for child in children]
# Joints
# ----------------------
bindJoints_dic = self.getBindJointsFromCluster([x for x in cluster_dic.values() if x != 'None'])
QTcluster_dic = {}
for cluster in self.QTClusters:
QTcluster_dic.update({cluster:bindJoints_dic[cluster.text(0)]})
for QTCluster, jointList in QTcluster_dic.items():
[QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in jointList]
# changed their color
            child_count = QTCluster.childCount()
            children = [QTCluster.child(index) for index in range(child_count)]
[child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for child in children]
[child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in children]
def closeTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(False) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(False) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(False) for sclus in self.QTClusters]
def expandTree(self, type='mesh'):
if type == 'mesh':
[root.setExpanded(True) for root in self.roots]
elif type == 'shape':
[shape.setExpanded(True) for shape in self.QtShapes]
elif type == 'skin cluster':
[sclus.setExpanded(True) for sclus in self.QTClusters]
def showSelected(self):
selection = pm.selected()
selection.sort()
self.meshTreeWidget.clear()
self.populateQTree(selection)
def singleClickedAction(self):
mySelection = self.meshTreeWidget.selectedItems()
str_selected = [x.text(0) for x in mySelection]
pm.select(str_selected, r=1)
    def filterMeshes(self, filter='all'):
"""
filter:
all : all meshes
skinClusters : all meshes with skinClusters
None
"""
if filter =='all':
return self.getAllMeshes()
elif filter == "skinClusters":
clusters = pm.ls(type='skinCluster')
meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for c in clusters], []))
meshes = set([x.getParent() for x in meshesShapes if pm.objectType(x) == 'mesh'])
return meshes
elif filter == 'None':
return None
# ==================================
# STATIC METHOD
# ==================================
@staticmethod
def test():
print ('test')
@staticmethod
def getSkinCluster(_transform):
"""
Find a SkinCluster from a transform
Returns the skinCluster node
"""
result = []
if not (pm.objExists(_transform)):
return result
validList = mel.eval('findRelatedDeformer("' + str(_transform) + '")')
if validList is None:
return result
for elem in validList:
if pm.nodeType(elem) == 'skinCluster':
result.append(elem)
pm.select(result, r=True)
result_node = pm.selected()
if len(result_node) > 1:
return result_node
else:
try:
return result_node[0]
except IndexError:
return False
@staticmethod
def getBindJointsFromCluster(clusterList):
"""
Find all joints attached to a skinCluster
@param clusterList: List. list of skin Clusters
return dic with key: skin Cluster. Value: list of joint
"""
bindJoints_dic = {}
for cluster in clusterList:
all_binds_jnts = [x for x in pm.listConnections(str(cluster) + '.matrix[*]', s=1)]
bindJoints_dic.update({str(cluster):all_binds_jnts})
return bindJoints_dic
@staticmethod
def getAllMeshes():
"""
return: list of all meshes / geometry
"""
shapesList = pm.ls(type="mesh", ni=1)
transformList = list(set(pm.listRelatives(shapesList ,parent=True)))
transformList.sort()
return transformList
@staticmethod
def getAllShapes(transforms):
"""
@param transforms: List.
return : dictionnary with key:mesh / values: shapes
"""
shapes_dic = {}
for transform in transforms:
all_shapes = pm.PyNode(transform).getShapes(ni=True)
shapes_dic.update({str(transform):all_shapes})
return shapes_dic
def getSkinClusterbyShape(self, shapes):
"""
get skinCluster attached to the shape
@param shapes: List
return: List
"""
cluster_dic = {}
for shape in shapes:
try:
incoming = mc.listConnections('{}.inMesh'.format(shape))[0]
if pm.objectType(incoming) == 'skinCluster':
cluster_dic.update({str(shape):incoming})
else:
skinCluster = self.getSkinCluster(shape)
if skinCluster:
if len(skinCluster) > 1:
cluster_dic.update({str(shape):'None'})
else:
cluster_dic.update({str(shape):skinCluster})
else:
cluster_dic.update({str(shape):'None'})
except TypeError:
cluster_dic.update({str(shape):'None'})
return cluster_dic
# ===============================
# BUILD WINDOW
# ===============================
def showUI(dialog=False):
if dialog:
MultiSkin_UI.show_dialog()
else:
# Make sure the UI is deleted before recreating
global tools_cw_ui
try:
tools_cw_ui.deleteLater()
except:
pass
tools_cw_ui = MultiSkin_UI()
tools_cw_ui.show()
# showUI()
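
# Typical calls (illustrative):
#   showUI()             # plain floating window, rebuilt on each call
#   showUI(dialog=True)  # reuse the single cached dialog instance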
|
normal
|
{
"blob_id": "819607d89035413fc2800e9f16222619a74a5d64",
"index": 6429,
"step-1": "<mask token>\n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n <mask token>\n <mask token>\n <mask token>\n\n def widgetsAndLayouts(self):\n\n def addLine():\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,\n bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text\n self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]\n self.vlayout = {}\n for layoutName, margins in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [\n 'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]\n self.hlayout = {}\n for layoutName, margins in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar)\n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.\n ExtendedSelection)\n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = QtWidgets.QTreeWidgetItem(['Geometries'])\n self.meshTreeWidget.setHeaderItem(header)\n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged.connect(self.\n singleClickedAction)\n self.refreshQtree()\n\n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,\n pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],\n '', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],\n '', self.hlayout['buttonsOptions'], '', 30], ['Close All', self\n .closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[\n 'buttonsOptions'], '', 30]]\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:\n self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction)\n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord\n .split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n _optionsExpandAll = self.buttons['Expand 
All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda : self.\n expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.\n expandTree('skin cluster'))\n _optionsCloseAll = self.buttons['Close All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda : self.\n closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.\n closeTree('skin cluster'))\n\n def buildMainLayout(self):\n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n\n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n\n def searchBarEdited(self):\n matchCase = bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(' ')\n else:\n query_words = str(query).lower().split(' ')\n query_words = filter(None, query_words)\n scoreList = {}\n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n score += 1\n elif query_word in item.lower():\n score += 1\n scoreList[item] = score\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(\n query_words)]\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n self.meshTreeWidget.clear()\n self.populateQTree(sorted_matches_string)\n <mask token>\n <mask token>\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n\n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n\n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n\n def filterMeshes(self, filter='all'):\n \"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter == 'all':\n return self.getAllMeshes()\n elif filter == 'skinClusters':\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for\n c in clusters], []))\n meshes = set([x.getParent() for x in meshesShapes if pm.\n objectType(x) == 'mesh'])\n return meshes\n elif filter == 'None':\n return None\n\n @staticmethod\n def test():\n print('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n \"\"\"\n result = []\n if not pm.objExists(_transform):\n return result\n validList = 
mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) +\n '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster): all_binds_jnts})\n return bindJoints_dic\n\n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type='mesh', ni=1)\n transformList = list(set(pm.listRelatives(shapesList, parent=True)))\n transformList.sort()\n return transformList\n\n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform): all_shapes})\n return shapes_dic\n\n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes:\n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape): incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape): 'None'})\n else:\n cluster_dic.update({str(shape): skinCluster})\n else:\n cluster_dic.update({str(shape): 'None'})\n except TypeError:\n cluster_dic.update({str(shape): 'None'})\n return cluster_dic\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n <mask token>\n <mask token>\n <mask token>\n\n def widgetsAndLayouts(self):\n\n def addLine():\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,\n bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text\n self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]\n self.vlayout = {}\n for layoutName, margins in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [\n 'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]\n self.hlayout = {}\n for layoutName, margins in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar)\n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.\n ExtendedSelection)\n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = QtWidgets.QTreeWidgetItem(['Geometries'])\n self.meshTreeWidget.setHeaderItem(header)\n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged.connect(self.\n singleClickedAction)\n self.refreshQtree()\n\n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,\n pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],\n '', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],\n '', self.hlayout['buttonsOptions'], '', 30], ['Close All', self\n .closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[\n 'buttonsOptions'], '', 30]]\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:\n self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction)\n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord\n .split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n _optionsExpandAll = self.buttons['Expand 
All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda : self.\n expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.\n expandTree('skin cluster'))\n _optionsCloseAll = self.buttons['Close All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda : self.\n closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.\n closeTree('skin cluster'))\n\n def buildMainLayout(self):\n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n\n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n\n def searchBarEdited(self):\n matchCase = bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(' ')\n else:\n query_words = str(query).lower().split(' ')\n query_words = filter(None, query_words)\n scoreList = {}\n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n score += 1\n elif query_word in item.lower():\n score += 1\n scoreList[item] = score\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(\n query_words)]\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n self.meshTreeWidget.clear()\n self.populateQTree(sorted_matches_string)\n <mask token>\n\n def closeTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(False) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(False) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(False) for sclus in self.QTClusters]\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n\n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n\n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n\n def filterMeshes(self, filter='all'):\n \"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter == 'all':\n return self.getAllMeshes()\n elif filter == 'skinClusters':\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for\n c in clusters], []))\n meshes = set([x.getParent() for x in meshesShapes if pm.\n objectType(x) == 'mesh'])\n return meshes\n elif filter == 'None':\n return None\n\n 
@staticmethod\n def test():\n print('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n \"\"\"\n result = []\n if not pm.objExists(_transform):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) +\n '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster): all_binds_jnts})\n return bindJoints_dic\n\n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type='mesh', ni=1)\n transformList = list(set(pm.listRelatives(shapesList, parent=True)))\n transformList.sort()\n return transformList\n\n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform): all_shapes})\n return shapes_dic\n\n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes:\n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape): incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape): 'None'})\n else:\n cluster_dic.update({str(shape): skinCluster})\n else:\n cluster_dic.update({str(shape): 'None'})\n except TypeError:\n cluster_dic.update({str(shape): 'None'})\n return cluster_dic\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n __dialog = None\n\n @classmethod\n def show_dialog(cls):\n if cls.__dialog is None:\n cls.__dialog = cls()\n else:\n cls.__dialog.raise_()\n cls.__dialog.show()\n\n def __init__(self, parent=None):\n super(MultiSkin_UI, self).__init__(parent=parent)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.setObjectName('multi skin ui')\n self.starting_height = 500\n self.starting_width = 390\n self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))\n self.setWindowFlags(QtCore.Qt.Tool)\n self.setMinimumWidth(self.starting_width)\n self.resize(self.starting_width, self.starting_height)\n self.mainBox = QtWidgets.QVBoxLayout()\n self.mainBox.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout = QtWidgets.QScrollArea()\n self.mainBox.addWidget(self.scroll_layout)\n self.setLayout(self.mainBox)\n self.scroll_layout.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout.setWidgetResizable(True)\n self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)\n self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)\n self.scroll_widget = QtWidgets.QWidget()\n self.scroll_layout.setWidget(self.scroll_widget)\n self.main_layout = QtWidgets.QVBoxLayout()\n self.main_layout.setContentsMargins(*([5] * 4))\n self.main_layout.setSpacing(2)\n self.setLayout(self.main_layout)\n self.scroll_widget.setLayout(self.main_layout)\n self.widgetsAndLayouts()\n self.create_Button()\n self.buildMainLayout()\n\n def widgetsAndLayouts(self):\n\n def addLine():\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,\n bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text\n self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]\n self.vlayout = {}\n for layoutName, margins in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [\n 'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]\n self.hlayout = {}\n for layoutName, margins in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar)\n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.\n ExtendedSelection)\n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = 
QtWidgets.QTreeWidgetItem(['Geometries'])\n self.meshTreeWidget.setHeaderItem(header)\n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged.connect(self.\n singleClickedAction)\n self.refreshQtree()\n\n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,\n pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],\n '', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],\n '', self.hlayout['buttonsOptions'], '', 30], ['Close All', self\n .closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[\n 'buttonsOptions'], '', 30]]\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:\n self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction)\n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord\n .split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n _optionsExpandAll = self.buttons['Expand All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda : self.\n expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.\n expandTree('skin cluster'))\n _optionsCloseAll = self.buttons['Close All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda : self.\n closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.\n closeTree('skin cluster'))\n\n def buildMainLayout(self):\n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n\n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n\n def searchBarEdited(self):\n matchCase = bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(' ')\n else:\n query_words = str(query).lower().split(' ')\n query_words = filter(None, query_words)\n scoreList = {}\n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n score += 1\n elif query_word in item.lower():\n score += 1\n scoreList[item] = score\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(\n query_words)]\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n self.meshTreeWidget.clear()\n 
self.populateQTree(sorted_matches_string)\n\n def populateQTree(self, filterList):\n self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(\n item)]) for item in filterList]\n [root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]\n [root.setExpanded(True) for root in self.roots]\n self.QtShapes = []\n shape_dic = self.getAllShapes(self.getAllMeshes())\n QTroots_dic = {}\n for root in self.roots:\n try:\n QTroots_dic.update({root: shape_dic[root.text(0)]})\n except KeyError:\n pass\n for QTroot, shapesList in QTroots_dic.items():\n [QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in\n shapesList]\n child_count = QTroot.childCount()\n children = [QTroot.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in\n children]\n [child.setExpanded(True) for child in children]\n [self.QtShapes.append(child) for child in children]\n self.QTClusters = []\n cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))\n QTshape_dic = {}\n for shape in self.QtShapes:\n QTshape_dic.update({shape: cluster_dic[shape.text(0)]})\n for QTshape, clusterList in QTshape_dic.items():\n if clusterList == 'None':\n pass\n else:\n QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])\n child_count = QTshape.childCount()\n children = [QTshape.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in\n children]\n [self.QTClusters.append(child) for child in children]\n bindJoints_dic = self.getBindJointsFromCluster([x for x in\n cluster_dic.values() if x != 'None'])\n QTcluster_dic = {}\n for cluster in self.QTClusters:\n QTcluster_dic.update({cluster: bindJoints_dic[cluster.text(0)]})\n for QTCluster, jointList in QTcluster_dic.items():\n [QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in\n jointList]\n child_count = QTCluster.childCount()\n children = [QTCluster.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in\n children]\n\n def closeTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(False) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(False) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(False) for sclus in self.QTClusters]\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n\n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n\n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n\n def filterMeshes(self, filter='all'):\n \"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter == 'all':\n return self.getAllMeshes()\n elif filter == 'skinClusters':\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for\n c in clusters], []))\n meshes = 
set([x.getParent() for x in meshesShapes if pm.\n objectType(x) == 'mesh'])\n return meshes\n elif filter == 'None':\n return None\n\n @staticmethod\n def test():\n print('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n \"\"\"\n result = []\n if not pm.objExists(_transform):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) +\n '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster): all_binds_jnts})\n return bindJoints_dic\n\n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type='mesh', ni=1)\n transformList = list(set(pm.listRelatives(shapesList, parent=True)))\n transformList.sort()\n return transformList\n\n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform): all_shapes})\n return shapes_dic\n\n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes:\n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape): incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape): 'None'})\n else:\n cluster_dic.update({str(shape): skinCluster})\n else:\n cluster_dic.update({str(shape): 'None'})\n except TypeError:\n cluster_dic.update({str(shape): 'None'})\n return cluster_dic\n\n\ndef showUI(dialog=False):\n if dialog:\n MultiSkin_UI.show_dialog()\n else:\n global tools_cw_ui\n try:\n tools_cw_ui.deleteLater()\n except:\n pass\n tools_cw_ui = MultiSkin_UI()\n tools_cw_ui.show()\n",
"step-4": "<mask token>\n\n\ndef undo(func):\n \"\"\" \n Puts the wrapped `func` into a single Maya Undo action, then\n undoes it when the function enters the finally: block\n from schworer Github\n \"\"\"\n\n @wraps(func)\n def _undofunc(*args, **kwargs):\n try:\n mc.undoInfo(ock=True)\n return func(*args, **kwargs)\n finally:\n mc.undoInfo(cck=True)\n return _undofunc\n\n\n<mask token>\n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n __dialog = None\n\n @classmethod\n def show_dialog(cls):\n if cls.__dialog is None:\n cls.__dialog = cls()\n else:\n cls.__dialog.raise_()\n cls.__dialog.show()\n\n def __init__(self, parent=None):\n super(MultiSkin_UI, self).__init__(parent=parent)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.setObjectName('multi skin ui')\n self.starting_height = 500\n self.starting_width = 390\n self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))\n self.setWindowFlags(QtCore.Qt.Tool)\n self.setMinimumWidth(self.starting_width)\n self.resize(self.starting_width, self.starting_height)\n self.mainBox = QtWidgets.QVBoxLayout()\n self.mainBox.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout = QtWidgets.QScrollArea()\n self.mainBox.addWidget(self.scroll_layout)\n self.setLayout(self.mainBox)\n self.scroll_layout.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout.setWidgetResizable(True)\n self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)\n self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)\n self.scroll_widget = QtWidgets.QWidget()\n self.scroll_layout.setWidget(self.scroll_widget)\n self.main_layout = QtWidgets.QVBoxLayout()\n self.main_layout.setContentsMargins(*([5] * 4))\n self.main_layout.setSpacing(2)\n self.setLayout(self.main_layout)\n self.scroll_widget.setLayout(self.main_layout)\n self.widgetsAndLayouts()\n self.create_Button()\n self.buildMainLayout()\n\n def widgetsAndLayouts(self):\n\n def addLine():\n line = QtWidgets.QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30,\n bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text\n self.vLayoutAndFunctions = [['treeWidget', [1, 1, 1, 1]]]\n self.vlayout = {}\n for layoutName, margins in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.hLayoutAndFunctions = [['filterOptions', [1, 1, 1, 1]], [\n 'buttonsOptions', [1, 1, 1, 1]], ['searchBarWidget', [1, 1, 1, 1]]]\n self.hlayout = {}\n for layoutName, margins in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins\n [1], margins[2], margins[3])\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar)\n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n 
self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n self.meshTreeWidget = QtWidgets.QTreeWidget()\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.\n ExtendedSelection)\n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = QtWidgets.QTreeWidgetItem(['Geometries'])\n self.meshTreeWidget.setHeaderItem(header)\n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged.connect(self.\n singleClickedAction)\n self.refreshQtree()\n\n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [['Show Selected', self.showSelected, 0,\n pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'],\n '', 30], ['Refresh', self.refreshQtree, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic[\n 'colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'],\n '', self.hlayout['buttonsOptions'], '', 30], ['Close All', self\n .closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout[\n 'buttonsOptions'], '', 30]]\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width in self.buttonAndFunctions:\n self.buttons[buttonName] = adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction)\n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord\n .split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n _optionsExpandAll = self.buttons['Expand All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda : self.\n expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda : self.\n expandTree('skin cluster'))\n _optionsCloseAll = self.buttons['Close All'].addButtonActions([\n 'Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda : self.\n closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda : self.\n closeTree('skin cluster'))\n\n def buildMainLayout(self):\n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n\n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n\n def searchBarEdited(self):\n matchCase = bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(' ')\n else:\n query_words = str(query).lower().split(' ')\n query_words = filter(None, query_words)\n scoreList = {}\n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n 
score += 1\n elif query_word in item.lower():\n score += 1\n scoreList[item] = score\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(\n query_words)]\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n self.meshTreeWidget.clear()\n self.populateQTree(sorted_matches_string)\n\n def populateQTree(self, filterList):\n self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(\n item)]) for item in filterList]\n [root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]\n [root.setExpanded(True) for root in self.roots]\n self.QtShapes = []\n shape_dic = self.getAllShapes(self.getAllMeshes())\n QTroots_dic = {}\n for root in self.roots:\n try:\n QTroots_dic.update({root: shape_dic[root.text(0)]})\n except KeyError:\n pass\n for QTroot, shapesList in QTroots_dic.items():\n [QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for shape in\n shapesList]\n child_count = QTroot.childCount()\n children = [QTroot.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in\n children]\n [child.setExpanded(True) for child in children]\n [self.QtShapes.append(child) for child in children]\n self.QTClusters = []\n cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))\n QTshape_dic = {}\n for shape in self.QtShapes:\n QTshape_dic.update({shape: cluster_dic[shape.text(0)]})\n for QTshape, clusterList in QTshape_dic.items():\n if clusterList == 'None':\n pass\n else:\n QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)])\n child_count = QTshape.childCount()\n children = [QTshape.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in\n children]\n [self.QTClusters.append(child) for child in children]\n bindJoints_dic = self.getBindJointsFromCluster([x for x in\n cluster_dic.values() if x != 'None'])\n QTcluster_dic = {}\n for cluster in self.QTClusters:\n QTcluster_dic.update({cluster: bindJoints_dic[cluster.text(0)]})\n for QTCluster, jointList in QTcluster_dic.items():\n [QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in\n jointList]\n child_count = QTCluster.childCount()\n children = [QTCluster.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for\n child in children]\n [child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in\n children]\n\n def closeTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(False) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(False) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(False) for sclus in self.QTClusters]\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n\n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n\n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n\n def filterMeshes(self, filter='all'):\n 
\"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter == 'all':\n return self.getAllMeshes()\n elif filter == 'skinClusters':\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for\n c in clusters], []))\n meshes = set([x.getParent() for x in meshesShapes if pm.\n objectType(x) == 'mesh'])\n return meshes\n elif filter == 'None':\n return None\n\n @staticmethod\n def test():\n print('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n \"\"\"\n result = []\n if not pm.objExists(_transform):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) +\n '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster): all_binds_jnts})\n return bindJoints_dic\n\n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type='mesh', ni=1)\n transformList = list(set(pm.listRelatives(shapesList, parent=True)))\n transformList.sort()\n return transformList\n\n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform): all_shapes})\n return shapes_dic\n\n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes:\n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape): incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape): 'None'})\n else:\n cluster_dic.update({str(shape): skinCluster})\n else:\n cluster_dic.update({str(shape): 'None'})\n except TypeError:\n cluster_dic.update({str(shape): 'None'})\n return cluster_dic\n\n\ndef showUI(dialog=False):\n if dialog:\n MultiSkin_UI.show_dialog()\n else:\n global tools_cw_ui\n try:\n tools_cw_ui.deleteLater()\n except:\n pass\n tools_cw_ui = MultiSkin_UI()\n tools_cw_ui.show()\n",
"step-5": "from functools import wraps\n\nimport maya.cmds as mc\nimport maya.mel as mel\nimport pymel.core as pm\nfrom PySide2 import QtCore, QtGui, QtWidgets\n\nimport adb_core.Class__multi_skin as ms\nimport adbrower\nfrom CollDict import pysideColorDic as pyQtDic\nfrom maya.app.general.mayaMixin import MayaQWidgetDockableMixin\nimport adb_tools.adb_pyQt.Class__rightClickCustom as adbRC\nfrom maya_script import Adbrower\n\nadb = adbrower.Adbrower()\n\nVERSION = 1.0\n\nPATH_WINDOW = Adbrower.PATH_WINDOW_INIT + 'AppData/Roaming'\nPATH_LINUX = Adbrower.PATH_LINUX_INIT\nFOLDER_NAME = Adbrower.FOLDER_NAME_INIT\nICONS_FOLDER = Adbrower.ICONS_FOLDER_INIT\n\nYELLOW = '#ffe100'\nORANGE = '#fd651d'\nGREEN = '#597A59'\nDARKRED = '#745a54'\n\ndef undo(func):\n ''' \n Puts the wrapped `func` into a single Maya Undo action, then\n undoes it when the function enters the finally: block\n from schworer Github\n '''\n @wraps(func)\n def _undofunc(*args, **kwargs):\n try:\n # start an undo chunk\n mc.undoInfo(ock=True)\n return func(*args, **kwargs)\n finally:\n # after calling the func, end the undo chunk\n mc.undoInfo(cck=True)\n return _undofunc\n\n\ndef flatList(ori_list=''):\n \"\"\"\n Flatten a list\n \"\"\"\n flat_list = []\n for item in ori_list:\n if isinstance(item, list):\n for sub_item in item:\n flat_list.append(sub_item)\n else:\n flat_list.append(item)\n return flat_list\n\n#-----------------------------------\n# CLASS\n#----------------------------------- \n\n\nclass MultiSkin_UI(MayaQWidgetDockableMixin, QtWidgets.QDialog):\n __dialog = None\n \n @classmethod\n def show_dialog(cls):\n if cls.__dialog is None:\n cls.__dialog = cls()\n else:\n cls.__dialog.raise_() \n cls.__dialog.show()\n \n def __init__(self,parent=None): \n super(MultiSkin_UI, self).__init__(parent=parent)\n \n self.meshTreeWidget=QtWidgets.QTreeWidget()\n \n self.setObjectName('multi skin ui')\n self.starting_height = 500\n self.starting_width = 390\n self.setWindowTitle('adbrower - Multi Skin Tool' + ' v' + str(VERSION))\n self.setWindowFlags(QtCore.Qt.Tool)\n self.setMinimumWidth(self.starting_width)\n self.resize(self.starting_width, self.starting_height)\n \n # -----------------------------\n # --- Create scrollArea\n\n self.mainBox = QtWidgets.QVBoxLayout()\n self.mainBox.setContentsMargins(0, 0, 0, 0)\n self.scroll_layout = QtWidgets.QScrollArea()\n\n self.mainBox.addWidget(self.scroll_layout)\n self.setLayout(self.mainBox)\n self.scroll_layout.setContentsMargins(0, 0, 0, 0)\n\n self.scroll_layout.setWidgetResizable(True)\n self.scroll_layout.setFrameStyle(QtWidgets.QFrame.NoFrame)\n self.scroll_layout.setFrameShadow(QtWidgets.QFrame.Plain)\n\n self.scroll_widget = QtWidgets.QWidget()\n self.scroll_layout.setWidget(self.scroll_widget) \n \n # -----------------------------\n # --- Main Layout\n\n self.main_layout = QtWidgets.QVBoxLayout()\n self.main_layout.setContentsMargins(*[5] * 4)\n self.main_layout.setSpacing(2)\n self.setLayout(self.main_layout)\n\n self.scroll_widget.setLayout(self.main_layout)\n self.widgetsAndLayouts()\n self.create_Button()\n self.buildMainLayout()\n\n\n def widgetsAndLayouts(self):\n\n # --------- Predefine widgets\n\n def addLine():\n line = QtWidgets. 
QFrame()\n line.setFrameShape(QtWidgets.QFrame.HLine)\n return line\n\n def addText(message, alignement=QtCore.Qt.AlignCenter, height=30, bold=False):\n myFont = QtGui.QFont()\n myFont.setBold(bold)\n text = QtWidgets.QLabel(message)\n text.setAlignment(alignement)\n text.setFixedHeight(height)\n text.setFont(myFont)\n return text \n \n # ------------------------------\n #--------- Layouts\n\n self.vLayoutAndFunctions = [\n # name, margins\n ['treeWidget', [1, 1, 1, 1]],\n ]\n self.vlayout = {}\n for layoutName, margins, in self.vLayoutAndFunctions:\n self.vlayout[layoutName] = QtWidgets.QVBoxLayout()\n self.vlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3],) \n \n self.hLayoutAndFunctions = [\n # name, margins\n ['filterOptions', [1, 1, 1, 1]],\n ['buttonsOptions', [1, 1, 1, 1]],\n ['searchBarWidget', [1, 1, 1, 1]],\n ]\n self.hlayout = {}\n for layoutName, margins, in self.hLayoutAndFunctions:\n self.hlayout[layoutName] = QtWidgets.QHBoxLayout()\n self.hlayout[layoutName].setContentsMargins(margins[0], margins[1], margins[2], margins[3],) \n \n # ------------------------------\n # --------- QLINE EDIT WIDGET\n\n self.searchBar = QtWidgets.QLineEdit()\n self.searchBar.setPlaceholderText('Search...')\n self.searchBar.textEdited.connect(self.searchBarEdited)\n self.hlayout['searchBarWidget'].addWidget(self.searchBar) \n \n # ------------------------------\n # --------- CHECKBOX WIDGET\n \n self.matchCaseChx = QtWidgets.QCheckBox()\n self.matchCaseChx.setChecked(False)\n self.matchCaseChx.setText('Match Case')\n self.matchCaseChx.stateChanged.connect(self.searchBarEdited)\n \n # ------------------------------\n # --------- RADIO BUTTON WIDGET\n \n self.allFilter = QtWidgets.QRadioButton('All', self)\n self.allFilter.setChecked(True)\n self.allFilter.toggled.connect(self.refreshQtree)\n\n self.skinClusterFilter = QtWidgets.QRadioButton('Skin Clusters', self)\n self.skinClusterFilter.setChecked(True)\n self.skinClusterFilter.toggled.connect(self.refreshQtree)\n \n # ------------------------------\n # --------- TREE LIST WIDGET\n\n self.meshTreeWidget=QtWidgets.QTreeWidget()\n\n self.meshTreeWidget.setHeaderLabel('Cloth Tree View')\n self.meshTreeWidget.setSelectionMode(self.meshTreeWidget.ExtendedSelection)\n \n self.vlayout['treeWidget'].addWidget(self.meshTreeWidget)\n header = QtWidgets.QTreeWidgetItem([\"Geometries\"])\n self.meshTreeWidget.setHeaderItem(header)\n \n self.meshTreeWidget.itemClicked.connect(self.singleClickedAction)\n self.meshTreeWidget.itemSelectionChanged .connect(self.singleClickedAction)\n \n self.refreshQtree()\n \n def create_Button(self):\n \"\"\" Create the buttons \"\"\"\n self.buttonAndFunctions = [\n # name, function , group number, labelColor, backgroundColor, layout, layout_coordinate width\n ['Show Selected', self.showSelected, 0, pyQtDic['colorLightGrey'], '', self.hlayout['searchBarWidget'], '', 30],\n ['Refresh', self.refreshQtree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n ['Clear', self.meshTreeWidget.clear, 0, pyQtDic['colorLightGrey'], '', self.hlayout['filterOptions'], '', 30],\n \n ['Expand All', self.expandTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],\n ['Close All', self.closeTree, 0, pyQtDic['colorLightGrey'], '', self.hlayout['buttonsOptions'], '', 30],\n ]\n\n # Build Buttons\n self.buttons = {}\n for buttonName, buttonFunction, _, labColor, bgColor, layout, layout_coord, width, in self.buttonAndFunctions:\n self.buttons[buttonName] = 
adbRC.CustomQPushButton(buttonName)\n self.buttons[buttonName].clicked.connect(buttonFunction) \n try:\n layout.addWidget(self.buttons[buttonName], int(layout_coord.split(',')[0]), int(layout_coord.split(',')[1]))\n except ValueError:\n layout.addWidget(self.buttons[buttonName])\n\n # add Right Clicked Options\n _optionsExpandAll = self.buttons['Expand All'].addButtonActions(['Shapes', 'Skin Clusters'])\n _optionsExpandAll['Shapes'].triggered.connect(lambda:self.expandTree('shape'))\n _optionsExpandAll['Skin Clusters'].triggered.connect(lambda:self.expandTree('skin cluster'))\n \n _optionsCloseAll = self.buttons['Close All'].addButtonActions(['Shapes', 'Skin Clusters'])\n _optionsCloseAll['Shapes'].triggered.connect(lambda:self.closeTree('shape'))\n _optionsCloseAll['Skin Clusters'].triggered.connect(lambda:self.closeTree('skin cluster'))\n\n\n def buildMainLayout(self):\n # ------------------------------\n # --------- BUILD MAIN LAYOUT \n \n self.main_layout.addLayout(self.hlayout['filterOptions'])\n self.hlayout['filterOptions'].addWidget(self.allFilter)\n self.hlayout['filterOptions'].addWidget(self.skinClusterFilter)\n self.hlayout['filterOptions'].addStretch()\n \n self.main_layout.addLayout(self.hlayout['searchBarWidget'])\n self.hlayout['searchBarWidget'].addWidget(self.matchCaseChx)\n self.main_layout.addLayout(self.hlayout['buttonsOptions'])\n self.main_layout.addLayout(self.vlayout['treeWidget'])\n\n\n# ==================================\n# SLOTS\n# ================================== \n\n def refreshQtree(self):\n self.meshTreeWidget.clear()\n all_status = self.allFilter.isChecked()\n if all_status:\n _filter = 'all'\n else:\n _filter = 'skinClusters'\n self.filterList = self.filterMeshes(filter=_filter)\n self.populateQTree(self.filterList)\n \n def getSearchBarText(self):\n searchBarText = self.searchBar.text()\n return searchBarText\n \n def searchBarEdited(self):\n matchCase=bool(self.matchCaseChx.checkState())\n query = self.searchBar.text()\n if matchCase:\n query_words = str(query).split(\" \")\n else:\n query_words = str(query).lower().split(\" \")\n query_words = filter(None, query_words)\n scoreList = {}\n \n for item in [str(x) for x in self.filterList]:\n score = 0\n for query_word in query_words:\n if matchCase:\n if query_word in item:\n score += 1\n else:\n if query_word in item.lower():\n score += 1\n scoreList[item] = score\n\n # If user enter more than one words, get only result with a score at least equal to the number of words in the query\n sorted_matches = [i for i in scoreList.items() if i[1] >= len(query_words)]\n \n # Sort matches by score\n sorted_matches = sorted(sorted_matches, key=lambda x: x[0])\n sorted_matches_string = [name for name, index in sorted_matches]\n \n self.meshTreeWidget.clear()\n self.populateQTree(sorted_matches_string)\n \n\n def populateQTree(self, filterList):\n # Meshes\n # ----------------------\n \n self.roots = [QtWidgets.QTreeWidgetItem(self.meshTreeWidget, [str(item)]) for item in filterList]\n [root.setIcon(0, QtGui.QIcon(':/out_mesh.png')) for root in self.roots]\n [root.setExpanded(True) for root in self.roots]\n \n # Shapes\n # ----------------------\n self.QtShapes = []\n shape_dic = self.getAllShapes(self.getAllMeshes())\n QTroots_dic = {} # Keys are Qtree object\n for root in self.roots:\n try:\n QTroots_dic.update({root:shape_dic[root.text(0)]})\n except KeyError:\n pass\n \n # added the shapes under there mesh\n for QTroot, shapesList in QTroots_dic.items():\n [QtWidgets.QTreeWidgetItem(QTroot, [str(shape)]) for 
shape in shapesList]\n \n # changed their color\n child_count=QTroot.childCount()\n children=[QTroot.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(YELLOW))) for child in children] \n [child.setIcon(0, QtGui.QIcon(':/out_transform.png')) for child in children] \n [child.setExpanded(True) for child in children] \n [self.QtShapes.append(child) for child in children]\n \n # skinClusters\n # ----------------------\n self.QTClusters = [] \n \n cluster_dic = self.getSkinClusterbyShape(flatList(shape_dic.values()))\n QTshape_dic = {}\n for shape in self.QtShapes:\n QTshape_dic.update({shape:cluster_dic[shape.text(0)]})\n \n # added the skinCluster under there shape\n for QTshape, clusterList in QTshape_dic.items():\n if clusterList == 'None':\n pass\n else:\n QtWidgets.QTreeWidgetItem(QTshape, [str(clusterList)]) \n \n # changed their color\n child_count=QTshape.childCount()\n children=[QTshape.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(GREEN))) for child in children] \n [child.setIcon(0, QtGui.QIcon(':/cluster.png')) for child in children] \n [self.QTClusters.append(child) for child in children] \n \n # Joints\n # ---------------------- \n bindJoints_dic = self.getBindJointsFromCluster([x for x in cluster_dic.values() if x != 'None'])\n \n QTcluster_dic = {}\n for cluster in self.QTClusters:\n QTcluster_dic.update({cluster:bindJoints_dic[cluster.text(0)]})\n \n for QTCluster, jointList in QTcluster_dic.items():\n [QtWidgets.QTreeWidgetItem(QTCluster, [str(jnt)]) for jnt in jointList]\n \n # changed their color\n child_count=QTCluster.childCount()\n children=[QTCluster.child(index) for index in range(child_count)]\n [child.setForeground(0, QtGui.QBrush(QtGui.QColor(DARKRED))) for child in children] \n [child.setIcon(0, QtGui.QIcon(':/out_joint.png')) for child in children] \n \n def closeTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(False) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(False) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(False) for sclus in self.QTClusters]\n\n def expandTree(self, type='mesh'):\n if type == 'mesh':\n [root.setExpanded(True) for root in self.roots]\n elif type == 'shape':\n [shape.setExpanded(True) for shape in self.QtShapes]\n elif type == 'skin cluster':\n [sclus.setExpanded(True) for sclus in self.QTClusters]\n \n def showSelected(self):\n selection = pm.selected()\n selection.sort()\n self.meshTreeWidget.clear()\n self.populateQTree(selection)\n \n def singleClickedAction(self):\n mySelection = self.meshTreeWidget.selectedItems()\n str_selected = [x.text(0) for x in mySelection]\n pm.select(str_selected, r=1)\n \n def filterMeshes(self, filter = 'all'):\n \"\"\"\n filter:\n all : all meshes\n skinClusters : all meshes with skinClusters\n None\n \"\"\"\n if filter =='all':\n return self.getAllMeshes()\n\n elif filter == \"skinClusters\":\n clusters = pm.ls(type='skinCluster')\n meshesShapes = set(sum([pm.skinCluster(c, q=1, geometry=1) for c in clusters], []))\n meshes = set([x.getParent() for x in meshesShapes if pm.objectType(x) == 'mesh'])\n return meshes\n \n elif filter == 'None':\n return None\n \n \n# ==================================\n# STATIC METHOD\n# ================================== \n \n @staticmethod\n def test():\n print ('test')\n\n @staticmethod\n def getSkinCluster(_transform):\n \"\"\"\n Find a SkinCluster from a transform\n Returns the skinCluster node\n 
\"\"\"\n result = []\n if not (pm.objExists(_transform)):\n return result\n validList = mel.eval('findRelatedDeformer(\"' + str(_transform) + '\")')\n if validList is None:\n return result\n for elem in validList:\n if pm.nodeType(elem) == 'skinCluster':\n result.append(elem)\n pm.select(result, r=True)\n result_node = pm.selected()\n \n if len(result_node) > 1:\n return result_node\n else:\n try:\n return result_node[0]\n except IndexError:\n return False\n\n @staticmethod\n def getBindJointsFromCluster(clusterList):\n \"\"\"\n Find all joints attached to a skinCluster\n @param clusterList: List. list of skin Clusters\n return dic with key: skin Cluster. Value: list of joint \n \"\"\"\n bindJoints_dic = {}\n for cluster in clusterList:\n all_binds_jnts = [x for x in pm.listConnections(str(cluster) + '.matrix[*]', s=1)]\n bindJoints_dic.update({str(cluster):all_binds_jnts})\n return bindJoints_dic\n \n @staticmethod\n def getAllMeshes():\n \"\"\"\n return: list of all meshes / geometry\n \"\"\"\n shapesList = pm.ls(type=\"mesh\", ni=1)\n transformList = list(set(pm.listRelatives(shapesList ,parent=True)))\n transformList.sort()\n return transformList\n \n @staticmethod\n def getAllShapes(transforms):\n \"\"\"\n @param transforms: List. \n return : dictionnary with key:mesh / values: shapes\n \"\"\"\n shapes_dic = {}\n for transform in transforms:\n all_shapes = pm.PyNode(transform).getShapes(ni=True)\n shapes_dic.update({str(transform):all_shapes}) \n return shapes_dic\n \n \n def getSkinClusterbyShape(self, shapes):\n \"\"\"\n get skinCluster attached to the shape\n @param shapes: List\n return: List\n \"\"\"\n cluster_dic = {}\n for shape in shapes: \n try:\n incoming = mc.listConnections('{}.inMesh'.format(shape))[0]\n if pm.objectType(incoming) == 'skinCluster':\n cluster_dic.update({str(shape):incoming})\n else:\n skinCluster = self.getSkinCluster(shape)\n if skinCluster:\n if len(skinCluster) > 1:\n cluster_dic.update({str(shape):'None'})\n else:\n cluster_dic.update({str(shape):skinCluster}) \n else:\n cluster_dic.update({str(shape):'None'}) \n except TypeError:\n cluster_dic.update({str(shape):'None'})\n return cluster_dic\n\n \n \n# ===============================\n# BUILD WINDOW\n# ===============================\n\n\ndef showUI(dialog = False):\n if dialog:\n MultiSkin_UI.show_dialog()\n else: \n # Make sure the UI is deleted before recreating\n global tools_cw_ui\n try:\n tools_cw_ui.deleteLater()\n except:\n pass\n tools_cw_ui = MultiSkin_UI()\n tools_cw_ui.show()\n \n \n \n# showUI()\n",
"step-ids": [
17,
18,
23,
24,
28
]
}
|
[
17,
18,
23,
24,
28
] |
# Read a permutation of 1..n and print its inverse: value a[i] appears at
# (1-based) position i + 1, so ans[a[i] - 1] records that position.
n = int(input())
a = [int(e) for e in input().split()]
ans = [0] * n
for i in range(n):
    s = a[i]
    ans[s - 1] = i + 1
print(*ans)
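
# Illustrative only (not part of the original submission): the same inversion
# wrapped as a reusable function with a quick sanity check; the name
# `invert_permutation` is ours.
def invert_permutation(a):
    ans = [0] * len(a)
    for i, v in enumerate(a):
        ans[v - 1] = i + 1
    return ans

assert invert_permutation([2, 3, 1]) == [3, 1, 2]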
|
normal
|
{
"blob_id": "f74e2e6b59330bd63fee9192e74a72178abc1cab",
"index": 8195,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n s = a[i]\n ans[s - 1] = i + 1\nprint(*ans)\n",
"step-3": "n = int(input())\na = [int(e) for e in input().split()]\nans = [0] * n\nfor i in range(n):\n s = a[i]\n ans[s - 1] = i + 1\nprint(*ans)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Rock-paper-scissors scoring: yuki holds our hand counts for G (rock),
# C (scissors), P (paper); S lists the opponent's hands in the same letters.
yuki = list(map(int, input().split()))
S = input()
enemy = [S.count('G'), S.count('C'), S.count('P')]
ans = 0
# Greedy phase 1: pair each of our hands with the hand it beats
# (G beats C, C beats P, P beats G) for 3 points per win.
for i in range(3):
    ans += min(yuki[i], enemy[(i + 1) % 3]) * 3
    yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(
        0, enemy[(i + 1) % 3] - yuki[i])
# Greedy phase 2: remaining identical hands draw for 1 point each.
for i in range(3):
    ans += min(yuki[i], enemy[i])
print(ans)
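
# Sanity check of the greedy (illustrative, not from the source); the helper
# name `max_score` is ours and mirrors the logic above.
def max_score(counts, s):
    mine = list(counts)
    foe = [s.count(c) for c in 'GCP']
    score = 0
    for i in range(3):
        w = min(mine[i], foe[(i + 1) % 3])
        score += 3 * w
        mine[i] -= w
        foe[(i + 1) % 3] -= w
    return score + sum(min(mine[i], foe[i]) for i in range(3))

assert max_score([1, 1, 1], 'GCP') == 9  # three wins, no draws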
|
normal
|
{
"blob_id": "ce98c13555c474de0a9cb12e99a97b2316312b00",
"index": 979,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(3):\n ans += min(yuki[i], enemy[(i + 1) % 3]) * 3\n yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(\n 0, enemy[(i + 1) % 3] - yuki[i])\nfor i in range(3):\n ans += min(yuki[i], enemy[i])\nprint(ans)\n",
"step-3": "yuki = list(map(int, input().split()))\nS = input()\nenemy = [S.count('G'), S.count('C'), S.count('P')]\nans = 0\nfor i in range(3):\n ans += min(yuki[i], enemy[(i + 1) % 3]) * 3\n yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(\n 0, enemy[(i + 1) % 3] - yuki[i])\nfor i in range(3):\n ans += min(yuki[i], enemy[i])\nprint(ans)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# The following code causes an infinite loop. Can you figure out what’s missing and how to fix it?
# def print_range(start, end):
# # Loop through the numbers from start to end
# n = start
# while n <= end:
# print(n)
# print_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line)
# Solution
# The loop variable n is never incremented, so the condition `n <= end` stays
# true forever. Incrementing n at the end of each iteration fixes the loop:
def print_range(start, end):
    # Loop through the numbers from start to end, advancing n each pass
    n = start
    while n <= end:
        print(n)
        n += 1

print_range(1, 5)  # Should print 1 2 3 4 5 (each number on its own line)
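
# An equivalent sketch (ours, not part of the original exercise): a for loop
# over range() side-steps the manual increment that caused the bug.
def print_range_for(start, end):
    for n in range(start, end + 1):  # range's stop is exclusive, hence + 1
        print(n)

print_range_for(1, 5)  # Also prints 1 2 3 4 5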
|
normal
|
{
"blob_id": "05454cc6c9961aa5e0de6979bb546342f5bd7b79",
"index": 3321,
"step-1": "# The following code causes an infinite loop. Can you figure out what’s missing and how to fix it?\n\n# def print_range(start, end):\n# \t# Loop through the numbers from start to end\n# \tn = start\n# \twhile n <= end:\n# \t\tprint(n)\n\n# print_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line) \n\n# Solution\n# Variable n's value is not being incremented. We need to increment the value.\n# Here is the example\n\n\ndef print_range(start, end):\n\t# Loop through the numbers from start to end\n\tn = start\n \n\twhile n <= end:\n\t\tprint(n)\n n+=1 \n\nprint_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line) ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# multi-layer perceptron with back-propagation, trained on the XNOR truth table
import numpy as np
import theano
import matplotlib.pyplot as plt
# In[2]:
inputs=[[0,0],
[1,0],
[0,1],
[1,1]]
outputs=[1,0,0,1]
# In[3]:
x=theano.tensor.matrix(name='x')
# In[4]:
# Weights: three neurons in total (two hidden, one output), each taking 2 inputs
w1val=np.asarray([np.random.randn(),np.random.randn()])  # synapse weights
w1=theano.shared(w1val,name='w1')
w2val=np.asarray([np.random.randn(),np.random.randn()])  # synapse weights
w2=theano.shared(w2val,name='w2')
w3val=np.asarray([np.random.randn(),np.random.randn()])  # synapse weights
w3=theano.shared(w3val,name='w3')
# In[5]:
# Bias terms, one per neuron (initialized to 1.1, 1.2, 1.3)
b1 = theano.shared(1.1,name='b1')
b2 = theano.shared(1.2,name='b2')
b3 = theano.shared(1.3,name='b3')
# In[6]:
# computation for every neuron
# hidden layer: weighted sum plus bias, followed by a sigmoid activation
a1sum=theano.tensor.dot(x,w1)+b1
a2sum=theano.tensor.dot(x,w2)+b2
a1=1/(1+theano.tensor.exp(-1*a1sum))
a2=1/(1+theano.tensor.exp(-1*a2sum))
# output layer neuron
# stack combines the two hidden-layer activations and feeds them to the output layer
x2 = theano.tensor.stack([a1,a2],axis=1)
# In[7]:
'''theano.tensor.stack with axis=1 zips the two hidden activations
sample-wise: given a1 = [a11, a12, ...] and a2 = [a21, a22, ...],
it yields rows [a11, a21], [a12, a22], ..., one row per training sample.'''
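# Concrete illustration (our annotation): np.stack([[0.2, 0.9], [0.7, 0.1]], axis=1)
# returns [[0.2, 0.7], [0.9, 0.1]], i.e. one row pairing each sample's activations.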
a3sum=theano.tensor.dot(x2,w3)+b3
a3=1/(1+theano.tensor.exp(-1*a3sum))
# predicted output
ahat=a3
# symbolic placeholder for the actual (target) outputs
a=theano.tensor.vector(name='a')
# In[8]:
# cost function: binary cross-entropy for the sigmoid output ahat = 1/(1 + e^(-z))
cost=-(a*theano.tensor.log(ahat)+(1-a)*theano.tensor.log(1-ahat)).sum()
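# For reference (our annotation): with a sigmoid output and this cross-entropy,
# the gradient at the output pre-activation simplifies to dcost/dz = ahat - a,
# which is why this pairing trains cleanly under plain gradient descent.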
# gradient descent: compute the gradient of the cost w.r.t. every weight and bias
dcostdw1 = theano.tensor.grad(cost,w1)
dcostdw2 = theano.tensor.grad(cost,w2)
dcostdw3 = theano.tensor.grad(cost,w3)
dcostdb1=theano.tensor.grad(cost,b1)
dcostdb2=theano.tensor.grad(cost,b2)
dcostdb3=theano.tensor.grad(cost,b3)
# apply gradient descent to update the weights and biases (learning rate 0.02)
wn1=w1-0.02*dcostdw1
wn2=w2-0.02*dcostdw2
wn3=w3-0.02*dcostdw3
wb1=b1-0.02*dcostdb1
wb2=b2-0.02*dcostdb2
wb3=b3-0.02*dcostdb3
# compiled Theano function: one call runs a forward pass, returns (ahat, cost), and applies every update
train=theano.function([x,a],[ahat,cost],updates=[(w1,wn1),(w2,wn2),(w3,wn3),(b1,wb1),(b2,wb2),(b3,wb3)])
cost1=[]
val1=[]
# train the model for 25000 iterations
for i in range(25000):
pval,costval=train(inputs,outputs)
print(costval)
val1.append(pval)
cost1.append(costval)
# In[9]:
print('the final outputs are:')
for i in range(len(inputs)):
print("the output of x1=%d | x2=%d is %.2f"%(inputs[i][0],inputs[i][1],pval[i]))
plt.plot(cost1,color='red')
plt.show()
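
# Optional check (ours, not in the original notebook): rounding the trained
# outputs usually recovers the XNOR targets, though an unlucky random
# initialization can still leave the network in a poor local minimum.
print('rounded predictions:', [int(round(p)) for p in pval])
print('targets:            ', outputs)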
|
normal
|
{
"blob_id": "adec7efceb038c0ecb23c256c23c2ea212752d64",
"index": 4010,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-3": "<mask token>\ninputs = [[0, 0], [1, 0], [0, 1], [1, 1]]\noutputs = [1, 0, 0, 1]\nx = theano.tensor.matrix(name='x')\nw1val = np.asarray([np.random.randn(), np.random.randn()])\nw1 = theano.shared(w1val, name='w1')\nw2val = np.asarray([np.random.randn(), np.random.randn()])\nw2 = theano.shared(w2val, name='w2')\nw3val = np.asarray([np.random.randn(), np.random.randn()])\nw3 = theano.shared(w3val, name='w3')\nb1 = theano.shared(1.1, name='b1')\nb2 = theano.shared(1.2, name='b2')\nb3 = theano.shared(1.3, name='b3')\na1sum = theano.tensor.dot(x, w1) + b1\na2sum = theano.tensor.dot(x, w2) + b2\na1 = 1 / (1 + theano.tensor.exp(-1 * a1sum))\na2 = 1 / (1 + theano.tensor.exp(-1 * a2sum))\nx2 = theano.tensor.stack([a1, a2], axis=1)\n<mask token>\na3sum = theano.tensor.dot(x2, w3) + b3\na3 = 1 / (1 + theano.tensor.exp(-1 * a3sum))\nahat = a3\na = theano.tensor.vector(name='a')\ncost = -(a * theano.tensor.log(ahat) + (1 - a) * theano.tensor.log(1 - ahat)\n ).sum()\ndcostdw1 = theano.tensor.grad(cost, w1)\ndcostdw2 = theano.tensor.grad(cost, w2)\ndcostdw3 = theano.tensor.grad(cost, w3)\ndcostdb1 = theano.tensor.grad(cost, b1)\ndcostdb2 = theano.tensor.grad(cost, b2)\ndcostdb3 = theano.tensor.grad(cost, b3)\nwn1 = w1 - 0.02 * dcostdw1\nwn2 = w2 - 0.02 * dcostdw2\nwn3 = w3 - 0.02 * dcostdw3\nwb1 = b1 - 0.02 * dcostdb1\nwb2 = b2 - 0.02 * dcostdb2\nwb3 = b3 - 0.02 * dcostdb3\ntrain = theano.function([x, a], [ahat, cost], updates=[(w1, wn1), (w2, wn2),\n (w3, wn3), (b1, wb1), (b2, wb2), (b3, wb3)])\ncost1 = []\nval1 = []\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-4": "import numpy as np\nimport theano\nimport matplotlib.pyplot as plt\ninputs = [[0, 0], [1, 0], [0, 1], [1, 1]]\noutputs = [1, 0, 0, 1]\nx = theano.tensor.matrix(name='x')\nw1val = np.asarray([np.random.randn(), np.random.randn()])\nw1 = theano.shared(w1val, name='w1')\nw2val = np.asarray([np.random.randn(), np.random.randn()])\nw2 = theano.shared(w2val, name='w2')\nw3val = np.asarray([np.random.randn(), np.random.randn()])\nw3 = theano.shared(w3val, name='w3')\nb1 = theano.shared(1.1, name='b1')\nb2 = theano.shared(1.2, name='b2')\nb3 = theano.shared(1.3, name='b3')\na1sum = theano.tensor.dot(x, w1) + b1\na2sum = theano.tensor.dot(x, w2) + b2\na1 = 1 / (1 + theano.tensor.exp(-1 * a1sum))\na2 = 1 / (1 + theano.tensor.exp(-1 * a2sum))\nx2 = theano.tensor.stack([a1, a2], axis=1)\n<mask token>\na3sum = theano.tensor.dot(x2, w3) + b3\na3 = 1 / (1 + theano.tensor.exp(-1 * a3sum))\nahat = a3\na = theano.tensor.vector(name='a')\ncost = -(a * theano.tensor.log(ahat) + (1 - a) * theano.tensor.log(1 - ahat)\n ).sum()\ndcostdw1 = theano.tensor.grad(cost, w1)\ndcostdw2 = theano.tensor.grad(cost, w2)\ndcostdw3 = theano.tensor.grad(cost, w3)\ndcostdb1 = theano.tensor.grad(cost, b1)\ndcostdb2 = theano.tensor.grad(cost, b2)\ndcostdb3 = theano.tensor.grad(cost, b3)\nwn1 = w1 - 0.02 * dcostdw1\nwn2 = w2 - 0.02 * dcostdw2\nwn3 = w3 - 0.02 * dcostdw3\nwb1 = b1 - 0.02 * dcostdb1\nwb2 = b2 - 0.02 * dcostdb2\nwb3 = b3 - 0.02 * dcostdb3\ntrain = theano.function([x, a], [ahat, cost], updates=[(w1, wn1), (w2, wn2),\n (w3, wn3), (b1, wb1), (b2, wb2), (b3, wb3)])\ncost1 = []\nval1 = []\nfor i in range(25000):\n pval, costval = train(inputs, outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print('the output of x1=%d | x2=%d is %.2f' % (inputs[i][0], inputs[i][\n 1], pval[i]))\nplt.plot(cost1, color='red')\nplt.show()\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#multi layer perceptron with back propogation\nimport numpy as np\nimport theano\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\ninputs=[[0,0],\n [1,0],\n [0,1],\n [1,1]]\noutputs=[1,0,0,1]\n\n\n# In[3]:\n\n\nx=theano.tensor.matrix(name='x')\n\n\n# In[4]:\n\n\n#Hidden layer as inputs from every neuron are 2 and we have 3 neuron\nw1val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw1=theano.shared(w1val,name='w1')\nw2val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw2=theano.shared(w2val,name='w2')\nw3val=np.asarray([np.random.randn(),np.random.randn()])#weight of synapse\nw3=theano.shared(w3val,name='w3')\n\n\n# In[5]:\n\n\n#Bias value is 1\nb1 = theano.shared(1.1,name='b1')\nb2 = theano.shared(1.2,name='b2')\nb3 = theano.shared(1.3,name='b3')\n\n\n# In[6]:\n\n\n#computation foe every neuron\n#hidden layer\na1sum=theano.tensor.dot(x,w1)+b1\na2sum=theano.tensor.dot(x,w2)+b2\n\na1=1/(1+theano.tensor.exp(-1*a1sum))\na2=1/(1+theano.tensor.exp(-1*a2sum))\n\n#output layer neuron\n#stack is combining two hiding layer values & feeding to the output layer\nx2 = theano.tensor.stack([a1,a2],axis=1)\n\n\n# In[7]:\n\n\n'''if we write\n[[a11,a12,a21,a22],[a33,a34,a43,a44]]-> inputs\nwhat stack will do is\n[a11,a33],[a12,a34],[a21,a43],[a22,a44]'''\n\na3sum=theano.tensor.dot(x2,w3)+b3\na3=1/(1+theano.tensor.exp(-1*a3sum))\n\n#final output\nahat=a3\n\n#actual output\na=theano.tensor.vector(name='a')\n\n\n# In[8]:\n\n\n#cost function\ncost=-(a*theano.tensor.log(ahat)+(1-a)*theano.tensor.log(1-ahat)).sum()#it is defined for 1/1+eraise to -z\n#GDA role\n#for calculating gradient\n\ndcostdw1 = theano.tensor.grad(cost,w1)\ndcostdw2 = theano.tensor.grad(cost,w2)\ndcostdw3 = theano.tensor.grad(cost,w3)\n\ndcostdb1=theano.tensor.grad(cost,b1)\ndcostdb2=theano.tensor.grad(cost,b2)\ndcostdb3=theano.tensor.grad(cost,b3)\n\n#apply GDA to update the weights\nwn1=w1-0.02*dcostdw1\nwn2=w2-0.02*dcostdw2\nwn3=w3-0.02*dcostdw3\n\nwb1=b1-0.02*dcostdb1\nwb2=b2-0.02*dcostdb2\nwb3=b3-0.02*dcostdb3\n#theano function for training the algorithm\ntrain=theano.function([x,a],[ahat,cost],updates=[(w1,wn1),(w2,wn2),(w3,wn3),(b1,wb1),(b2,wb2),(b3,wb3)])\n\ncost1=[]\nval1=[]\n\n#training a model\nfor i in range(25000):\n pval,costval=train(inputs,outputs)\n print(costval)\n val1.append(pval)\n cost1.append(costval)\n\n\n# In[9]:\n\n\nprint('the final outputs are:')\nfor i in range(len(inputs)):\n print(\"the output of x1=%d | x2=%d is %.2f\"%(inputs[i][0],inputs[i][1],pval[i]))\nplt.plot(cost1,color='red')\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask_restful import Resource, reqparse
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.RegexpTokenizer(r"\w+")
# CLASS DESCRIPTION:
# Divides the text into sentences, clears each one of punctuation marks, and
# builds a chunk (shallow parse) tree per sentence, extracting verbs and
# proper nouns via a regexp grammar.
# added: Temuri Kitoshvili
class Chunk_CleanSentences(Resource):
parser = reqparse.RequestParser()
    parser.add_argument('text',
                        type=str,
                        required=True,
                        help="გთხოვთ შეიყვანოთ სწორი წინადადება")  # Georgian: "Please enter a valid sentence"
def get(self):
data = Chunk_CleanSentences.parser.parse_args()
text = data['text']
sentences = sent_tokenize(text)
clean_sentences = []
for sent in sentences:
clear_sentence = tokenizer.tokenize(sent)
clean_sentences.append(clear_sentence)
        for words in clean_sentences:
            tagged_sent = nltk.pos_tag(words)
            # chunk grammar: zero or more verbs (VB, VBD, VBG, ...) optionally
            # followed by a proper noun (NNP)
            chunkGram = r"""Chunk: {<VB.?>*<NNP>?} """
            chunkParser = nltk.RegexpParser(chunkGram)
            chunked = chunkParser.parse(tagged_sent)
            chunked.draw()  # opens an interactive tree window, blocking the request until closed
return {"clean_sentences": clean_sentences}
|
normal
|
{
"blob_id": "6d042a2035eab579193452e4dc44c425125d9515",
"index": 9402,
"step-1": "<mask token>\n\n\nclass Chunk_CleanSentences(Resource):\n <mask token>\n parser.add_argument('text', type=str, required=True, help=\n 'გთხოვთ შეიყვანოთ სწორი წინადადება')\n\n def get(self):\n data = Chunk_CleanSentences.parser.parse_args()\n text = data['text']\n sentences = sent_tokenize(text)\n clean_sentences = []\n for sent in sentences:\n clear_sentence = tokenizer.tokenize(sent)\n clean_sentences.append(clear_sentence)\n for word in clean_sentences:\n tagged_sent = nltk.pos_tag(word)\n chunkGram = 'Chunk: {<VB.?>*<NNP>?} '\n chuckParser = nltk.RegexpParser(chunkGram)\n chunked = chuckParser.parse(tagged_sent)\n chunked.draw()\n return {'clean_sentences': clean_sentences}\n",
"step-2": "<mask token>\n\n\nclass Chunk_CleanSentences(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('text', type=str, required=True, help=\n 'გთხოვთ შეიყვანოთ სწორი წინადადება')\n\n def get(self):\n data = Chunk_CleanSentences.parser.parse_args()\n text = data['text']\n sentences = sent_tokenize(text)\n clean_sentences = []\n for sent in sentences:\n clear_sentence = tokenizer.tokenize(sent)\n clean_sentences.append(clear_sentence)\n for word in clean_sentences:\n tagged_sent = nltk.pos_tag(word)\n chunkGram = 'Chunk: {<VB.?>*<NNP>?} '\n chuckParser = nltk.RegexpParser(chunkGram)\n chunked = chuckParser.parse(tagged_sent)\n chunked.draw()\n return {'clean_sentences': clean_sentences}\n",
"step-3": "<mask token>\ntokenizer = nltk.RegexpTokenizer('\\\\w+')\n\n\nclass Chunk_CleanSentences(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('text', type=str, required=True, help=\n 'გთხოვთ შეიყვანოთ სწორი წინადადება')\n\n def get(self):\n data = Chunk_CleanSentences.parser.parse_args()\n text = data['text']\n sentences = sent_tokenize(text)\n clean_sentences = []\n for sent in sentences:\n clear_sentence = tokenizer.tokenize(sent)\n clean_sentences.append(clear_sentence)\n for word in clean_sentences:\n tagged_sent = nltk.pos_tag(word)\n chunkGram = 'Chunk: {<VB.?>*<NNP>?} '\n chuckParser = nltk.RegexpParser(chunkGram)\n chunked = chuckParser.parse(tagged_sent)\n chunked.draw()\n return {'clean_sentences': clean_sentences}\n",
"step-4": "from flask_restful import Resource, reqparse\nimport nltk\nfrom nltk.tokenize import sent_tokenize\ntokenizer = nltk.RegexpTokenizer('\\\\w+')\n\n\nclass Chunk_CleanSentences(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('text', type=str, required=True, help=\n 'გთხოვთ შეიყვანოთ სწორი წინადადება')\n\n def get(self):\n data = Chunk_CleanSentences.parser.parse_args()\n text = data['text']\n sentences = sent_tokenize(text)\n clean_sentences = []\n for sent in sentences:\n clear_sentence = tokenizer.tokenize(sent)\n clean_sentences.append(clear_sentence)\n for word in clean_sentences:\n tagged_sent = nltk.pos_tag(word)\n chunkGram = 'Chunk: {<VB.?>*<NNP>?} '\n chuckParser = nltk.RegexpParser(chunkGram)\n chunked = chuckParser.parse(tagged_sent)\n chunked.draw()\n return {'clean_sentences': clean_sentences}\n",
"step-5": "from flask_restful import Resource, reqparse\nimport nltk\nfrom nltk.tokenize import sent_tokenize\ntokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n\n# CLASS DESCRIPTION:\n # Devides and clears the sentence of punctuation marks and builds a dependency tree on each sentence\n # Allocates its own names and verbs\n # added: Temuri Kitoshvili\n\nclass Chunk_CleanSentences(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('text',\n type=str,\n required=True,\n help=\"გთხოვთ შეიყვანოთ სწორი წინადადება\")\n\n def get(self):\n data = Chunk_CleanSentences.parser.parse_args()\n text = data['text']\n\n sentences = sent_tokenize(text)\n clean_sentences = []\n\n for sent in sentences:\n clear_sentence = tokenizer.tokenize(sent)\n clean_sentences.append(clear_sentence)\n\n for word in clean_sentences:\n tagged_sent = nltk.pos_tag(word)\n chunkGram = r\"\"\"Chunk: {<VB.?>*<NNP>?} \"\"\"\n chuckParser = nltk.RegexpParser(chunkGram)\n chunked = chuckParser.parse(tagged_sent)\n\n chunked.draw()\n\n return {\"clean_sentences\": clean_sentences}\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""Transcoder with TOSHIBA RECAIUS API."""
import threading
import queue
import time
import numpy as np
from logzero import logger
import requests
import model.key
AUTH_URL = 'https://api.recaius.jp/auth/v2/tokens'
VOICE_URL = 'https://api.recaius.jp/asr/v2/voices'
class Transcoder:
"""Transcoder Class."""
def __init__(self):
"""Constructor."""
logger.info('__init__:Enter')
self._token = None
self.transcript = None
self._queue = queue.Queue()
def start(self, token):
"""Start recognition."""
logger.info('start:Enter')
self._token = token
threading.Thread(target=self._process).start()
def write_stream(self, buf):
"""Write audio stream."""
self._queue.put(buf)
def _process(self):
logger.info('_process:Enter')
token = self._authenticate()['token']
uuid = self._start_recognition(token)['uuid']
logger.info('start transcode')
i = 1
while True:
arr = self._stream_generator()
if(arr is None):
break
# logger.debug(f'{len(arr)} , {self._queue.qsize()}')
inline = np.hstack(arr)
arr_bytes = inline.tobytes('C')
header = {
'Content-Type': 'multipart/form-data',
'X-Token': token
}
files = {
'voice_id': ('', i, ''),
'voice': ('', arr_bytes, 'application/octet-stream')
}
resp = requests.put(
f'{VOICE_URL}/{uuid}', headers=header, files=files)
if(resp.status_code == 200):
logger.debug(resp.json())
result = resp.json()[0]
if(result[0] == 'TMP_RESULT' or result[0] == 'RESULT'):
self._write_result(result[1])
i = i + 1
self._flush_recognition(uuid, token, i)
while True:
if(self._get_result(uuid, token) is None):
break
time.sleep(0.1)
self._end_recognition(uuid, token)
logger.info('end transcode')
def _authenticate(self):
speechrecog_jajp_id = model.key.RECAIUS_ID
speechrecog_jajp_password = model.key.RECAIUS_PASSWORD
param = {
"speech_recog_jaJP": {
'service_id': speechrecog_jajp_id,
'password': speechrecog_jajp_password
}
}
return requests.post(AUTH_URL, json=param).json()
def _flush_recognition(self, uuid, token, i):
header = {
'Content-Type': 'application/json',
'X-Token': token
}
param = {
'voice_id': i,
}
resp = requests.put(
f'{VOICE_URL}/{uuid}/flush', headers=header, json=param)
if(resp.status_code == 200):
            logger.debug(f'flush result:{resp.json()}')
return resp.json()
else:
logger.debug(f'flush result(status:{resp.status_code})')
def _get_result(self, uuid, token):
header = {
'X-Token': token
}
resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)
if(resp.status_code == 200):
logger.debug(f'get result:{resp.json()}')
return resp.json()
else:
logger.debug(f'get result(status:{resp.status_code})')
def _stream_generator(self):
arr = []
while True:
try:
v = self._queue.get_nowait()
# print(v)
if v is None:
return None
arr.append((v * 32767).astype(np.int16))
except queue.Empty:
if(len(arr) != 0):
break
else:
time.sleep(0.1)
return arr
def _start_recognition(self, token):
header = {
'Content-Type': 'application/json',
'X-Token': token
}
param = {
'model_id': 1
}
return requests.post(VOICE_URL, headers=header, json=param).json()
def _end_recognition(self, uuid, token):
header = {
'X-Token': token
}
resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)
if(resp.status_code == 204):
logger.debug(f'delete result(status:{resp.status_code})')
    def _write_result(self, transcript):
        self.transcript = transcript
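
# Usage sketch (my addition): feed float audio chunks in [-1, 1] and terminate
# with None. This assumes valid RECAIUS credentials in model.key -- without
# them the auth request inside _process() fails -- and the chunk size is arbitrary.
if __name__ == '__main__':
    t = Transcoder()
    t.start(token=None)  # the token argument is unused; _process() re-authenticates
    t.write_stream(np.zeros(1600, dtype=np.float32))  # one silent 0.1 s chunk @ 16 kHz
    time.sleep(0.5)       # let the worker drain the chunk before signalling EOF
    t.write_stream(None)  # None ends _stream_generator and the worker thread
    time.sleep(2.0)
    print(t.transcript)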
|
normal
|
{
"blob_id": "421b0c1871350ff541b4e56d1e18d77016884552",
"index": 5199,
"step-1": "<mask token>\n\n\nclass Transcoder:\n <mask token>\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n <mask token>\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if arr is None:\n break\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {'Content-Type': 'multipart/form-data', 'X-Token': token}\n files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,\n 'application/octet-stream')}\n resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,\n files=files)\n if resp.status_code == 200:\n logger.debug(resp.json())\n result = resp.json()[0]\n if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if self._get_result(uuid, token) is None:\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password}}\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'voice_id': i}\n resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,\n json=param)\n if resp.status_code == 200:\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n <mask token>\n <mask token>\n\n def _start_recognition(self, token):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'model_id': 1}\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if resp.status_code == 204:\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def _write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-2": "<mask token>\n\n\nclass Transcoder:\n <mask token>\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n\n def write_stream(self, buf):\n \"\"\"Write audio stream.\"\"\"\n self._queue.put(buf)\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if arr is None:\n break\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {'Content-Type': 'multipart/form-data', 'X-Token': token}\n files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,\n 'application/octet-stream')}\n resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,\n files=files)\n if resp.status_code == 200:\n logger.debug(resp.json())\n result = resp.json()[0]\n if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if self._get_result(uuid, token) is None:\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password}}\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'voice_id': i}\n resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,\n json=param)\n if resp.status_code == 200:\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n <mask token>\n <mask token>\n\n def _start_recognition(self, token):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'model_id': 1}\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if resp.status_code == 204:\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def _write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-3": "<mask token>\n\n\nclass Transcoder:\n <mask token>\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n\n def write_stream(self, buf):\n \"\"\"Write audio stream.\"\"\"\n self._queue.put(buf)\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if arr is None:\n break\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {'Content-Type': 'multipart/form-data', 'X-Token': token}\n files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,\n 'application/octet-stream')}\n resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,\n files=files)\n if resp.status_code == 200:\n logger.debug(resp.json())\n result = resp.json()[0]\n if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if self._get_result(uuid, token) is None:\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password}}\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'voice_id': i}\n resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,\n json=param)\n if resp.status_code == 200:\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n\n def _get_result(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)\n if resp.status_code == 200:\n logger.debug(f'get result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'get result(status:{resp.status_code})')\n\n def _stream_generator(self):\n arr = []\n while True:\n try:\n v = self._queue.get_nowait()\n if v is None:\n return None\n arr.append((v * 32767).astype(np.int16))\n except queue.Empty:\n if len(arr) != 0:\n break\n else:\n time.sleep(0.1)\n return arr\n\n def _start_recognition(self, token):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'model_id': 1}\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if resp.status_code == 204:\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def _write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-4": "<mask token>\nAUTH_URL = 'https://api.recaius.jp/auth/v2/tokens'\nVOICE_URL = 'https://api.recaius.jp/asr/v2/voices'\n\n\nclass Transcoder:\n \"\"\"Transcoder Class.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n\n def write_stream(self, buf):\n \"\"\"Write audio stream.\"\"\"\n self._queue.put(buf)\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if arr is None:\n break\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {'Content-Type': 'multipart/form-data', 'X-Token': token}\n files = {'voice_id': ('', i, ''), 'voice': ('', arr_bytes,\n 'application/octet-stream')}\n resp = requests.put(f'{VOICE_URL}/{uuid}', headers=header,\n files=files)\n if resp.status_code == 200:\n logger.debug(resp.json())\n result = resp.json()[0]\n if result[0] == 'TMP_RESULT' or result[0] == 'RESULT':\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if self._get_result(uuid, token) is None:\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {'speech_recog_jaJP': {'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password}}\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'voice_id': i}\n resp = requests.put(f'{VOICE_URL}/{uuid}/flush', headers=header,\n json=param)\n if resp.status_code == 200:\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n\n def _get_result(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)\n if resp.status_code == 200:\n logger.debug(f'get result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'get result(status:{resp.status_code})')\n\n def _stream_generator(self):\n arr = []\n while True:\n try:\n v = self._queue.get_nowait()\n if v is None:\n return None\n arr.append((v * 32767).astype(np.int16))\n except queue.Empty:\n if len(arr) != 0:\n break\n else:\n time.sleep(0.1)\n return arr\n\n def _start_recognition(self, token):\n header = {'Content-Type': 'application/json', 'X-Token': token}\n param = {'model_id': 1}\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {'X-Token': token}\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if resp.status_code == 204:\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def _write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Transcoder with TOSHIBA RECAIUS API.\"\"\"\nimport threading\nimport queue\nimport time\n\nimport numpy as np\nfrom logzero import logger\nimport requests\n\nimport model.key\n\nAUTH_URL = 'https://api.recaius.jp/auth/v2/tokens'\nVOICE_URL = 'https://api.recaius.jp/asr/v2/voices'\n\n\nclass Transcoder:\n \"\"\"Transcoder Class.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n logger.info('__init__:Enter')\n self._token = None\n self.transcript = None\n self._queue = queue.Queue()\n\n def start(self, token):\n \"\"\"Start recognition.\"\"\"\n logger.info('start:Enter')\n self._token = token\n threading.Thread(target=self._process).start()\n\n def write_stream(self, buf):\n \"\"\"Write audio stream.\"\"\"\n self._queue.put(buf)\n\n def _process(self):\n logger.info('_process:Enter')\n token = self._authenticate()['token']\n uuid = self._start_recognition(token)['uuid']\n logger.info('start transcode')\n i = 1\n while True:\n arr = self._stream_generator()\n if(arr is None):\n break\n # logger.debug(f'{len(arr)} , {self._queue.qsize()}')\n inline = np.hstack(arr)\n arr_bytes = inline.tobytes('C')\n header = {\n 'Content-Type': 'multipart/form-data',\n 'X-Token': token\n }\n files = {\n 'voice_id': ('', i, ''),\n 'voice': ('', arr_bytes, 'application/octet-stream')\n }\n resp = requests.put(\n f'{VOICE_URL}/{uuid}', headers=header, files=files)\n if(resp.status_code == 200):\n logger.debug(resp.json())\n result = resp.json()[0]\n if(result[0] == 'TMP_RESULT' or result[0] == 'RESULT'):\n self._write_result(result[1])\n i = i + 1\n self._flush_recognition(uuid, token, i)\n while True:\n if(self._get_result(uuid, token) is None):\n break\n time.sleep(0.1)\n self._end_recognition(uuid, token)\n logger.info('end transcode')\n\n def _authenticate(self):\n speechrecog_jajp_id = model.key.RECAIUS_ID\n speechrecog_jajp_password = model.key.RECAIUS_PASSWORD\n param = {\n \"speech_recog_jaJP\": {\n 'service_id': speechrecog_jajp_id,\n 'password': speechrecog_jajp_password\n }\n }\n return requests.post(AUTH_URL, json=param).json()\n\n def _flush_recognition(self, uuid, token, i):\n header = {\n 'Content-Type': 'application/json',\n 'X-Token': token\n }\n param = {\n 'voice_id': i,\n }\n resp = requests.put(\n f'{VOICE_URL}/{uuid}/flush', headers=header, json=param)\n if(resp.status_code == 200):\n logger.debug(f'frush result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'flush result(status:{resp.status_code})')\n\n def _get_result(self, uuid, token):\n header = {\n 'X-Token': token\n }\n resp = requests.get(f'{VOICE_URL}/{uuid}/results', headers=header)\n if(resp.status_code == 200):\n logger.debug(f'get result:{resp.json()}')\n return resp.json()\n else:\n logger.debug(f'get result(status:{resp.status_code})')\n\n def _stream_generator(self):\n arr = []\n while True:\n try:\n v = self._queue.get_nowait()\n # print(v)\n if v is None:\n return None\n arr.append((v * 32767).astype(np.int16))\n except queue.Empty:\n if(len(arr) != 0):\n break\n else:\n time.sleep(0.1)\n return arr\n\n def _start_recognition(self, token):\n header = {\n 'Content-Type': 'application/json',\n 'X-Token': token\n }\n param = {\n 'model_id': 1\n }\n return requests.post(VOICE_URL, headers=header, json=param).json()\n\n def _end_recognition(self, uuid, token):\n header = {\n 'X-Token': token\n }\n resp = requests.delete(f'{VOICE_URL}/{uuid}', headers=header)\n if(resp.status_code == 204):\n logger.debug(f'delete result(status:{resp.status_code})')\n\n def 
_write_result(self, transcipt):\n self.transcript = transcipt\n",
"step-ids": [
9,
10,
12,
14,
16
]
}
|
[
9,
10,
12,
14,
16
] |
def print_duplicates(arr):
uniques = set()
for elem in arr:
if elem in uniques:
print(elem, end=' ')
else:
uniques.add(elem)
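
# Quick check (my addition): each duplicate is printed at every repeat occurrence.
if __name__ == '__main__':
    print_duplicates([1, 2, 2, 3, 1, 3, 3])  # prints: 2 1 3 3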
|
normal
|
{
"blob_id": "420c3944de0a5436a9824604fd6caf27706eb99c",
"index": 4102,
"step-1": "<mask token>\n",
"step-2": "def print_duplicates(arr):\n uniques = set()\n for elem in arr:\n if elem in uniques:\n print(elem, end=' ')\n else:\n uniques.add(elem)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# module: order functionality
# HW2: complete this func
def process_option(food, option):
# print(food.keys())
food_name = list(food.keys())[option-1]
food_price = food[food_name]
print(food_price)
print("You have chosen: ", option, food_name, "!", " For unit price: ", food_price)
# HW2: ask quantity
# if ENTER = cancel
# if ent numb = calc total (func separate func)
# print total
# ask confirmation (y/n)
    # ask for customer name
# save the order data in data/<name>order.txt
q = int(input("How many? "))
total = q * food_price
print(food_name, "x", q, "=", total)
# file = open("copy.txt", "w")
# file.write("Your text goes here")
# file.close()
client_name = input("Your name pls: ")
# file = open("data/" + client_name + ".txt", "w")
# file.write(food_name + "|" + str(q) + str(food_price) + "|" + str(total))
# file.close()
with open("data/" + client_name + ".txt", "w") as file:
file.write(food_name + "|" + str(q) + "|" + str(food_price) + "|" + str(total))
def confirmation():
c = input("Press y/n for confirmation: ")
if c == "y":
print("Reservation confirmed!")
elif c == "n":
print("Reservation decline!")
elif c == "":
print("Cancel reservation")
else:
print("CK next time...")
def show_order_info():
client_name = input("Your name in data: ")
file = open("data/" + client_name + ".txt", "r")
data = file.read()
file.close()
print(data)
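
# Usage sketch (my addition; the menu dict and option value are made up, and
# the functions prompt on stdin, so run this interactively with a data/
# directory already present):
if __name__ == '__main__':
    sample_menu = {"pizza": 8, "soup": 4}
    process_option(sample_menu, 1)   # orders pizza, writes data/<name>.txt
    confirmation()
    show_order_info()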
|
normal
|
{
"blob_id": "07bd3c7cacbf8d0e39d06b21456258ad92cb2294",
"index": 676,
"step-1": "<mask token>\n",
"step-2": "def process_option(food, option):\n food_name = list(food.keys())[option - 1]\n food_price = food[food_name]\n print(food_price)\n print('You have chosen: ', option, food_name, '!', ' For unit price: ',\n food_price)\n q = int(input('How many? '))\n total = q * food_price\n print(food_name, 'x', q, '=', total)\n client_name = input('Your name pls: ')\n with open('data/' + client_name + '.txt', 'w') as file:\n file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +\n str(total))\n\n\n<mask token>\n",
"step-3": "def process_option(food, option):\n food_name = list(food.keys())[option - 1]\n food_price = food[food_name]\n print(food_price)\n print('You have chosen: ', option, food_name, '!', ' For unit price: ',\n food_price)\n q = int(input('How many? '))\n total = q * food_price\n print(food_name, 'x', q, '=', total)\n client_name = input('Your name pls: ')\n with open('data/' + client_name + '.txt', 'w') as file:\n file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +\n str(total))\n\n\ndef confirmation():\n c = input('Press y/n for confirmation: ')\n if c == 'y':\n print('Reservation confirmed!')\n elif c == 'n':\n print('Reservation decline!')\n elif c == '':\n print('Cancel reservation')\n else:\n print('CK next time...')\n\n\n<mask token>\n",
"step-4": "def process_option(food, option):\n food_name = list(food.keys())[option - 1]\n food_price = food[food_name]\n print(food_price)\n print('You have chosen: ', option, food_name, '!', ' For unit price: ',\n food_price)\n q = int(input('How many? '))\n total = q * food_price\n print(food_name, 'x', q, '=', total)\n client_name = input('Your name pls: ')\n with open('data/' + client_name + '.txt', 'w') as file:\n file.write(food_name + '|' + str(q) + '|' + str(food_price) + '|' +\n str(total))\n\n\ndef confirmation():\n c = input('Press y/n for confirmation: ')\n if c == 'y':\n print('Reservation confirmed!')\n elif c == 'n':\n print('Reservation decline!')\n elif c == '':\n print('Cancel reservation')\n else:\n print('CK next time...')\n\n\ndef show_order_info():\n client_name = input('Your name in data: ')\n file = open('data/' + client_name + '.txt', 'r')\n data = file.read()\n file.close()\n print(data)\n",
"step-5": "\r\n# module: order functionality\r\n\r\n\r\n# HW2: complete this func\r\n\r\ndef process_option(food, option):\r\n # print(food.keys())\r\n food_name = list(food.keys())[option-1]\r\n food_price = food[food_name]\r\n\r\n print(food_price)\r\n print(\"You have chosen: \", option, food_name, \"!\", \" For unit price: \", food_price)\r\n\r\n # HW2: ask quantity\r\n # if ENTER = cancel\r\n\r\n # if ent numb = calc total (func separate func)\r\n # print total\r\n # ask confirmation (y/n)\r\n # ask for costumer name\r\n # save the order data in data/<name>order.txt\r\n\r\n q = int(input(\"How many? \"))\r\n total = q * food_price\r\n print(food_name, \"x\", q, \"=\", total)\r\n\r\n\r\n # file = open(\"copy.txt\", \"w\")\r\n # file.write(\"Your text goes here\")\r\n # file.close()\r\n\r\n client_name = input(\"Your name pls: \")\r\n # file = open(\"data/\" + client_name + \".txt\", \"w\")\r\n # file.write(food_name + \"|\" + str(q) + str(food_price) + \"|\" + str(total))\r\n # file.close()\r\n\r\n with open(\"data/\" + client_name + \".txt\", \"w\") as file:\r\n file.write(food_name + \"|\" + str(q) + \"|\" + str(food_price) + \"|\" + str(total))\r\n\r\n\r\n\r\ndef confirmation():\r\n c = input(\"Press y/n for confirmation: \")\r\n if c == \"y\":\r\n print(\"Reservation confirmed!\")\r\n elif c == \"n\":\r\n print(\"Reservation decline!\")\r\n elif c == \"\":\r\n print(\"Cancel reservation\")\r\n else:\r\n print(\"CK next time...\")\r\n\r\n\r\ndef show_order_info():\r\n client_name = input(\"Your name in data: \")\r\n file = open(\"data/\" + client_name + \".txt\", \"r\")\r\n data = file.read()\r\n file.close()\r\n print(data)\r\n\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
table.py [-m] base1 base2 ... baseN
Combines output from base1.txt, base2.txt, etc., which are created by
the TestDriver (such as timcv.py) output, and displays tabulated
comparison statistics to stdout. Each input file is represented by
one column in the table.
Optional argument -m shows a final column with the mean value of each
statistic.
"""
def suck(f):
hamdevall = spamdevall = (0.0, 0.0)
cost = 0.0
bestcost = 0.0
fp = 0
fn = 0
un = 0
fpp = 0.0
fnp = 0.0
unp = 0.0
htest = 0
stest = 0
get = f.readline
while 1:
line = get()
if line.startswith('-> <stat> tested'):
print(line, end=' ')
elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:
vals = line.split(';')
mean = float(vals[1].split()[-1])
sdev = float(vals[2].split()[-1])
val = (mean, sdev)
ntested = int(vals[0].split()[-2])
typ = vals[0].split()[2]
if line.find('for all runs') != -1:
if typ == 'Ham':
hamdevall = val
htest = ntested
else:
spamdevall = val
stest = ntested
elif line.startswith('-> best cost for all runs: $'):
bestcost = float(line.split('$')[-1])
elif line.startswith('-> <stat> all runs false positives: '):
fp = int(line.split()[-1])
elif line.startswith('-> <stat> all runs false negatives: '):
fn = int(line.split()[-1])
elif line.startswith('-> <stat> all runs unsure: '):
un = int(line.split()[-1])
elif line.startswith('-> <stat> all runs false positive %: '):
fpp = float(line.split()[-1])
elif line.startswith('-> <stat> all runs false negative %: '):
fnp = float(line.split()[-1])
elif line.startswith('-> <stat> all runs unsure %: '):
unp = float(line.split()[-1])
elif line.startswith('-> <stat> all runs cost: '):
cost = float(line.split('$')[-1])
break
return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,
hamdevall, spamdevall)
def windowsfy(fn):
import os
if os.path.exists(fn + '.txt'):
return fn + '.txt'
else:
return fn
def table():
import getopt, sys
showMean = 0
fname = "filename: "
fnam2 = " "
ratio = "ham:spam: "
rat2 = " "
fptot = "fp total: "
fpper = "fp %: "
fntot = "fn total: "
fnper = "fn %: "
untot = "unsure t: "
unper = "unsure %: "
rcost = "real cost:"
bcost = "best cost:"
hmean = "h mean: "
hsdev = "h sdev: "
smean = "s mean: "
ssdev = "s sdev: "
meand = "mean diff:"
kval = "k: "
tfptot = tfpper = tfntot = tfnper = tuntot = tunper = trcost = tbcost = \
thmean = thsdev = tsmean = tssdev = tmeand = tkval = 0
args, fileargs = getopt.getopt(sys.argv[1:], 'm')
for arg, val in args:
if arg == "-m":
showMean = 1
for filename in fileargs:
filename = windowsfy(filename)
        (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,
         hamdevall, spamdevall) = suck(open(filename))
if filename.endswith('.txt'):
filename = filename[:-4]
filename = filename[filename.rfind('/')+1:]
filename = filename[filename.rfind("\\")+1:]
if len(fname) > len(fnam2):
fname += " "
fname = fname[0:(len(fnam2) + 12)]
fnam2 += " %11s" % filename
else:
fnam2 += " "
fnam2 = fnam2[0:(len(fname) + 12)]
fname += " %11s" % filename
if len(ratio) > len(rat2):
ratio += " "
ratio = ratio[0:(len(rat2) + 12)]
rat2 += " %11s" % ("%d:%d" % (htest, stest))
else:
rat2 += " "
rat2 = rat2[0:(len(ratio) + 12)]
ratio += " %11s" % ("%d:%d" % (htest, stest))
fptot += "%12d" % fp
tfptot += fp
fpper += "%12.2f" % fpp
tfpper += fpp
fntot += "%12d" % fn
tfntot += fn
fnper += "%12.2f" % fnp
tfnper += fnp
untot += "%12d" % un
tuntot += un
unper += "%12.2f" % unp
tunper += unp
rcost += "%12s" % ("$%.2f" % cost)
trcost += cost
bcost += "%12s" % ("$%.2f" % bestcost)
tbcost += bestcost
hmean += "%12.2f" % hamdevall[0]
thmean += hamdevall[0]
hsdev += "%12.2f" % hamdevall[1]
thsdev += hamdevall[1]
smean += "%12.2f" % spamdevall[0]
tsmean += spamdevall[0]
ssdev += "%12.2f" % spamdevall[1]
tssdev += spamdevall[1]
meand += "%12.2f" % (spamdevall[0] - hamdevall[0])
tmeand += (spamdevall[0] - hamdevall[0])
k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])
kval += "%12.2f" % k
tkval += k
nfiles = len(fileargs)
if nfiles and showMean:
fptot += "%12d" % (tfptot/nfiles)
fpper += "%12.2f" % (tfpper/nfiles)
fntot += "%12d" % (tfntot/nfiles)
fnper += "%12.2f" % (tfnper/nfiles)
untot += "%12d" % (tuntot/nfiles)
unper += "%12.2f" % (tunper/nfiles)
rcost += "%12s" % ("$%.2f" % (trcost/nfiles))
bcost += "%12s" % ("$%.2f" % (tbcost/nfiles))
hmean += "%12.2f" % (thmean/nfiles)
hsdev += "%12.2f" % (thsdev/nfiles)
smean += "%12.2f" % (tsmean/nfiles)
ssdev += "%12.2f" % (tssdev/nfiles)
meand += "%12.2f" % (tmeand/nfiles)
kval += "%12.2f" % (tkval/nfiles)
print(fname)
if len(fnam2.strip()) > 0:
print(fnam2)
print(ratio)
if len(rat2.strip()) > 0:
print(rat2)
print(fptot)
print(fpper)
print(fntot)
print(fnper)
print(untot)
print(unper)
print(rcost)
print(bcost)
print(hmean)
print(hsdev)
print(smean)
print(ssdev)
print(meand)
print(kval)
if __name__ == "__main__":
table()
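
# Example invocation (my addition; the file names are hypothetical and each
# must be TestDriver output captured as <name>.txt, e.g. from timcv.py):
#   python table.py -m cv_base cv_tuned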
|
normal
|
{
"blob_id": "4e94e9e2b45d3786aa86be800be882cc3d5a80b5",
"index": 8328,
"step-1": "<mask token>\n\n\ndef suck(f):\n hamdevall = spamdevall = 0.0, 0.0\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = mean, sdev\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef suck(f):\n hamdevall = spamdevall = 0.0, 0.0\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = mean, sdev\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\n\n\ndef windowsfy(fn):\n import os\n if os.path.exists(fn + '.txt'):\n return fn + '.txt'\n else:\n return fn\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef suck(f):\n hamdevall = spamdevall = 0.0, 0.0\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = mean, sdev\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\n\n\ndef windowsfy(fn):\n import os\n if os.path.exists(fn + '.txt'):\n return fn + '.txt'\n else:\n return fn\n\n\ndef table():\n import getopt, sys\n showMean = 0\n fname = 'filename: '\n fnam2 = ' '\n ratio = 'ham:spam: '\n rat2 = ' '\n fptot = 'fp total: '\n fpper = 'fp %: '\n fntot = 'fn total: '\n fnper = 'fn %: '\n untot = 'unsure t: '\n unper = 'unsure %: '\n rcost = 'real cost:'\n bcost = 'best cost:'\n hmean = 'h mean: '\n hsdev = 'h sdev: '\n smean = 's mean: '\n ssdev = 's sdev: '\n meand = 'mean diff:'\n kval = 'k: '\n (tfptot) = (tfpper) = (tfntot) = (tfnper) = (tuntot) = (tunper) = (trcost\n ) = (tbcost) = (thmean) = (thsdev) = (tsmean) = (tssdev) = (tmeand) = (\n tkval) = 0\n args, fileargs = getopt.getopt(sys.argv[1:], 'm')\n for arg, val in args:\n if arg == '-m':\n showMean = 1\n for filename in fileargs:\n filename = windowsfy(filename)\n (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost, hamdevall,\n spamdevall) = suck(file(filename))\n if filename.endswith('.txt'):\n filename = filename[:-4]\n filename = filename[filename.rfind('/') + 1:]\n filename = filename[filename.rfind('\\\\') + 1:]\n if len(fname) > len(fnam2):\n fname += ' '\n fname = fname[0:len(fnam2) + 12]\n fnam2 += ' %11s' % filename\n else:\n fnam2 += ' '\n fnam2 = fnam2[0:len(fname) + 12]\n fname += ' %11s' % filename\n if len(ratio) > len(rat2):\n ratio += ' '\n ratio = ratio[0:len(rat2) + 12]\n rat2 += ' %11s' % ('%d:%d' % (htest, stest))\n else:\n rat2 += ' '\n rat2 = rat2[0:len(ratio) + 12]\n ratio += ' %11s' % ('%d:%d' % (htest, stest))\n fptot += '%12d' % fp\n tfptot += fp\n fpper += '%12.2f' % fpp\n tfpper += fpp\n fntot += '%12d' % fn\n tfntot += fn\n fnper += '%12.2f' % fnp\n tfnper += fnp\n untot += '%12d' % un\n tuntot += un\n unper += '%12.2f' % unp\n tunper += unp\n rcost += '%12s' % ('$%.2f' % cost)\n trcost += cost\n bcost += '%12s' % ('$%.2f' % bestcost)\n tbcost += bestcost\n hmean += '%12.2f' % hamdevall[0]\n thmean += hamdevall[0]\n hsdev += '%12.2f' % hamdevall[1]\n thsdev 
+= hamdevall[1]\n smean += '%12.2f' % spamdevall[0]\n tsmean += spamdevall[0]\n ssdev += '%12.2f' % spamdevall[1]\n tssdev += spamdevall[1]\n meand += '%12.2f' % (spamdevall[0] - hamdevall[0])\n tmeand += spamdevall[0] - hamdevall[0]\n k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])\n kval += '%12.2f' % k\n tkval += k\n nfiles = len(fileargs)\n if nfiles and showMean:\n fptot += '%12d' % (tfptot / nfiles)\n fpper += '%12.2f' % (tfpper / nfiles)\n fntot += '%12d' % (tfntot / nfiles)\n fnper += '%12.2f' % (tfnper / nfiles)\n untot += '%12d' % (tuntot / nfiles)\n unper += '%12.2f' % (tunper / nfiles)\n rcost += '%12s' % ('$%.2f' % (trcost / nfiles))\n bcost += '%12s' % ('$%.2f' % (tbcost / nfiles))\n hmean += '%12.2f' % (thmean / nfiles)\n hsdev += '%12.2f' % (thsdev / nfiles)\n smean += '%12.2f' % (tsmean / nfiles)\n ssdev += '%12.2f' % (tssdev / nfiles)\n meand += '%12.2f' % (tmeand / nfiles)\n kval += '%12.2f' % (tkval / nfiles)\n print(fname)\n if len(fnam2.strip()) > 0:\n print(fnam2)\n print(ratio)\n if len(rat2.strip()) > 0:\n print(rat2)\n print(fptot)\n print(fpper)\n print(fntot)\n print(fnper)\n print(untot)\n print(unper)\n print(rcost)\n print(bcost)\n print(hmean)\n print(hsdev)\n print(smean)\n print(ssdev)\n print(meand)\n print(kval)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef suck(f):\n hamdevall = spamdevall = 0.0, 0.0\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = mean, sdev\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\n\n\ndef windowsfy(fn):\n import os\n if os.path.exists(fn + '.txt'):\n return fn + '.txt'\n else:\n return fn\n\n\ndef table():\n import getopt, sys\n showMean = 0\n fname = 'filename: '\n fnam2 = ' '\n ratio = 'ham:spam: '\n rat2 = ' '\n fptot = 'fp total: '\n fpper = 'fp %: '\n fntot = 'fn total: '\n fnper = 'fn %: '\n untot = 'unsure t: '\n unper = 'unsure %: '\n rcost = 'real cost:'\n bcost = 'best cost:'\n hmean = 'h mean: '\n hsdev = 'h sdev: '\n smean = 's mean: '\n ssdev = 's sdev: '\n meand = 'mean diff:'\n kval = 'k: '\n (tfptot) = (tfpper) = (tfntot) = (tfnper) = (tuntot) = (tunper) = (trcost\n ) = (tbcost) = (thmean) = (thsdev) = (tsmean) = (tssdev) = (tmeand) = (\n tkval) = 0\n args, fileargs = getopt.getopt(sys.argv[1:], 'm')\n for arg, val in args:\n if arg == '-m':\n showMean = 1\n for filename in fileargs:\n filename = windowsfy(filename)\n (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost, hamdevall,\n spamdevall) = suck(file(filename))\n if filename.endswith('.txt'):\n filename = filename[:-4]\n filename = filename[filename.rfind('/') + 1:]\n filename = filename[filename.rfind('\\\\') + 1:]\n if len(fname) > len(fnam2):\n fname += ' '\n fname = fname[0:len(fnam2) + 12]\n fnam2 += ' %11s' % filename\n else:\n fnam2 += ' '\n fnam2 = fnam2[0:len(fname) + 12]\n fname += ' %11s' % filename\n if len(ratio) > len(rat2):\n ratio += ' '\n ratio = ratio[0:len(rat2) + 12]\n rat2 += ' %11s' % ('%d:%d' % (htest, stest))\n else:\n rat2 += ' '\n rat2 = rat2[0:len(ratio) + 12]\n ratio += ' %11s' % ('%d:%d' % (htest, stest))\n fptot += '%12d' % fp\n tfptot += fp\n fpper += '%12.2f' % fpp\n tfpper += fpp\n fntot += '%12d' % fn\n tfntot += fn\n fnper += '%12.2f' % fnp\n tfnper += fnp\n untot += '%12d' % un\n tuntot += un\n unper += '%12.2f' % unp\n tunper += unp\n rcost += '%12s' % ('$%.2f' % cost)\n trcost += cost\n bcost += '%12s' % ('$%.2f' % bestcost)\n tbcost += bestcost\n hmean += '%12.2f' % hamdevall[0]\n thmean += hamdevall[0]\n hsdev += '%12.2f' % hamdevall[1]\n thsdev 
+= hamdevall[1]\n smean += '%12.2f' % spamdevall[0]\n tsmean += spamdevall[0]\n ssdev += '%12.2f' % spamdevall[1]\n tssdev += spamdevall[1]\n meand += '%12.2f' % (spamdevall[0] - hamdevall[0])\n tmeand += spamdevall[0] - hamdevall[0]\n k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])\n kval += '%12.2f' % k\n tkval += k\n nfiles = len(fileargs)\n if nfiles and showMean:\n fptot += '%12d' % (tfptot / nfiles)\n fpper += '%12.2f' % (tfpper / nfiles)\n fntot += '%12d' % (tfntot / nfiles)\n fnper += '%12.2f' % (tfnper / nfiles)\n untot += '%12d' % (tuntot / nfiles)\n unper += '%12.2f' % (tunper / nfiles)\n rcost += '%12s' % ('$%.2f' % (trcost / nfiles))\n bcost += '%12s' % ('$%.2f' % (tbcost / nfiles))\n hmean += '%12.2f' % (thmean / nfiles)\n hsdev += '%12.2f' % (thsdev / nfiles)\n smean += '%12.2f' % (tsmean / nfiles)\n ssdev += '%12.2f' % (tssdev / nfiles)\n meand += '%12.2f' % (tmeand / nfiles)\n kval += '%12.2f' % (tkval / nfiles)\n print(fname)\n if len(fnam2.strip()) > 0:\n print(fnam2)\n print(ratio)\n if len(rat2.strip()) > 0:\n print(rat2)\n print(fptot)\n print(fpper)\n print(fntot)\n print(fnper)\n print(untot)\n print(unper)\n print(rcost)\n print(bcost)\n print(hmean)\n print(hsdev)\n print(smean)\n print(ssdev)\n print(meand)\n print(kval)\n\n\nif __name__ == '__main__':\n table()\n",
"step-5": "\"\"\"\ntable.py [-m] base1 base2 ... baseN\nCombines output from base1.txt, base2.txt, etc., which are created by\nthe TestDriver (such as timcv.py) output, and displays tabulated\ncomparison statistics to stdout. Each input file is represented by\none column in the table.\nOptional argument -m shows a final column with the mean value of each\nstatistic.\n\"\"\"\ndef suck(f):\n hamdevall = spamdevall = (0.0, 0.0)\n cost = 0.0\n bestcost = 0.0\n fp = 0\n fn = 0\n un = 0\n fpp = 0.0\n fnp = 0.0\n unp = 0.0\n htest = 0\n stest = 0\n get = f.readline\n while 1:\n line = get()\n if line.startswith('-> <stat> tested'):\n print(line, end=' ')\n elif line.find(' items; mean ') > 0 and line.find('for all runs') > 0:\n vals = line.split(';')\n mean = float(vals[1].split()[-1])\n sdev = float(vals[2].split()[-1])\n val = (mean, sdev)\n ntested = int(vals[0].split()[-2])\n typ = vals[0].split()[2]\n if line.find('for all runs') != -1:\n if typ == 'Ham':\n hamdevall = val\n htest = ntested\n else:\n spamdevall = val\n stest = ntested\n elif line.startswith('-> best cost for all runs: $'):\n bestcost = float(line.split('$')[-1])\n elif line.startswith('-> <stat> all runs false positives: '):\n fp = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negatives: '):\n fn = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure: '):\n un = int(line.split()[-1])\n elif line.startswith('-> <stat> all runs false positive %: '):\n fpp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs false negative %: '):\n fnp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs unsure %: '):\n unp = float(line.split()[-1])\n elif line.startswith('-> <stat> all runs cost: '):\n cost = float(line.split('$')[-1])\n break\n return (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall)\ndef windowsfy(fn):\n import os\n if os.path.exists(fn + '.txt'):\n return fn + '.txt'\n else:\n return fn\ndef table():\n import getopt, sys\n showMean = 0\n fname = \"filename: \"\n fnam2 = \" \"\n ratio = \"ham:spam: \"\n rat2 = \" \"\n fptot = \"fp total: \"\n fpper = \"fp %: \"\n fntot = \"fn total: \"\n fnper = \"fn %: \"\n untot = \"unsure t: \"\n unper = \"unsure %: \"\n rcost = \"real cost:\"\n bcost = \"best cost:\"\n hmean = \"h mean: \"\n hsdev = \"h sdev: \"\n smean = \"s mean: \"\n ssdev = \"s sdev: \"\n meand = \"mean diff:\"\n kval = \"k: \"\n tfptot = tfpper = tfntot = tfnper = tuntot = tunper = trcost = tbcost = \\\n thmean = thsdev = tsmean = tssdev = tmeand = tkval = 0\n args, fileargs = getopt.getopt(sys.argv[1:], 'm')\n for arg, val in args:\n if arg == \"-m\":\n showMean = 1\n for filename in fileargs:\n filename = windowsfy(filename)\n (htest, stest, fp, fn, un, fpp, fnp, unp, cost, bestcost,\n hamdevall, spamdevall) = suck(file(filename))\n if filename.endswith('.txt'):\n filename = filename[:-4]\n filename = filename[filename.rfind('/')+1:]\n filename = filename[filename.rfind(\"\\\\\")+1:]\n if len(fname) > len(fnam2):\n fname += \" \"\n fname = fname[0:(len(fnam2) + 12)]\n fnam2 += \" %11s\" % filename\n else:\n fnam2 += \" \"\n fnam2 = fnam2[0:(len(fname) + 12)]\n fname += \" %11s\" % filename\n if len(ratio) > len(rat2):\n ratio += \" \"\n ratio = ratio[0:(len(rat2) + 12)]\n rat2 += \" %11s\" % (\"%d:%d\" % (htest, stest))\n else:\n rat2 += \" \"\n rat2 = rat2[0:(len(ratio) + 12)]\n ratio += \" %11s\" % (\"%d:%d\" % (htest, stest))\n fptot += \"%12d\" % fp\n tfptot += fp\n fpper += \"%12.2f\" % fpp\n tfpper += 
fpp\n fntot += \"%12d\" % fn\n tfntot += fn\n fnper += \"%12.2f\" % fnp\n tfnper += fnp\n untot += \"%12d\" % un\n tuntot += un\n unper += \"%12.2f\" % unp\n tunper += unp\n rcost += \"%12s\" % (\"$%.2f\" % cost)\n trcost += cost\n bcost += \"%12s\" % (\"$%.2f\" % bestcost)\n tbcost += bestcost\n hmean += \"%12.2f\" % hamdevall[0]\n thmean += hamdevall[0]\n hsdev += \"%12.2f\" % hamdevall[1]\n thsdev += hamdevall[1]\n smean += \"%12.2f\" % spamdevall[0]\n tsmean += spamdevall[0]\n ssdev += \"%12.2f\" % spamdevall[1]\n tssdev += spamdevall[1]\n meand += \"%12.2f\" % (spamdevall[0] - hamdevall[0])\n tmeand += (spamdevall[0] - hamdevall[0])\n k = (spamdevall[0] - hamdevall[0]) / (spamdevall[1] + hamdevall[1])\n kval += \"%12.2f\" % k\n tkval += k\n nfiles = len(fileargs)\n if nfiles and showMean:\n fptot += \"%12d\" % (tfptot/nfiles)\n fpper += \"%12.2f\" % (tfpper/nfiles)\n fntot += \"%12d\" % (tfntot/nfiles)\n fnper += \"%12.2f\" % (tfnper/nfiles)\n untot += \"%12d\" % (tuntot/nfiles)\n unper += \"%12.2f\" % (tunper/nfiles)\n rcost += \"%12s\" % (\"$%.2f\" % (trcost/nfiles))\n bcost += \"%12s\" % (\"$%.2f\" % (tbcost/nfiles))\n hmean += \"%12.2f\" % (thmean/nfiles)\n hsdev += \"%12.2f\" % (thsdev/nfiles)\n smean += \"%12.2f\" % (tsmean/nfiles)\n ssdev += \"%12.2f\" % (tssdev/nfiles)\n meand += \"%12.2f\" % (tmeand/nfiles)\n kval += \"%12.2f\" % (tkval/nfiles)\n print(fname)\n if len(fnam2.strip()) > 0:\n print(fnam2)\n print(ratio)\n if len(rat2.strip()) > 0:\n print(rat2)\n print(fptot)\n print(fpper)\n print(fntot)\n print(fnper)\n print(untot)\n print(unper)\n print(rcost)\n print(bcost)\n print(hmean)\n print(hsdev)\n print(smean)\n print(ssdev)\n print(meand)\n print(kval)\nif __name__ == \"__main__\":\n table()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Advent of Code: Day 4
"""A new system policy has been put in place that requires all accounts to
use a passphrase instead of simply a password. A passphrase consists of a
series of words (lowercase letters) separated by spaces.
To ensure security, a valid passphrase must contain no duplicate words.
"""
def valid(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
result = 0
for line in lines:
split = line.rstrip().split(' ')
if len(split) == len(set(split)):
result += 1
return result
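
# e.g. "aa bb cc" counts as valid here, while "aa bb aa" does not.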
"""For added security, yet another system policy has been put in place.
Now, a valid passphrase must contain no two words that are anagrams of
each other - that is, a passphrase is invalid if any word's letters can
be rearranged to form any other word in the passphrase.
"""
def valid_anagram(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
result = len(lines)
for line in lines:
split = line.rstrip().split(' ')
split = [sorted(s) for s in split]
for word in split:
if split.count(word) > 1:
result -= 1
break
return result
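
# e.g. "abcde fghij" stays valid, while "abcde edcba" is rejected (anagrams).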
if __name__ == '__main__':
print(valid('day4-input.txt'))
print(valid_anagram('day4-input.txt'))
|
normal
|
{
"blob_id": "7dce240a891e807b1f5251a09a69368f4e513973",
"index": 4472,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef valid(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = 0\n for line in lines:\n split = line.rstrip().split(' ')\n if len(split) == len(set(split)):\n result += 1\n return result\n\n\n<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef valid(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = 0\n for line in lines:\n split = line.rstrip().split(' ')\n if len(split) == len(set(split)):\n result += 1\n return result\n\n\n<mask token>\n\n\ndef valid_anagram(filename):\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n result = len(lines)\n for line in lines:\n split = line.rstrip().split(' ')\n split = [sorted(s) for s in split]\n for word in split:\n if split.count(word) > 1:\n result -= 1\n break\n return result\n\n\nif __name__ == '__main__':\n print(valid('day4-input.txt'))\n print(valid_anagram('day4-input.txt'))\n",
"step-5": "# Advent of Code: Day 4\n\n\"\"\"A new system policy has been put in place that requires all accounts to \nuse a passphrase instead of simply a password. A passphrase consists of a \nseries of words (lowercase letters) separated by spaces.\n\nTo ensure security, a valid passphrase must contain no duplicate words.\n\n\"\"\"\ndef valid(filename):\n\tf = open(filename, 'r')\n\tlines = f.readlines()\n\tf.close()\n\t\n\tresult = 0\n\tfor line in lines:\n\t\tsplit = line.rstrip().split(' ')\n\t\tif len(split) == len(set(split)):\n\t\t\tresult += 1\t\t\n\t\t\t\n\treturn result\n\t\n\n\"\"\"For added security, yet another system policy has been put in place. \nNow, a valid passphrase must contain no two words that are anagrams of \neach other - that is, a passphrase is invalid if any word's letters can \nbe rearranged to form any other word in the passphrase.\n\n\"\"\"\t\t\ndef valid_anagram(filename):\n\tf = open(filename, 'r')\n\tlines = f.readlines()\n\tf.close()\n\t\n\tresult = len(lines)\n\tfor line in lines:\n\t\tsplit = line.rstrip().split(' ')\n\t\tsplit = [sorted(s) for s in split]\n\t\tfor word in split:\n\t\t\tif split.count(word) > 1:\n\t\t\t\tresult -= 1\n\t\t\t\tbreak\t\t\n\t\t\t\n\treturn result\t\n\t\n\t\nif __name__ == '__main__':\n\tprint(valid('day4-input.txt'))\n\tprint(valid_anagram('day4-input.txt'))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# THIS FILE WAS CREATED IN THIS DIRECTORY EARLIER, NOW MOVED TO ROOT OF THE REPO
print "Hello buddy"
print "Let's get started"
spy_name = raw_input ("What is your spy name? ")
if len(spy_name) >3:
print "Welcome " + spy_name + ". Glad to have you with us."
spy_salutation= raw_input("What's your title? ")
if spy_salutation == "Mr." or spy_salutation =="Ms.":
spy_name = spy_salutation + " " + spy_name
print "Welcome " + spy_name + ". Let me know about you a bit more."
        spy_age = input("Please enter your age: ")
if 50>spy_age>18:
print "Your age is Correct."
spy_rating = input("Please enter your rating ")
if spy_rating>=5.0:
print "Great spy"
elif 3.5<=spy_rating<5.0:
print "Good spy"
elif 2<=spy_rating<3.5:
print "Not bad."
else :
print "Not good. Need hardwork"
spy_is_active = True
            print "Authentication process completed successfully. Welcome " + spy_name + ", age: " + str(spy_age) + " and rating: " + str(spy_rating) + ". Glad to have you with us."
else:
print "Sorry, you are not eligible to be a spy"
else:
print "Invalid Information."
else:
    print "Oops! Please enter a valid name."
|
normal
|
{
"blob_id": "79f03af05fb40f5f5247b582eabae2dc125e6b52",
"index": 4522,
"step-1": "# THIS FILE WAS CREATED IN THIS DIRECTORY EARLIER, NOW MOIVED TO ROOT OF THE REPO\r\n\r\n\r\nprint \"Hello buddy\"\r\nprint \"Let's get started\"\r\nspy_name = raw_input (\"What is your spy name? \")\r\nif len(spy_name) >3:\r\n print \"Welcome \" + spy_name + \". Glad to have you with us.\"\r\n spy_salutation= raw_input(\"What's your title? \")\r\n if spy_salutation == \"Mr.\" or spy_salutation ==\"Ms.\":\r\n spy_name = spy_salutation + \" \" + spy_name\r\n print \"Welcome \" + spy_name + \". Let me know about you a bit more.\"\r\n spy_age = input(\"Please enter your age\")\r\n if 50>spy_age>18:\r\n print \"Your age is Correct.\"\r\n spy_rating = input(\"Please enter your rating \")\r\n if spy_rating>=5.0:\r\n print \"Great spy\"\r\n elif 3.5<=spy_rating<5.0:\r\n print \"Good spy\"\r\n elif 2<=spy_rating<3.5:\r\n print \"Not bad.\"\r\n else :\r\n print \"Not good. Need hardwork\"\r\n spy_is_active = True\r\n print \"Authentication process completed successfully. Welcome \" +spy_name+ \"age: \" + str(spy_age) + \" and rating: \" + str(spy_rating) + \" Glad to have ypou with us.\"\r\n\r\n else:\r\n print \"Sorry, you are not eligible to be a spy\"\r\n else:\r\n print \"Invalid Information.\"\r\nelse:\r\n print \"Opps! please enter a valid name.\"\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding:utf-8 -*-
import time
class Base:
def getTime(self):
'''
        Get the current timestamp
        :return: Unix time in whole seconds, as a string
'''
return str(time.time()).split('.')[0]
|
normal
|
{
"blob_id": "28a920072bad1b411d71f7f70cd991cb7dfbeb8c",
"index": 8754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Base:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Base:\n\n def getTime(self):\n \"\"\"\n 获取时间戳\n :return: \n \"\"\"\n return str(time.time()).split('.')[0]\n",
"step-4": "import time\n\n\nclass Base:\n\n def getTime(self):\n \"\"\"\n 获取时间戳\n :return: \n \"\"\"\n return str(time.time()).split('.')[0]\n",
"step-5": "# -*- coding:utf-8 -*-\nimport time\nclass Base:\n def getTime(self):\n '''\n 获取时间戳\n :return: \n '''\n return str(time.time()).split('.')[0]",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Created on 02.09.2013
@author: Paul Schweizer
@email: paulschweizer@gmx.net
@brief: Holds all the namingconventions for pandora's box
"""
import os
import json
class NamingConvention():
"""Imports naming conventions from the respective .json file and puts them
into class variables.
"""
def __init__(self):
namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'data', 'strings', 'namingconvention.json')
namingconventions = json.load(open(namingconventions))
for key, value in namingconventions.items():
setattr(NamingConvention, key, value)
# end for constant in constants
# end def __init__
# end class NamingConvention
|
normal
|
{
"blob_id": "d2a153fffccd4b681eebce823e641e195197cde7",
"index": 54,
"step-1": "<mask token>\n\n\nclass NamingConvention:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass NamingConvention:\n <mask token>\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-3": "<mask token>\n\n\nclass NamingConvention:\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-4": "<mask token>\nimport os\nimport json\n\n\nclass NamingConvention:\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(\n __file__)), 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n",
"step-5": "\"\"\"\nCreated on 02.09.2013\n@author: Paul Schweizer\n@email: paulschweizer@gmx.net\n@brief: Holds all the namingconventions for pandora's box\n\"\"\"\n\nimport os\nimport json\n\n\nclass NamingConvention():\n \"\"\"Imports naming conventions from the respective .json file and puts them\n into class variables.\n \"\"\"\n def __init__(self):\n namingconventions = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'data', 'strings', 'namingconvention.json')\n namingconventions = json.load(open(namingconventions))\n for key, value in namingconventions.items():\n setattr(NamingConvention, key, value)\n # end for constant in constants\n # end def __init__\n# end class NamingConvention\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import argparse
import cv2
import numpy as np
refPt = []
cropping = False
def click_and_crop(event, x, y, flags, param):
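	# Mouse callback: record the drag's start and end corners and draw the selection box.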
global refPt, cropping
if event == cv2.EVENT_LBUTTONDOWN:
refPt = [(x, y)]
cropping = True
elif event == cv2.EVENT_LBUTTONUP:
refPt.append((x, y))
cropping = False
cv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)
cv2.imshow("image", image)
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", click_and_crop)
while True:
cv2.imshow("image", image)
key = cv2.waitKey(1) & 0xFF
if key == ord("r"):
image = clone.copy()
elif key == ord("c"):
break
if len(refPt) == 2:
roi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
cv2.imshow("ROI", roi)
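	# Average the B, G, R channels over every pixel in the ROI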
count=0
sum=np.array([0,0,0])
for i in range (0,np.size(roi,0)):
for j in range(0,np.size(roi,1)):
count+=1
sum+=roi[i,j]
print "Average bgr: ",sum/count
cv2.waitKey(0)
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "986df5a41bc87ecb390dfbd1db9e1f5cd6c5b8fb",
"index": 9702,
"step-1": "\nimport argparse\nimport cv2\nimport numpy as np\n \n\nrefPt = []\ncropping = False\n \ndef click_and_crop(event, x, y, flags, param):\n\tglobal refPt, cropping\n \n\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\trefPt = [(x, y)]\n\t\tcropping = True\n \n\telif event == cv2.EVENT_LBUTTONUP:\n\t\trefPt.append((x, y))\n\t\tcropping = False\n \n\t\n\t\tcv2.rectangle(image, refPt[0], refPt[1], (0, 255, 0), 2)\n\t\tcv2.imshow(\"image\", image)\n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\nargs = vars(ap.parse_args())\n \nimage = cv2.imread(args[\"image\"])\nclone = image.copy()\ncv2.namedWindow(\"image\")\ncv2.setMouseCallback(\"image\", click_and_crop)\n \n\nwhile True:\n\tcv2.imshow(\"image\", image)\n\tkey = cv2.waitKey(1) & 0xFF\n \n\n\tif key == ord(\"r\"):\n\t\timage = clone.copy()\n \n\telif key == ord(\"c\"):\n\t\tbreak\n \n\nif len(refPt) == 2:\n\troi = clone[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]\n\tcv2.imshow(\"ROI\", roi)\n\tcount=0\n\tsum=np.array([0,0,0])\n\tfor i in range (0,np.size(roi,0)):\n\t\tfor j in range(0,np.size(roi,1)):\n\t\t\tcount+=1\n\t\t\tsum+=roi[i,j]\n\tprint \"Average bgr: \",sum/count\n\tcv2.waitKey(0)\n \n\ncv2.destroyAllWindows()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#coding=utf-8
#########################################
# dbscan:
# Usage: reads an input GPS log file,
# generates path and cluster files, and prints the classification accuracy
#########################################
from matplotlib.pyplot import *
import matplotlib.pyplot as plt
from collections import defaultdict
import random
from math import *
import numpy as np
import datetime
from dateutil.parser import parse
import time
def dataset(filename):
    # Read the raw input file
lines = open(filename,'r').readlines()
l = len(lines)
all_points = []
for i in range(l):
if lines[i].strip():
line = lines[i].split()
time = line[0] +' '+ line[1]
lat = float(line[4])
lon = float(line[6])
all_points.append([lat,lon,time])
return all_points
def datarevise(all_points):
    # Smooth the data with a 5-point moving average
point_new = []
all_points1 = np.array(all_points)
l = len(all_points)
for i in range(2,l-3):
lat_lon = np.array(all_points1[i-2:i+3,:-1],dtype = float).mean(0)
point_new.append([lat_lon[0],lat_lon[1],all_points1[i][-1]])
return point_new
def dist(p1, p2):
    # Approximate distance in km between two (lat, lon) points
a = cos(p1[0])*cos(p2[0])
b = sin(p1[0])*sin(p2[0])*cos(p2[1]-p1[1])
if a+b >=1:
return 0
return acos(float(a+b))*6371*pi/180
def find_core(all_points,E,minPts):
    # Find the core points
    # Returns: core points, points to plot, non-core points
other_points =[]
core_points=[]
plotted_points=[]
for point in all_points:
        point.append(0) # initial cluster label is 0
        total = 0 # number of points within distance E of this point
for otherPoint in all_points:
distance = dist(otherPoint,point)
if distance <= E:
total += 1
if total > minPts:
core_points.append(point)
plotted_points.append(point)
else:
other_points.append(point)
return core_points,plotted_points,other_points
def find_border(core_points,plotted_points,other_points,E):
    # Find the border points among the non-core points
    # Returns: border points, points to plot
border_points=[]
for core in core_points:
for other in other_points:
            if dist(core,other) <= E: # a border point lies within E of some core point
border_points.append(other)
plotted_points.append(other)
return border_points,plotted_points
def algorithm(all_points,core_points,border_points,plotted_points,E):
    # Returns the clusters and the noise points
    # Assign every core point to a cluster
cluster_label = 0
for point in core_points:
if point[-1] == 0:
cluster_label += 1
point[-1] = cluster_label
for point2 in plotted_points:
distance = dist(point2,point)
if point2[-1] ==0 and distance <= E:
point2[-1] =point[-1]
    # Collect the labelled points into a dict keyed by cluster label
cluster_dict = {}
for point in plotted_points:
if cluster_dict.get(point[-1]) is None:
cluster_dict[point[-1]] = [point[0:-1]]
else:
cluster_dict[point[-1]].append(point[0:-1])
    # Sort the points inside each cluster by time
cluster_dict_sort = {}
for lable in cluster_dict:
cluster_dict_sort.setdefault(lable,[])
cl = np.array(cluster_dict[lable])
cl_sort = cl[cl[:,-1].argsort()]
cluster_dict_sort[lable] = cl_sort
    # Noise points: neither core points nor border points
noise_points=[]
for point in all_points:
if point not in core_points and point not in border_points:
noise_points.append(point[0:-1])
return cluster_dict_sort,noise_points
def durtime(noise_points,difftime):
    # Input: noise points and a time-gap threshold (seconds)
    # Splits the noise points into separate paths wherever consecutive points are further apart than difftime
    # Output: a list of paths, e.g. [[...], [...]]
no = np.array(noise_points)
no_sort = no[no[:,-1].argsort()]
l = len(no_sort)
k = [0]
for i in range(l-1):
diff_time = (no_sort[i+1][-1] - no_sort[i][-1]).seconds
if diff_time > difftime:
k.append(i+1)
k.append(l)
no_split = []
for i in range(len(k)-1):
no_split.append(no_sort[k[i]:k[i+1]])
return no_split
def matplotshow(cluster_dict,no_split,name):
    # Plot each cluster
markers = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
i=0
for lable in cluster_dict:
for j in cluster_dict[lable]:
plot(j[0], j[1],markers[i])
i += 1
i = i%10
print i
    # Plot each path
markers = ['r', 'b', 'g', 'k', 'c', 'y', 'm',]
l =len(no_split)
for i in range(l):
path = np.array(no_split[i])
plt.plot(path[:,0],path[:,1],markers[i%7])
print i
title(" clusters created with E ="+str(E)+" Min Points="+str(minPts)+" total points="+str(len(all_points))+" noise Points = "+ str(len(noise_points)))
savefig(name)
show()
def datewrite(no_split,filename,mark):
f = open(filename,'w+')
for path in no_split:
f.write( str(mark) +'\n')
for no_path in path:
f.write(str(list(no_path))+'\n')
f.close()
def datewrite1(no_split,filename,mark):
f = open(filename,'w+')
for path in no_split:
for no_path in path:
f.write( str(mark) +'\n')
for j in no_path:
f.write(str(list(j))+'\n')
f.close()
if __name__ == '__main__':
filename = 'D:/sensor_data/sensor/gps/location_zh0710.txt'
all_points_old = dataset(filename)
all_points = datarevise(all_points_old)
E,minPts = 0.1,10
core_points,plotted_points,other_points = find_core(all_points,E,minPts)
border_points,plotted_points = find_border(core_points,plotted_points,other_points,E)
    cluster_dict,noise_points = algorithm(all_points,core_points,border_points,plotted_points,E)
difftime = 1200
no_split = durtime(noise_points,difftime)
matplotshow(cluster_dict,no_split,"location_zh0710.png")
filename = 'D:/sensor_data/sensor/gps/location_zh0710_no_split.txt'
datewrite(no_split,filename,'path')
filename = 'D:/sensor_data/sensor/gps/location_zh0710_cluster.txt'
datewrite(cluster_dict.values(),filename,'lable')
|
normal
|
{
"blob_id": "99c839eddcbe985c81e709878d03c59e3be3c909",
"index": 293,
"step-1": "#coding=utf-8\n######################################### \n# dbscan: \n# 用法说明:读取文件\n# 生成路径文件及簇文件,输出分类准确率 \n######################################### \n\n\nfrom matplotlib.pyplot import *\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict \nimport random\nfrom math import *\nimport numpy\nimport datetime\nfrom dateutil.parser import parse\nimport datetime\nimport time\n\n\n\ndef dataset(filename):\n #读取原始文件\n lines = open(filename,'r').readlines()\n l = len(lines)\n all_points = [] \n for i in range(l):\n if lines[i].strip():\n line = lines[i].split()\n time = line[0] +' '+ line[1]\n lat = float(line[4])\n lon = float(line[6])\n all_points.append([lat,lon,time])\n return all_points\n\ndef datarevise(all_points):\n #数据平滑处理\n point_new = []\n all_points1 = np.array(all_points)\n l = len(all_points)\n for i in range(2,l-3):\n lat_lon = np.array(all_points1[i-2:i+3,:-1],dtype = float).mean(0)\n point_new.append([lat_lon[0],lat_lon[1],all_points1[i][-1]])\n return point_new\n\n \ndef dist(p1, p2):\n #计算亮点之间的距离\n a = cos(p1[0])*cos(p2[0])\n b = sin(p1[0])*sin(p2[0])*cos(p2[1]-p1[1])\n if a+b >=1:\n return 0\n return acos(float(a+b))*6371*pi/180\n\ndef find_core(all_points,E,minPts):\n #查找核心点\n #输出:核心点,要绘制的点,非核心点\n other_points =[] \n core_points=[] \n plotted_points=[]\n for point in all_points:\n point.append(0) # 初始点标号为0\n total = 0 #计数:对每个点周围大于给定距离的点的个数\n for otherPoint in all_points:\n distance = dist(otherPoint,point)\n if distance <= E:\n total += 1\n if total > minPts:\n core_points.append(point)\n plotted_points.append(point)\n else:\n other_points.append(point)\n return core_points,plotted_points,other_points\n\ndef find_border(core_points,plotted_points,other_points,E):\n #在非核心点查找边界点\n #输出:边界点,要绘制的点\n border_points=[]\n for core in core_points:\n for other in other_points:\n if dist(core,other) <= E:#边界点的与核心点的距离小于E\n border_points.append(other)\n plotted_points.append(other)\n return border_points,plotted_points\n\n\ndef algorithm(all_points,core_points,border_points,plotted_points,E):\n # 返回簇,噪声点\n \n #将所有的核心点分成不同的簇\n cluster_label = 0\n for point in core_points:\n if point[-1] == 0:\n cluster_label += 1\n point[-1] = cluster_label\n for point2 in plotted_points:\n distance = dist(point2,point)\n if point2[-1] ==0 and distance <= E:\n point2[-1] =point[-1]\n #将点集标号类型写成字典格式 \n cluster_dict = {}\n for point in plotted_points:\n if cluster_dict.get(point[-1]) is None:\n cluster_dict[point[-1]] = [point[0:-1]]\n else:\n cluster_dict[point[-1]].append(point[0:-1])\n\n #将簇中各个点按时间排序\n cluster_dict_sort = {}\n for lable in cluster_dict:\n cluster_dict_sort.setdefault(lable,[])\n cl = np.array(cluster_dict[lable])\n cl_sort = cl[cl[:,-1].argsort()]\n cluster_dict_sort[lable] = cl_sort\n \n #噪声点,既不在边界点也不在核心点中 \n noise_points=[]\n for point in all_points:\n if point not in core_points and point not in border_points:\n noise_points.append(point[0:-1])\n return cluster_dict_sort,noise_points\n\n\n\ndef durtime(noise_points,difftime):\n # 输入:噪声点,时间间隔\n # 功能:分成不同的路径\n # 输出:路径点[[],[]]\n no = np.array(noise_points)\n no_sort = no[no[:,-1].argsort()]\n l = len(no_sort)\n k = [0]\n for i in range(l-1):\n diff_time = (no_sort[i+1][-1] - no_sort[i][-1]).seconds\n if diff_time > difftime:\n k.append(i+1)\n k.append(l)\n no_split = []\n for i in range(len(k)-1):\n no_split.append(no_sort[k[i]:k[i+1]])\n return no_split\n\ndef matplotshow(cluster_dict,no_split,name):\n #画出各个簇\n markers = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']\n i=0\n for lable in 
cluster_dict:\n for j in cluster_dict[lable]:\n plot(j[0], j[1],markers[i])\n i += 1\n i = i%10\n print i \n #画出路径\n markers = ['r', 'b', 'g', 'k', 'c', 'y', 'm',]\n l =len(no_split)\n for i in range(l):\n path = np.array(no_split[i])\n plt.plot(path[:,0],path[:,1],markers[i%7])\n print i\n title(\" clusters created with E =\"+str(E)+\" Min Points=\"+str(minPts)+\" total points=\"+str(len(all_points))+\" noise Points = \"+ str(len(noise_points)))\n savefig(name)\n show()\n\n \ndef datewrite(no_split,filename,mark): \n f = open(filename,'w+')\n for path in no_split:\n f.write( str(mark) +'\\n')\n for no_path in path:\n f.write(str(list(no_path))+'\\n') \n f.close()\n\ndef datewrite1(no_split,filename,mark): \n f = open(filename,'w+')\n for path in no_split:\n for no_path in path:\n f.write( str(mark) +'\\n')\n for j in no_path:\n f.write(str(list(j))+'\\n') \n f.close()\n \nif __name__ == '__main__':\n filename = 'D:/sensor_data/sensor/gps/location_zh0710.txt'\n all_points_old = dataset(filename)\n all_points = datarevise(all_points_old)\n E,minPts = 0.1,10\n core_points,plotted_points,other_points = find_core(all_points,E,minPts)\n border_points,plotted_points = find_border(core_points,plotted_points,other_points,E)\n cluster_dict,noise_points = algorithm(all_points,border_points,core_points,plotted_points,E)\n difftime = 1200\n no_split = durtime(noise_points,difftime)\n matplotshow(cluster_dict,no_split,\"location_zh0710.png\")\n filename = 'D:/sensor_data/sensor/gps/location_zh0710_no_split.txt'\n datewrite(no_split,filename,'path')\n filename = 'D:/sensor_data/sensor/gps/location_zh0710_cluster.txt'\n datewrite(cluster_dict.values(),filename,'lable')\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import argparse
def wrong_subtraction(n, k):
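    # Apply the "wrong subtraction" k times: drop a trailing zero, otherwise subtract 1.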
output = n
for i in range(k):
string_n = str(output)
if string_n[len(string_n) - 1] == '0':
output = int(string_n[:-1])
else:
output -= 1
return output
# d = "Do the wrong subtraction as per https://codeforces.com/problemset/problem/977/A"
#
# parser = argparse.ArgumentParser(description=d)
#
# parser.add_argument("n", type=int, help="input value for n")
# parser.add_argument("k", type=int, help="input value for k")
#
# args = parser.parse_args()
#
# n = args.n
# k = args.k
a = list(map(int, input().split()))
n = a[0]
k = a[1]
print(wrong_subtraction(n, k))
|
normal
|
{
"blob_id": "166a8cd0e09fbec739f43019659eeaf98b1d4fa4",
"index": 4446,
"step-1": "<mask token>\n\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n\n<mask token>\nprint(wrong_subtraction(n, k))\n",
"step-3": "<mask token>\n\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n\na = list(map(int, input().split()))\nn = a[0]\nk = a[1]\nprint(wrong_subtraction(n, k))\n",
"step-4": "import argparse\n\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n\na = list(map(int, input().split()))\nn = a[0]\nk = a[1]\nprint(wrong_subtraction(n, k))\n",
"step-5": "import argparse\n\ndef wrong_subtraction(n, k):\n output = n\n for i in range(k):\n string_n = str(output)\n if string_n[len(string_n) - 1] == '0':\n output = int(string_n[:-1])\n else:\n output -= 1\n return output\n\n# d = \"Do the wrong subtraction as per https://codeforces.com/problemset/problem/977/A\"\n# \n# parser = argparse.ArgumentParser(description=d)\n# \n# parser.add_argument(\"n\", type=int, help=\"input value for n\")\n# parser.add_argument(\"k\", type=int, help=\"input value for k\")\n# \n# args = parser.parse_args()\n# \n# n = args.n\n# k = args.k\n\na = list(map(int, input().split()))\nn = a[0]\nk = a[1]\n\nprint(wrong_subtraction(n, k))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# @Author: Chen yunsheng(Leo YS CHen)
# @Location: Taiwan
# @E-mail:leoyenschen@gmail.com
# @Date: 2017-02-14 00:11:27
# @Last Modified by: Chen yunsheng
import click
from qstrader import settings
from qstrader.compat import queue
from qstrader.price_parser import PriceParser
from qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler
from qstrader.strategy import Strategies, DisplayStrategy
from qstrader.risk_manager.example import ExampleRiskManager
from qstrader.portfolio_handler import PortfolioHandler
from qstrader.compliance.example import ExampleCompliance
from qstrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler
from qstrader.statistics.simple import SimpleStatistics
from qstrader.trading_session.backtest import Backtest
#====================================================
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0,dir)
print("parentdir")
print(parentdir)
print("dir")
print(dir)
from custom_strategy import CustomStrategy
from custom_position import CustomPositionSizer
def run(config, testing, tickers, filename):
# Set up variables needed for backtest
events_queue = queue.Queue()
csv_dir = config.CSV_DATA_DIR
initial_equity = PriceParser.parse(500000.00)
# Use Yahoo Daily Price Handler
price_handler = YahooDailyCsvBarPriceHandler(
csv_dir, events_queue, tickers
)
# Use the Buy and Hold Strategy
strategy = CustomStrategy(tickers, events_queue)
strategy = Strategies(strategy, DisplayStrategy())
# Use an example Position Sizer
position_sizer = CustomPositionSizer()
# Use an example Risk Manager
risk_manager = ExampleRiskManager()
# Use the default Portfolio Handler
portfolio_handler = PortfolioHandler(
initial_equity, events_queue, price_handler,
position_sizer, risk_manager
)
# Use the ExampleCompliance component
compliance = ExampleCompliance(config)
# Use a simulated IB Execution Handler
execution_handler = IBSimulatedExecutionHandler(
events_queue, price_handler, compliance
)
# Use the default Statistics
statistics = SimpleStatistics(config, portfolio_handler)
# Set up the backtest
backtest = Backtest(
price_handler, strategy,
portfolio_handler, execution_handler,
position_sizer, risk_manager,
statistics, initial_equity
)
results = backtest.simulate_trading(testing=testing)
statistics.save(filename)
return results
"""
@click.command()
@click.option('--config', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')
@click.option('--testing/--no-testing', default=False, help='Enable testing mode')
@click.option('--tickers', default='SP500TR', help='Tickers (use comma)')
@click.option('--filename', default='', help='Pickle (.pkl) statistics filename')
"""
def main(config, testing, tickers, filename):
tickers = tickers.split(",")
config = settings.from_file(config, testing)
run(config, testing, tickers, filename)
if __name__ == "__main__":
main(settings.DEFAULT_CONFIG_FILENAME,False,'SP500TR','')
|
normal
|
{
"blob_id": "0cec92bbfad87020baf5ef1bd005e64bc9a6ed01",
"index": 5232,
"step-1": "<mask token>\n\n\ndef run(config, testing, tickers, filename):\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.0)\n price_handler = YahooDailyCsvBarPriceHandler(csv_dir, events_queue, tickers\n )\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n position_sizer = CustomPositionSizer()\n risk_manager = ExampleRiskManager()\n portfolio_handler = PortfolioHandler(initial_equity, events_queue,\n price_handler, position_sizer, risk_manager)\n compliance = ExampleCompliance(config)\n execution_handler = IBSimulatedExecutionHandler(events_queue,\n price_handler, compliance)\n statistics = SimpleStatistics(config, portfolio_handler)\n backtest = Backtest(price_handler, strategy, portfolio_handler,\n execution_handler, position_sizer, risk_manager, statistics,\n initial_equity)\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\n<mask token>\n\n\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(',')\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, dir)\nprint('parentdir')\nprint(parentdir)\nprint('dir')\nprint(dir)\n<mask token>\n\n\ndef run(config, testing, tickers, filename):\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.0)\n price_handler = YahooDailyCsvBarPriceHandler(csv_dir, events_queue, tickers\n )\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n position_sizer = CustomPositionSizer()\n risk_manager = ExampleRiskManager()\n portfolio_handler = PortfolioHandler(initial_equity, events_queue,\n price_handler, position_sizer, risk_manager)\n compliance = ExampleCompliance(config)\n execution_handler = IBSimulatedExecutionHandler(events_queue,\n price_handler, compliance)\n statistics = SimpleStatistics(config, portfolio_handler)\n backtest = Backtest(price_handler, strategy, portfolio_handler,\n execution_handler, position_sizer, risk_manager, statistics,\n initial_equity)\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\n<mask token>\n\n\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(',')\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\nif __name__ == '__main__':\n main(settings.DEFAULT_CONFIG_FILENAME, False, 'SP500TR', '')\n",
"step-3": "<mask token>\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, dir)\nprint('parentdir')\nprint(parentdir)\nprint('dir')\nprint(dir)\n<mask token>\n\n\ndef run(config, testing, tickers, filename):\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.0)\n price_handler = YahooDailyCsvBarPriceHandler(csv_dir, events_queue, tickers\n )\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n position_sizer = CustomPositionSizer()\n risk_manager = ExampleRiskManager()\n portfolio_handler = PortfolioHandler(initial_equity, events_queue,\n price_handler, position_sizer, risk_manager)\n compliance = ExampleCompliance(config)\n execution_handler = IBSimulatedExecutionHandler(events_queue,\n price_handler, compliance)\n statistics = SimpleStatistics(config, portfolio_handler)\n backtest = Backtest(price_handler, strategy, portfolio_handler,\n execution_handler, position_sizer, risk_manager, statistics,\n initial_equity)\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\n<mask token>\n\n\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(',')\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\nif __name__ == '__main__':\n main(settings.DEFAULT_CONFIG_FILENAME, False, 'SP500TR', '')\n",
"step-4": "import click\nfrom qstrader import settings\nfrom qstrader.compat import queue\nfrom qstrader.price_parser import PriceParser\nfrom qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler\nfrom qstrader.strategy import Strategies, DisplayStrategy\nfrom qstrader.risk_manager.example import ExampleRiskManager\nfrom qstrader.portfolio_handler import PortfolioHandler\nfrom qstrader.compliance.example import ExampleCompliance\nfrom qstrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler\nfrom qstrader.statistics.simple import SimpleStatistics\nfrom qstrader.trading_session.backtest import Backtest\nimport os, sys\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, dir)\nprint('parentdir')\nprint(parentdir)\nprint('dir')\nprint(dir)\nfrom custom_strategy import CustomStrategy\nfrom custom_position import CustomPositionSizer\n\n\ndef run(config, testing, tickers, filename):\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.0)\n price_handler = YahooDailyCsvBarPriceHandler(csv_dir, events_queue, tickers\n )\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n position_sizer = CustomPositionSizer()\n risk_manager = ExampleRiskManager()\n portfolio_handler = PortfolioHandler(initial_equity, events_queue,\n price_handler, position_sizer, risk_manager)\n compliance = ExampleCompliance(config)\n execution_handler = IBSimulatedExecutionHandler(events_queue,\n price_handler, compliance)\n statistics = SimpleStatistics(config, portfolio_handler)\n backtest = Backtest(price_handler, strategy, portfolio_handler,\n execution_handler, position_sizer, risk_manager, statistics,\n initial_equity)\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\n<mask token>\n\n\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(',')\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\nif __name__ == '__main__':\n main(settings.DEFAULT_CONFIG_FILENAME, False, 'SP500TR', '')\n",
"step-5": "# @Author: Chen yunsheng(Leo YS CHen)\n# @Location: Taiwan\n# @E-mail:leoyenschen@gmail.com\n# @Date: 2017-02-14 00:11:27\n# @Last Modified by: Chen yunsheng\n\nimport click\n\nfrom qstrader import settings\nfrom qstrader.compat import queue\nfrom qstrader.price_parser import PriceParser\nfrom qstrader.price_handler.yahoo_daily_csv_bar import YahooDailyCsvBarPriceHandler\nfrom qstrader.strategy import Strategies, DisplayStrategy\nfrom qstrader.risk_manager.example import ExampleRiskManager\nfrom qstrader.portfolio_handler import PortfolioHandler\nfrom qstrader.compliance.example import ExampleCompliance\nfrom qstrader.execution_handler.ib_simulated import IBSimulatedExecutionHandler\nfrom qstrader.statistics.simple import SimpleStatistics\nfrom qstrader.trading_session.backtest import Backtest\n#====================================================\nimport os,sys\nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndir = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0,dir)\nprint(\"parentdir\")\nprint(parentdir)\nprint(\"dir\")\nprint(dir)\nfrom custom_strategy import CustomStrategy\nfrom custom_position import CustomPositionSizer\n\ndef run(config, testing, tickers, filename):\n\n # Set up variables needed for backtest\n events_queue = queue.Queue()\n csv_dir = config.CSV_DATA_DIR\n initial_equity = PriceParser.parse(500000.00)\n\n # Use Yahoo Daily Price Handler\n price_handler = YahooDailyCsvBarPriceHandler(\n csv_dir, events_queue, tickers\n )\n\n # Use the Buy and Hold Strategy\n strategy = CustomStrategy(tickers, events_queue)\n strategy = Strategies(strategy, DisplayStrategy())\n\n # Use an example Position Sizer\n position_sizer = CustomPositionSizer()\n\n # Use an example Risk Manager\n risk_manager = ExampleRiskManager()\n\n # Use the default Portfolio Handler\n portfolio_handler = PortfolioHandler(\n initial_equity, events_queue, price_handler,\n position_sizer, risk_manager\n )\n\n # Use the ExampleCompliance component\n compliance = ExampleCompliance(config)\n\n # Use a simulated IB Execution Handler\n execution_handler = IBSimulatedExecutionHandler(\n events_queue, price_handler, compliance\n )\n\n # Use the default Statistics\n statistics = SimpleStatistics(config, portfolio_handler)\n\n # Set up the backtest\n backtest = Backtest(\n price_handler, strategy,\n portfolio_handler, execution_handler,\n position_sizer, risk_manager,\n statistics, initial_equity\n )\n results = backtest.simulate_trading(testing=testing)\n statistics.save(filename)\n return results\n\n\"\"\"\n@click.command()\n@click.option('--config', default=settings.DEFAULT_CONFIG_FILENAME, help='Config filename')\n@click.option('--testing/--no-testing', default=False, help='Enable testing mode')\n@click.option('--tickers', default='SP500TR', help='Tickers (use comma)')\n@click.option('--filename', default='', help='Pickle (.pkl) statistics filename')\n\"\"\"\ndef main(config, testing, tickers, filename):\n tickers = tickers.split(\",\")\n config = settings.from_file(config, testing)\n run(config, testing, tickers, filename)\n\n\nif __name__ == \"__main__\":\n main(settings.DEFAULT_CONFIG_FILENAME,False,'SP500TR','')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from click.testing import CliRunner
from apitest.actions.cli import cli
def test_sendto_cli_runs_ok():
runner = CliRunner()
result = runner.invoke(cli, ["sendto"])
assert result.exit_code == 0
|
normal
|
{
"blob_id": "7537deb4560e880365b23a99584d0b1f8fa3daf4",
"index": 5675,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_sendto_cli_runs_ok():\n runner = CliRunner()\n result = runner.invoke(cli, ['sendto'])\n assert result.exit_code == 0\n",
"step-3": "from click.testing import CliRunner\nfrom apitest.actions.cli import cli\n\n\ndef test_sendto_cli_runs_ok():\n runner = CliRunner()\n result = runner.invoke(cli, ['sendto'])\n assert result.exit_code == 0\n",
"step-4": "from click.testing import CliRunner\nfrom apitest.actions.cli import cli\n\n\ndef test_sendto_cli_runs_ok():\n runner = CliRunner()\n result = runner.invoke(cli, [\"sendto\"])\n \n assert result.exit_code == 0\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class UF(object):
def __init__(self, n):
self.parents = [i for i in range(n)]
self.weights = [1 for i in range(n)]
self.n = n
def find(self, i):
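        # Path halving: point each visited node at its grandparent while climbing to the root.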
while i != self.parents[i]:
self.parents[i] = self.parents[self.parents[i]]
i = self.parents[i]
return i
def union(self, p, q):
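        # Union by size: attach the root of the smaller tree under the larger one.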
i = self.find(p)
j = self.find(q)
if i == j:
return
if self.weights[i] < self.weights[j]:
self.parents[i] = j
self.weights[j] += self.weights[i]
else:
self.parents[j] = i
self.weights[i] += self.weights[j]
self.n -= 1
def is_connected(self, p, q):
i = self.find(p)
j = self.find(q)
return i== j
def __len__(self):
return self.n
if __name__ == '__main__':
uf = UF(10)
uf.union(1, 2)
uf.union(3, 4)
uf.union(2, 4)
assert len(uf) == 7
assert uf.is_connected(1, 4)
assert not uf.is_connected(1, 5)
|
normal
|
{
"blob_id": "c8d5b8515a468190d14311118e12a7d414908be6",
"index": 8109,
"step-1": "class UF(object):\n <mask token>\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n <mask token>\n\n\n<mask token>\n",
"step-2": "class UF(object):\n <mask token>\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\n<mask token>\n",
"step-3": "class UF(object):\n\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [(1) for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\n<mask token>\n",
"step-4": "class UF(object):\n\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [(1) for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\nif __name__ == '__main__':\n uf = UF(10)\n uf.union(1, 2)\n uf.union(3, 4)\n uf.union(2, 4)\n assert len(uf) == 7\n assert uf.is_connected(1, 4)\n assert not uf.is_connected(1, 5)\n",
"step-5": "class UF(object):\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [1 for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i== j\n\n def __len__(self):\n return self.n\n\n\nif __name__ == '__main__':\n uf = UF(10)\n uf.union(1, 2)\n uf.union(3, 4)\n uf.union(2, 4)\n\n assert len(uf) == 7\n\n assert uf.is_connected(1, 4)\n assert not uf.is_connected(1, 5)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import random
from turtle import Turtle
colors = ["red", "blue", 'green', 'peru', 'purple', 'pink', 'chocolate', 'grey', 'cyan', 'brown']
class Food(Turtle):
def __init__(self):
super().__init__()
self.shape("circle")
self.penup()
self.color("red")
self.speed("fastest")
self.refresh()
def refresh(self):
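        # Recolor the food and teleport it to a random spot inside the screen bounds.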
self.color(random.choice(colors))
self.goto(random.randint(-280, 280), random.randint(-280, 280))
|
normal
|
{
"blob_id": "8adda42dfebd3f394a1026720465824a836c1dd1",
"index": 7997,
"step-1": "<mask token>\n\n\nclass Food(Turtle):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n self.shape('circle')\n self.penup()\n self.color('red')\n self.speed('fastest')\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(colors))\n self.goto(random.randint(-280, 280), random.randint(-280, 280))\n",
"step-3": "<mask token>\ncolors = ['red', 'blue', 'green', 'peru', 'purple', 'pink', 'chocolate',\n 'grey', 'cyan', 'brown']\n\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n self.shape('circle')\n self.penup()\n self.color('red')\n self.speed('fastest')\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(colors))\n self.goto(random.randint(-280, 280), random.randint(-280, 280))\n",
"step-4": "import random\nfrom turtle import Turtle\ncolors = ['red', 'blue', 'green', 'peru', 'purple', 'pink', 'chocolate',\n 'grey', 'cyan', 'brown']\n\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n self.shape('circle')\n self.penup()\n self.color('red')\n self.speed('fastest')\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(colors))\n self.goto(random.randint(-280, 280), random.randint(-280, 280))\n",
"step-5": "import random\nfrom turtle import Turtle\n\ncolors = [\"red\", \"blue\", 'green', 'peru', 'purple', 'pink', 'chocolate', 'grey', 'cyan', 'brown']\n\n\nclass Food(Turtle):\n\n def __init__(self):\n super().__init__()\n\n self.shape(\"circle\")\n self.penup()\n self.color(\"red\")\n self.speed(\"fastest\")\n self.refresh()\n\n def refresh(self):\n self.color(random.choice(colors))\n self.goto(random.randint(-280, 280), random.randint(-280, 280))\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.feature import ShapelyFeature
from shapely.geometry import shape
def plot(s):
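    # Draw shapely geometry s on a PlateCarree (equirectangular) map with labelled gridlines.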
proj = ccrs.PlateCarree()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')
    ax.add_feature(shape_feature)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=2, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
def plot_merc(s):
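    # Same as plot(), but renders on a Mercator projection.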
proj = ccrs.Mercator()
ax = plt.axes(projection=proj)
ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs=ccrs.PlateCarree())
shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')
    ax.add_feature(shape_feature)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=2, color='gray', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_left = False
gl.xlabel_style = {'size': 10, 'color': 'black'}
gl.ylabel_style = {'size': 10, 'color': 'black'}
return gl
|
normal
|
{
"blob_id": "75754f4032d6e22e53cdbed0f6c640247473faec",
"index": 7606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_merc(s):\n proj = ccrs.Mercator()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n",
"step-3": "<mask token>\n\n\ndef plot(s):\n proj = ccrs.PlateCarree()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n\n\ndef plot_merc(s):\n proj = ccrs.Mercator()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n",
"step-4": "import matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nfrom cartopy.feature import ShapelyFeature\nfrom shapely.geometry import shape\n\n\ndef plot(s):\n proj = ccrs.PlateCarree()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n\n\ndef plot_merc(s):\n proj = ccrs.Mercator()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs\n =ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor=\n '#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=2,\n color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n return gl\n",
"step-5": "import matplotlib.pyplot as plt\nimport cartopy.crs as ccrs\nfrom cartopy.feature import ShapelyFeature\nfrom shapely.geometry import shape\n\n\ndef plot(s):\n proj = ccrs.PlateCarree()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs=ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature);\n \n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n \n return gl\n \n \n \n \n \ndef plot_merc(s):\n proj = ccrs.Mercator()\n ax = plt.axes(projection=proj)\n ax.set_extent((s.bounds[0], s.bounds[2], s.bounds[1], s.bounds[3]), crs=ccrs.PlateCarree())\n shape_feature = ShapelyFeature([s], ccrs.PlateCarree(), facecolor='#AAFFAA', edgecolor='k')\n ax.add_feature(shape_feature);\n\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xlabel_style = {'size': 10, 'color': 'black'}\n gl.ylabel_style = {'size': 10, 'color': 'black'}\n \n return gl",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Imports assumed for this snippet; `app`, `active_players` and `active_teams`
# are created elsewhere in the original app (e.g. app = dash.Dash(__name__)).
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output

individual = html.Div([
html.Div([ # input container
html.Div([
dcc.RadioItems(id='view-radio',
options=[
{'label': i, 'value': i} for i in ['Players',
'Teams']
],
value='Players'
)
]),
html.Div([
dcc.Dropdown(id='drop-input')
]),
], className='two columns'),
html.Div([ # visuals container
html.Div([ # pic column container
            html.H6(id='name-header'),
            html.Img(), # team or player image (html.Img; dcc has no Image component)
], className='two columns'),
html.Div([ # data container
html.Div([ # graph
dcc.Graph()
]),
html.Div([ # table
                dash_table.DataTable()
])
])
], className='eight columns')
])
@app.callback(
Output('drop-input', 'options'),
[Input('view-radio', 'value')]
)
def update_dropdown(selection):
if selection == 'Players':
return [{'label': i, 'value':i} for i in active_players]
if selection == 'Teams':
return [{'label': i, 'value':i} for i in active_teams]
|
normal
|
{
"blob_id": "6c65d63ef07b6cdb2029e6a6e99f6ee35b448c4b",
"index": 3147,
"step-1": "individual = html.Div([\n\n html.Div([ # input container\n \n html.Div([\n dcc.RadioItems(id='view-radio',\n options=[\n {'label': i, 'value': i} for i in ['Players',\n 'Teams']\n ],\n value='Players'\n )\n ]),\n html.Div([\n dcc.Dropdown(id='drop-input')\n ]),\n ], className='two columns'),\n \n html.Div([ # visuals container\n \n html.Div([ # pic column container\n html.H6(id='name-header')\n dcc.Image(), # team or player image\n ], className='two columns'),\n \n html.Div([ # data container\n html.Div([ # graph\n dcc.Graph()\n ]),\n html.Div([ # table\n dash_table.datatable()\n ])\n ])\n ], className='eight columns')\n])\n\n@app.callback(\n Output('drop-input', 'options'),\n [Input('view-radio', 'value')]\n)\ndef update_dropdown(selection):\n if selection == 'Players':\n return [{'label': i, 'value':i} for i in active_players]\n if selection == 'Teams':\n return [{'label': i, 'value':i} for i in active_teams]",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
losettings.py
Contains a class for profiles and methods to save and load them from xml files.
Author: Stonepaw
Version 2.0
Rewrote pretty much everything. Much more modular and requires no maintenance when a new attribute is added.
No longer fully supports profiles from 1.6 and earlier.
Copyright 2010-2012 Stonepaw
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import clr
import System
clr.AddReference("System.Xml")
from System import Convert
from System.IO import File, StreamReader, StreamWriter
from System.Xml import XmlDocument, XmlWriter, XmlWriterSettings
from System.Windows.Forms import MessageBox, MessageBoxIcon, MessageBoxButtons
from locommon import ExcludeRule, ExcludeGroup, Mode, PROFILEFILE, VERSION
class Profile:
"""This class contains all the variables for a profile.
    Use save_to_xml to save the profile to an xml file.
    Use load_from_xml to load the profile from the file.
    Anytime a new variable is added it will automatically be saved and loaded.
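
    Minimal usage sketch (the XmlWriter settings below are illustrative):
        profile = Profile()
        writer = XmlWriter.Create(PROFILEFILE, XmlWriterSettings(Indent=True))
        profile.save_to_xml(writer)
        writer.Close()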
"""
def __init__(self):
self.Version = 0
self.FolderTemplate = ""
self.BaseFolder = ""
self.FileTemplate = ""
self.Name = ""
self.EmptyFolder = ""
self.EmptyData = {}
self.Postfix = {}
self.Prefix = {}
self.Seperator = {}
self.IllegalCharacters = {"?" : "", "/" : "", "\\" : "", "*" : "", ":" : " - ", "<" : "[", ">" : "]", "|" : "!", "\"" : "'"}
self.Months = {1 : "January", 2 : "February", 3 : "March", 4 : "April", 5 : "May", 6 : "June", 7 : "July", 8 :"August", 9 : "September", 10 : "October",
11 : "November", 12 : "December", 13 : "Spring", 14 : "Summer", 15 : "Fall", 16 : "Winter"}
self.TextBox = {}
self.UseFolder = True
self.UseFileName = True
self.ExcludeFolders = []
self.DontAskWhenMultiOne = True
self.ExcludeRules = []
self.ExcludeOperator = "Any"
self.RemoveEmptyFolder = True
self.ExcludedEmptyFolder = []
self.MoveFileless = False
self.FilelessFormat = ".jpg"
self.ExcludeMode = "Do not"
self.FailEmptyValues = False
self.MoveFailed = False
self.FailedFolder = ""
self.FailedFields = []
self.Mode = Mode.Move
self.CopyMode = True
self.AutoSpaceFields = True
self.ReplaceMultipleSpaces = True
self.CopyReadPercentage = True
def duplicate(self):
"""Returns a duplicate of the profile instance."""
duplicate = Profile()
for i in self.__dict__:
if type(getattr(self, i)) is dict:
setattr(duplicate, i, getattr(self, i).copy())
else:
setattr(duplicate, i, getattr(self, i))
return duplicate
def update(self):
if self.Version < 2.0:
            if self.Mode == "Test":
self.Mode = "Simulate"
replacements = {"Language" : "LanguageISO", "Format" : "ShadowFormat", "Count" : "ShadowCount", "Number" : "ShadowNumber", "Series" : "ShadowSeries",
"Title" : "ShadowTitle", "Volume" : "ShadowVolume", "Year" : "ShadowYear"}
for key in self.EmptyData.keys():
if key in replacements:
self.EmptyData[replacements[key]] = self.EmptyData[key]
del(self.EmptyData[key])
insert_control_replacements = {"SeriesComplete" : "Series Complete", "Read" : "Read Percentage", "FirstLetter" : "First Letter", "AgeRating" : "Age Rating",
"AlternateSeriesMulti" : "Alternate Series Multi", "MonthNumber" : "Month Number", "AlternateNumber" : "Alternate Number",
"StartMonth" : "Start Month", "AlternateSeries" : "Alternate Series", "ScanInformation" : "Scan Information", "StartYear" : "Start Year",
"AlternateCount" : "Alternate Count"}
for key in insert_control_replacements :
if key in self.TextBox.keys():
self.TextBox[insert_control_replacements[key]] = self.TextBox[key]
del(self.TextBox[key])
if key in self.Prefix.keys():
self.Prefix[insert_control_replacements[key]] = self.Prefix[key]
del(self.Prefix[key])
if key in self.Postfix.keys():
self.Postfix[insert_control_replacements[key]] = self.Postfix[key]
del(self.Postfix[key])
if key in self.Seperator.keys():
self.Seperator[insert_control_replacements[key]] = self.Seperator[key]
del(self.Seperator[key])
self.Version = VERSION
def save_to_xml(self, xwriter):
"""
        To save this profile instance to an xml file using an XmlWriter.
xwriter->should be a XmlWriter instance.
"""
xwriter.WriteStartElement("Profile")
xwriter.WriteAttributeString("Name", self.Name)
xwriter.WriteStartAttribute("Version")
xwriter.WriteValue(self.Version)
xwriter.WriteEndAttribute()
for var_name in self.__dict__:
var_type = type(getattr(self, var_name))
if var_type is str and var_name != "Name":
self.write_string_to_xml(var_name, xwriter)
elif var_type is bool:
self.write_bool_to_xml(var_name, xwriter)
elif var_type is dict:
self.write_dict_to_xml(var_name, xwriter)
elif var_type is list and var_name != "ExcludeRules":
self.write_list_to_xml(var_name, xwriter)
xwriter.WriteStartElement("ExcludeRules")
xwriter.WriteAttributeString("Operator", self.ExcludeOperator)
xwriter.WriteAttributeString("ExcludeMode", self.ExcludeMode)
for rule in self.ExcludeRules:
if rule:
rule.save_xml(xwriter)
xwriter.WriteEndElement()
xwriter.WriteEndElement()
def write_dict_to_xml(self, attribute_name, xmlwriter, write_empty=False):
"""Writes a dictionary to an xml file in the form of
<attribute_name>
<Item Name="attribute_name key" Value="attribute_name value" />
<Item Name="attribute_name key" Value="attribute_name value" />
etc.
</attribute_name>
        attribute_name->The name of the dictionary attribute to write.
xmlwriter->The xml writer to write with.
write_empty->A bool of whether to write empty values to the xml file. Default is don't write them.
"""
if attribute_name in ("IllegalCharacters", "Months"):
write_empty = True
dictionary = getattr(self, attribute_name)
xmlwriter.WriteStartElement(attribute_name)
for key in dictionary:
if dictionary[key] or write_empty:
xmlwriter.WriteStartElement("Item")
xmlwriter.WriteStartAttribute("Name")
xmlwriter.WriteValue(key)
xmlwriter.WriteEndAttribute()
xmlwriter.WriteStartAttribute("Value")
xmlwriter.WriteValue(dictionary[key])
xmlwriter.WriteEndAttribute()
xmlwriter.WriteEndElement()
xmlwriter.WriteEndElement()
    def write_list_to_xml(self, attribute_name, xmlwriter, write_empty=False):
        """Writes a list to an xml file in the form of
        <attribute_name>
            <Item>value</Item>
            <Item>value</Item>
            etc.
        </attribute_name>

        attribute_name->The name of the list attribute to write.
        xmlwriter->The xml writer to write with.
        write_empty->A bool of whether to write empty values to the xml file. Default is don't write them.
        """
        attribute_list = getattr(self, attribute_name)
        xmlwriter.WriteStartElement(attribute_name)
        for item in attribute_list:
            if item or write_empty:
                xmlwriter.WriteElementString("Item", item)
        xmlwriter.WriteEndElement()

    def write_string_to_xml(self, attribute_name, xmlwriter, write_empty=True):
        """Writes a string to an xml file in the form of
        <attribute_name>string</attribute_name>

        attribute_name->The name of the string attribute to write.
        xmlwriter->The xml writer to write with.
        write_empty->A bool of whether to write empty strings to the xml file. Default is write empty strings.
        """
        string = getattr(self, attribute_name)
        if string or write_empty:
            xmlwriter.WriteElementString(attribute_name, string)

    def write_bool_to_xml(self, attribute_name, xmlwriter):
        """Writes a boolean to an xml file in the form of
        <attribute_name>true/false</attribute_name>

        attribute_name->The name of the attribute to write.
        xmlwriter->The xml writer to write with.
        """
        xmlwriter.WriteStartElement(attribute_name)
        xmlwriter.WriteValue(getattr(self, attribute_name))
        xmlwriter.WriteEndElement()
    def load_from_xml(self, Xml):
        """Loads the profile instance from the Xml.

        Xml->should be a XmlNode/XmlDocument containing a profile node.
        """
        try:
            #Text vars
            self.Name = Xml.Attributes["Name"].Value

            if "Version" in Xml.Attributes:
                self.Version = float(Xml.Attributes["Version"].Value)

            for var_name in self.__dict__:
                if type(getattr(self, var_name)) is str:
                    self.load_text_from_xml(Xml, var_name)

                elif type(getattr(self, var_name)) is bool:
                    self.load_bool_from_xml(Xml, var_name)

                elif type(getattr(self, var_name)) is list and var_name != "ExcludeRules":
                    self.load_list_from_xml(Xml, var_name)

                elif type(getattr(self, var_name)) is dict:
                    self.load_dict_from_xml(Xml, var_name)

            #Exclude Rules
            exclude_rules_node = Xml.SelectSingleNode("ExcludeRules")
            if exclude_rules_node is not None:
                self.ExcludeOperator = exclude_rules_node.Attributes["Operator"].Value
                self.ExcludeMode = exclude_rules_node.Attributes["ExcludeMode"].Value

                for node in exclude_rules_node.ChildNodes:
                    if node.Name == "ExcludeRule":
                        #Older profile files store the rule text in a "Text" attribute, so fall back to it
                        try:
                            rule = ExcludeRule(node.Attributes["Field"].Value, node.Attributes["Operator"].Value, node.Attributes["Value"].Value)
                        except AttributeError:
                            rule = ExcludeRule(node.Attributes["Field"].Value, node.Attributes["Operator"].Value, node.Attributes["Text"].Value)
                        self.ExcludeRules.append(rule)

                    elif node.Name == "ExcludeGroup":
                        group = ExcludeGroup(node.Attributes["Operator"].Value)
                        group.load_from_xml(node)
                        self.ExcludeRules.append(group)

            self.update()

        except Exception, ex:
            print ex
            return False
    def load_text_from_xml(self, xmldoc, name):
        """Loads a string with a specified node name from an XmlDocument and saves it to the attribute. The string should be saved as:
        <name>string</name>

        xmldoc->The XmlDocument to load from.
        name->The attribute to save to and the root node name to load the string from."""
        if xmldoc.SelectSingleNode(name) is not None:
            setattr(self, name, xmldoc.SelectSingleNode(name).InnerText)

    def load_bool_from_xml(self, xmldoc, name):
        """Loads a bool with a specified node name from an XmlDocument and saves it to the attribute. The bool should be saved as:
        <name>true/false</name>

        xmldoc->The XmlDocument to load from.
        name->The attribute to save to and the root node name to load the bool from."""
        if xmldoc.SelectSingleNode(name) is not None:
            setattr(self, name, Convert.ToBoolean(xmldoc.SelectSingleNode(name).InnerText))

    def load_list_from_xml(self, xmldoc, name):
        """Loads a list with a specified node name from an XmlDocument and saves it to the attribute. The list should be saved as:
        <name>
            <Item>list value</Item>
        </name>

        xmldoc->The XmlDocument to load from.
        name->The attribute to save to and the root node name to load the list from."""
        nodes = xmldoc.SelectNodes(name + "/Item")
        if nodes.Count > 0:
            setattr(self, name, [item.InnerText for item in nodes])

    def load_dict_from_xml(self, xmldoc, name):
        """Loads a dict with a specified node name from an XmlDocument and saves it to the attribute. The dict should be saved as:
        <name>
            <Item Name="key" Value="value" />
        </name>

        xmldoc->The XmlDocument to load from.
        name->The attribute to save to and the root node name to load the dict from."""
        nodes = xmldoc.SelectNodes(name + "/Item")
        if nodes.Count > 0:
            dictionary = getattr(self, name)
            for node in nodes:
                if node.Attributes.Count == 2:
                    #Month keys are stored as integers
                    if name == "Months":
                        dictionary[int(node.Attributes["Name"].Value)] = node.Attributes["Value"].Value
                    else:
                        dictionary[node.Attributes["Name"].Value] = node.Attributes["Value"].Value
def load_profiles(file_path):
    """
    Loads profiles from an xml file. If no profiles are found it creates a blank profile.

    file_path->The absolute path to the profile file

    Returns a dict of the found profiles and a list of the last used profile(s)
    """
    profiles, lastused = load_profiles_from_file(file_path)

    if len(profiles) == 0:
        #Just in case
        profiles["Default"] = Profile()
        profiles["Default"].Name = "Default"
        #Some default templates
        profiles["Default"].FileTemplate = "{<series>}{ Vol.<volume>}{ #<number2>}{ (of <count2>)}{ ({<month>, }<year>)}"
        profiles["Default"].FolderTemplate = "{<publisher>}\{<imprint>}\{<series>}{ (<startyear>{ <format>})}"

    if not lastused:
        lastused = [profiles.keys()[0]]

    return profiles, lastused
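
# A minimal usage sketch (hedged): PROFILEFILE, imported from locommon above, is
# the default profile path this module is normally pointed at.
#
#     profiles, lastused = load_profiles(PROFILEFILE)
#     profile = profiles[lastused[0]]
#     print profile.FileTemplate
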
def load_profiles_from_file(file_path):
    """
    Loads profiles from a file.

    file_path->The absolute path to the xml file

    Returns a dict of the profiles and the last used profile name(s)
    """
    profiles = {}
    lastused = ""

    if File.Exists(file_path):
        try:
            with StreamReader(file_path) as xmlfile:
                xmldoc = XmlDocument()
                xmldoc.Load(xmlfile)

                if xmldoc.DocumentElement.Name == "Profiles":
                    nodes = xmldoc.SelectNodes("Profiles/Profile")
                #Individual exported profiles are saved with the document element as Profile
                elif xmldoc.DocumentElement.Name == "Profile":
                    nodes = xmldoc.SelectNodes("Profile")
                #Changed from 1.7 to 2.0 to use Profiles/Profile instead of Settings/Setting
                elif xmldoc.DocumentElement.Name == "Settings":
                    nodes = xmldoc.SelectNodes("Settings/Setting")
                elif xmldoc.DocumentElement.Name == "Setting":
                    nodes = xmldoc.SelectNodes("Setting")
                #No valid root elements
                else:
                    MessageBox.Show(file_path + " is not a valid Library Organizer profile file.", "Not a valid profile file", MessageBoxButtons.OK, MessageBoxIcon.Error)
                    return profiles, lastused

                if nodes.Count > 0:
                    for node in nodes:
                        profile = Profile()
                        profile.Name = node.Attributes["Name"].Value
                        result = profile.load_from_xml(node)

                        #Error loading the profile
                        if result == False:
                            MessageBox.Show("An error occurred loading the profile " + profile.Name + ". That profile has been skipped.")
                        else:
                            profiles[profile.Name] = profile

                #Load the last used profile
                rootnode = xmldoc.DocumentElement
                if rootnode.HasAttribute("LastUsed"):
                    lastused = rootnode.Attributes["LastUsed"].Value.split(",")

        except Exception, ex:
            MessageBox.Show("Something seems to have gone wrong loading the xml file.\n\nThe error was:\n" + str(ex), "Error loading file", MessageBoxButtons.OK, MessageBoxIcon.Error)

    return profiles, lastused
def import_profiles(file_path):
    """
    Loads profiles from an xml file. If no profiles are found it returns an empty dict.

    file_path->The absolute path to the profile file

    Returns a dict of the found profiles.
    """
    profiles, lastused = load_profiles_from_file(file_path)

    return profiles


def save_profiles(file_path, profiles, lastused=""):
    """
    Saves the profiles to an xml file.

    file_path: The complete file path of the file to save to.
    profiles: a dict of profile objects.
    lastused: a list of the last used profile name(s).
    """
    try:
        xSettings = XmlWriterSettings()
        xSettings.Indent = True
        with XmlWriter.Create(file_path, xSettings) as writer:
            writer.WriteStartElement("Profiles")
            if lastused:
                writer.WriteAttributeString("LastUsed", ",".join(lastused))
            for profile in profiles:
                profiles[profile].save_to_xml(writer)
            writer.WriteEndElement()
    except Exception, ex:
        MessageBox.Show("An error occurred writing the settings file. The error was:\n\n" + ex.message, "Error saving settings file", MessageBoxButtons.OK, MessageBoxIcon.Error)
def save_profile(file_path, profile):
    """
    Saves a single profile to an xml file.

    file_path: The complete file path of the file to save to.
    profile: a Profile object.
    """
    try:
        xSettings = XmlWriterSettings()
        xSettings.Indent = True
        with XmlWriter.Create(file_path, xSettings) as writer:
            profile.save_to_xml(writer)
    except Exception, ex:
        MessageBox.Show("An error occurred writing the settings file. The error was:\n\n" + ex.message, "Error saving settings file", MessageBoxButtons.OK, MessageBoxIcon.Error)


def save_last_used(file_path, lastused):
    """Saves the last used profiles to the xml file."""
    x = XmlDocument()
    x.Load(file_path)
    x.DocumentElement.SetAttribute("LastUsed", ",".join(lastused))
    x.Save(file_path)
|
normal
|
{
"blob_id": "b29c11b11fd357c7c4f774c3c6a857297ff0d021",
"index": 3144,
"step-1": "\"\"\"\r\nlosettings.py\r\n\r\nContains a class for profiles and methods to save and load them from xml files.\r\n\r\nAuthor: Stonepaw\r\n\r\nVersion 2.0\r\n\r\n Rewrote pretty much everything. Much more modular and requires no maintence when a new attribute is added.\r\n No longer fully supports profiles from 1.6 and earlier.\r\n\r\nCopyright 2010-2012 Stonepaw\r\n\r\nLicensed under the Apache License, Version 2.0 (the \"License\");\r\nyou may not use this file except in compliance with the License.\r\nYou may obtain a copy of the License at\r\n\r\n http://www.apache.org/licenses/LICENSE-2.0\r\n\r\nUnless required by applicable law or agreed to in writing, software\r\ndistributed under the License is distributed on an \"AS IS\" BASIS,\r\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\nSee the License for the specific language governing permissions and\r\nlimitations under the License.\r\n\"\"\"\r\n\r\nimport clr\r\nimport System\r\n\r\nclr.AddReference(\"System.Xml\")\r\n\r\nfrom System import Convert\r\nfrom System.IO import File, StreamReader, StreamWriter\r\nfrom System.Xml import XmlDocument, XmlWriter, XmlWriterSettings\r\n\r\nfrom System.Windows.Forms import MessageBox, MessageBoxIcon, MessageBoxButtons\r\n\r\nfrom locommon import ExcludeRule, ExcludeGroup, Mode, PROFILEFILE, VERSION\r\n\r\n\r\n\r\nclass Profile:\r\n \"\"\"This class contains all the variables for a profile.\r\n Use save_to_xml to save the profile to a xml file.\r\n Use load_from_xml to load the profile from the file.\r\n\r\n Anytime a new variable is added it will automatically be save and loaded.\r\n \"\"\"\r\n def __init__(self):\r\n \r\n self.Version = 0\r\n\r\n self.FolderTemplate = \"\"\r\n self.BaseFolder = \"\"\r\n self.FileTemplate = \"\"\r\n self.Name = \"\"\r\n self.EmptyFolder = \"\"\r\n\r\n self.EmptyData = {}\r\n \r\n self.Postfix = {}\r\n\r\n self.Prefix = {}\r\n\r\n self.Seperator = {}\r\n\r\n self.IllegalCharacters = {\"?\" : \"\", \"/\" : \"\", \"\\\\\" : \"\", \"*\" : \"\", \":\" : \" - \", \"<\" : \"[\", \">\" : \"]\", \"|\" : \"!\", \"\\\"\" : \"'\"}\r\n\r\n self.Months = {1 : \"January\", 2 : \"February\", 3 : \"March\", 4 : \"April\", 5 : \"May\", 6 : \"June\", 7 : \"July\", 8 :\"August\", 9 : \"September\", 10 : \"October\",\r\n 11 : \"November\", 12 : \"December\", 13 : \"Spring\", 14 : \"Summer\", 15 : \"Fall\", 16 : \"Winter\"}\r\n\r\n self.TextBox = {}\r\n \r\n self.UseFolder = True\r\n \r\n self.UseFileName = True\r\n \r\n self.ExcludeFolders = []\r\n \r\n self.DontAskWhenMultiOne = True\r\n \r\n self.ExcludeRules = []\r\n \r\n self.ExcludeOperator = \"Any\"\r\n \r\n self.RemoveEmptyFolder = True\r\n self.ExcludedEmptyFolder = []\r\n \r\n self.MoveFileless = False \r\n self.FilelessFormat = \".jpg\"\r\n \r\n self.ExcludeMode = \"Do not\"\r\n \r\n\r\n self.FailEmptyValues = False\r\n self.MoveFailed = False\r\n self.FailedFolder = \"\"\r\n self.FailedFields = []\r\n\r\n self.Mode = Mode.Move\r\n\r\n self.CopyMode = True\r\n\r\n self.AutoSpaceFields = True\r\n\r\n self.ReplaceMultipleSpaces = True\r\n\r\n self.CopyReadPercentage = True\r\n\r\n\r\n def duplicate(self):\r\n \"\"\"Returns a duplicate of the profile instance.\"\"\"\r\n duplicate = Profile()\r\n \r\n for i in self.__dict__:\r\n if type(getattr(self, i)) is dict:\r\n setattr(duplicate, i, getattr(self, i).copy())\r\n else:\r\n setattr(duplicate, i, getattr(self, i))\r\n\r\n return duplicate\r\n\r\n\r\n def update(self):\r\n if self.Version < 2.0:\r\n if self.Mode is \"Test\":\r\n self.Mode 
= \"Simulate\"\r\n\r\n replacements = {\"Language\" : \"LanguageISO\", \"Format\" : \"ShadowFormat\", \"Count\" : \"ShadowCount\", \"Number\" : \"ShadowNumber\", \"Series\" : \"ShadowSeries\",\r\n \"Title\" : \"ShadowTitle\", \"Volume\" : \"ShadowVolume\", \"Year\" : \"ShadowYear\"}\r\n\r\n for key in self.EmptyData.keys():\r\n if key in replacements:\r\n self.EmptyData[replacements[key]] = self.EmptyData[key]\r\n del(self.EmptyData[key])\r\n\r\n insert_control_replacements = {\"SeriesComplete\" : \"Series Complete\", \"Read\" : \"Read Percentage\", \"FirstLetter\" : \"First Letter\", \"AgeRating\" : \"Age Rating\",\r\n \"AlternateSeriesMulti\" : \"Alternate Series Multi\", \"MonthNumber\" : \"Month Number\", \"AlternateNumber\" : \"Alternate Number\",\r\n \"StartMonth\" : \"Start Month\", \"AlternateSeries\" : \"Alternate Series\", \"ScanInformation\" : \"Scan Information\", \"StartYear\" : \"Start Year\",\r\n \"AlternateCount\" : \"Alternate Count\"}\r\n for key in insert_control_replacements :\r\n if key in self.TextBox.keys():\r\n self.TextBox[insert_control_replacements[key]] = self.TextBox[key]\r\n del(self.TextBox[key])\r\n\r\n if key in self.Prefix.keys():\r\n self.Prefix[insert_control_replacements[key]] = self.Prefix[key]\r\n del(self.Prefix[key])\r\n\r\n if key in self.Postfix.keys():\r\n self.Postfix[insert_control_replacements[key]] = self.Postfix[key]\r\n del(self.Postfix[key])\r\n\r\n if key in self.Seperator.keys():\r\n self.Seperator[insert_control_replacements[key]] = self.Seperator[key]\r\n del(self.Seperator[key])\r\n\r\n self.Version = VERSION\r\n\r\n \r\n def save_to_xml(self, xwriter):\r\n \"\"\"\r\n To save this profile intance to xml file using a XmlWriter.\r\n xwriter->should be a XmlWriter instance.\r\n \"\"\"\r\n\r\n xwriter.WriteStartElement(\"Profile\")\r\n xwriter.WriteAttributeString(\"Name\", self.Name)\r\n xwriter.WriteStartAttribute(\"Version\")\r\n xwriter.WriteValue(self.Version)\r\n xwriter.WriteEndAttribute()\r\n\r\n for var_name in self.__dict__:\r\n var_type = type(getattr(self, var_name))\r\n\r\n if var_type is str and var_name != \"Name\":\r\n self.write_string_to_xml(var_name, xwriter)\r\n\r\n elif var_type is bool:\r\n self.write_bool_to_xml(var_name, xwriter)\r\n\r\n elif var_type is dict:\r\n self.write_dict_to_xml(var_name, xwriter)\r\n\r\n elif var_type is list and var_name != \"ExcludeRules\":\r\n self.write_list_to_xml(var_name, xwriter)\r\n\r\n xwriter.WriteStartElement(\"ExcludeRules\")\r\n xwriter.WriteAttributeString(\"Operator\", self.ExcludeOperator)\r\n xwriter.WriteAttributeString(\"ExcludeMode\", self.ExcludeMode)\r\n for rule in self.ExcludeRules:\r\n if rule:\r\n rule.save_xml(xwriter)\r\n xwriter.WriteEndElement()\r\n \r\n xwriter.WriteEndElement()\r\n\r\n \r\n def write_dict_to_xml(self, attribute_name, xmlwriter, write_empty=False):\r\n \"\"\"Writes a dictionary to an xml file in the form of\r\n <attribute_name>\r\n <Item Name=\"attribute_name key\" Value=\"attribute_name value\" />\r\n <Item Name=\"attribute_name key\" Value=\"attribute_name value\" />\r\n etc.\r\n </attribute_name>\r\n\r\n attribute_name->The name of the dictonary attribute to write.\r\n xmlwriter->The xml writer to write with.\r\n write_empty->A bool of whether to write empty values to the xml file. 
Default is don't write them.\r\n \"\"\"\r\n if attribute_name in (\"IllegalCharacters\", \"Months\"):\r\n write_empty = True\r\n dictionary = getattr(self, attribute_name)\r\n xmlwriter.WriteStartElement(attribute_name)\r\n for key in dictionary:\r\n if dictionary[key] or write_empty:\r\n xmlwriter.WriteStartElement(\"Item\")\r\n xmlwriter.WriteStartAttribute(\"Name\")\r\n xmlwriter.WriteValue(key)\r\n xmlwriter.WriteEndAttribute()\r\n xmlwriter.WriteStartAttribute(\"Value\")\r\n xmlwriter.WriteValue(dictionary[key])\r\n xmlwriter.WriteEndAttribute()\r\n xmlwriter.WriteEndElement()\r\n xmlwriter.WriteEndElement()\r\n\r\n\r\n def write_list_to_xml(self, attribute_name, xmlwriter, write_empty=False):\r\n \"\"\"Writes a list to an xml file in the form of\r\n <attribute_name>\r\n <Item>value</Item>\r\n <Item>value</Item>\r\n etc.\r\n </attribute_name>\r\n\r\n attribute_name->The name of the list attribute to write.\r\n xmlwriter->The xml writer to write with.\r\n write_empty->A bool of whether to write empty values to the xml file. Default is don't write them.\r\n \"\"\"\r\n attribute_list = getattr(self, attribute_name)\r\n xmlwriter.WriteStartElement(attribute_name)\r\n for item in attribute_list:\r\n if item or write_empty:\r\n xmlwriter.WriteElementString(\"Item\", item)\r\n xmlwriter.WriteEndElement()\r\n\r\n\r\n def write_string_to_xml(self, attribute_name, xmlwriter, write_empty=True):\r\n \"\"\"Writes a string to an xml file in the form of\r\n <attribute_name>string</attribute_name>\r\n\r\n attribute_name->The name of the string attribute to write.\r\n xmlwriter->The xml writer to write with.\r\n write_empty->A bool of whether to write empty strings to the xml file. Default is write empty strings.\r\n \"\"\"\r\n string = getattr(self, attribute_name)\r\n if string or write_empty:\r\n xmlwriter.WriteElementString(attribute_name, string)\r\n\r\n\r\n def write_bool_to_xml(self, attribute_name, xmlwriter):\r\n \"\"\"Writes a boolean to an xml file in the form of\r\n <attribute_name>true/false</attribute_name>\r\n\r\n attribute_name->The name of the attribute to write.\r\n xmlwriter->The xml writer to write with.\r\n \"\"\"\r\n xmlwriter.WriteStartElement(attribute_name)\r\n xmlwriter.WriteValue(getattr(self, attribute_name))\r\n xmlwriter.WriteEndElement()\r\n\r\n\r\n def load_from_xml(self, Xml):\r\n \"\"\"Loads the profile instance from the Xml.\r\n \r\n Xml->should be a XmlNode/XmlDocument containing a profile node.\r\n \"\"\"\r\n try:\r\n #Text vars\r\n self.Name = Xml.Attributes[\"Name\"].Value\r\n\r\n if \"Version\" in Xml.Attributes:\r\n self.Version = float(Xml.Attributes[\"Version\"].Value)\r\n\r\n for var_name in self.__dict__:\r\n if type(getattr(self,var_name)) is str:\r\n self.load_text_from_xml(Xml, var_name)\r\n\r\n\r\n elif type(getattr(self,var_name)) is bool:\r\n self.load_bool_from_xml(Xml, var_name)\r\n\r\n\r\n elif type(getattr(self, var_name)) is list and var_name != \"ExcludeRules\":\r\n self.load_list_from_xml(Xml, var_name)\r\n\r\n elif type(getattr(self, var_name)) is dict:\r\n self.load_dict_from_xml(Xml, var_name)\r\n\r\n #Exclude Rules\r\n exclude_rules_node = Xml.SelectSingleNode(\"ExcludeRules\")\r\n if exclude_rules_node is not None:\r\n self.ExcludeOperator = exclude_rules_node.Attributes[\"Operator\"].Value\r\n\r\n self.ExcludeMode = exclude_rules_node.Attributes[\"ExcludeMode\"].Value\r\n\r\n for node in exclude_rules_node.ChildNodes:\r\n if node.Name == \"ExcludeRule\":\r\n try:\r\n rule = ExcludeRule(node.Attributes[\"Field\"].Value, 
node.Attributes[\"Operator\"].Value, node.Attributes[\"Value\"].Value)\r\n except AttributeError:\r\n rule = ExcludeRule(node.Attributes[\"Field\"].Value, node.Attributes[\"Operator\"].Value, node.Attributes[\"Text\"].Value)\r\n\r\n self.ExcludeRules.append(rule)\r\n \r\n elif node.Name == \"ExcludeGroup\":\r\n group = ExcludeGroup(node.Attributes[\"Operator\"].Value)\r\n group.load_from_xml(node)\r\n self.ExcludeRules.append(group)\r\n\r\n self.update()\r\n\r\n except Exception, ex:\r\n print ex\r\n return False\r\n\r\n\r\n def load_text_from_xml(self, xmldoc, name):\r\n \"\"\"Loads a string with a specified node name from an XmlDocument and saves it to the attribute. The string should be saved as:\r\n <name>string</name>\r\n\r\n xmldoc->The XmlDocment to load from.\r\n name->The attribute to save to and the root node name to load the string from.\"\"\"\r\n if xmldoc.SelectSingleNode(name) is not None:\r\n setattr(self, name, xmldoc.SelectSingleNode(name).InnerText)\r\n\r\n\r\n def load_bool_from_xml(self, xmldoc, name):\r\n \"\"\"Loads a bool with a specified node name from an XmlDocument and saves it to the attribute. The bool should be saved as:\r\n <name>true/false</name>\r\n\r\n xmldoc->The XmlDocment to load from.\r\n name->The attribute to save to and the root node name to load the bool from.\"\"\"\r\n if xmldoc.SelectSingleNode(name) is not None:\r\n setattr(self, name, Convert.ToBoolean(xmldoc.SelectSingleNode(name).InnerText))\r\n\r\n\r\n def load_list_from_xml(self, xmldoc, name):\r\n \"\"\"Loads a list with a specified node name from an XmlDocument and saves it to the attribute. The list should be saved as:\r\n <name>\r\n <Item>list value</Item>\r\n </name>\r\n\r\n xmldoc->The XmlDocment to load from.\r\n name->The attribute to save to and the root node name to load the list from.\"\"\"\r\n nodes = xmldoc.SelectNodes(name + \"/Item\")\r\n if nodes.Count > 0:\r\n setattr(self, name, [item.InnerText for item in nodes])\r\n\r\n\r\n def load_dict_from_xml(self, xmldoc, name):\r\n \"\"\"Loads a dict with a specified node name from an XmlDocument and saves it to the attribute. The dict should be saved as:\r\n <name>\r\n <Item Name=\"key\" Value=\"value\" />\r\n </name>\r\n\r\n xmldoc->The XmlDocment to load from.\r\n name->The attribute to save to and the root node name to load the dict from.\"\"\"\r\n nodes = xmldoc.SelectNodes(name + \"/Item\")\r\n if nodes.Count > 0:\r\n dictionary = getattr(self, name)\r\n for node in nodes:\r\n if node.Attributes.Count == 2:\r\n if name == \"Months\":\r\n dictionary[int(node.Attributes[\"Name\"].Value)] = node.Attributes[\"Value\"].Value\r\n else:\r\n dictionary[node.Attributes[\"Name\"].Value] = node.Attributes[\"Value\"].Value\r\n\r\n\r\n\r\ndef load_profiles(file_path):\r\n \"\"\"\r\n Load profiles from a xml file. 
If no profiles are found it creates a blank profile.\r\n file_path->The absolute path to the profile file\r\n\r\n Returns a dict of the found profiles and a list of the lastused profile(s)\r\n \"\"\"\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n if len(profiles) == 0:\r\n #Just in case\r\n profiles[\"Default\"] = Profile()\r\n profiles[\"Default\"].Name = \"Default\"\r\n #Some default templates\r\n profiles[\"Default\"].FileTemplate = \"{<series>}{ Vol.<volume>}{ #<number2>}{ (of <count2>)}{ ({<month>, }<year>)}\"\r\n profiles[\"Default\"].FolderTemplate = \"{<publisher>}\\{<imprint>}\\{<series>}{ (<startyear>{ <format>})}\"\r\n \r\n if not lastused:\r\n lastused = [profiles.keys()[0]]\r\n \r\n return profiles, lastused \r\n\r\n\r\ndef load_profiles_from_file(file_path):\r\n \"\"\"\r\n Loads profiles from a file.\r\n \r\n file_path->The absolute path the xml file\r\n\r\n Returns a dict of the profiles\r\n \"\"\"\r\n profiles = {}\r\n\r\n lastused = \"\"\r\n\r\n if File.Exists(file_path):\r\n try:\r\n with StreamReader(file_path) as xmlfile:\r\n xmldoc = XmlDocument()\r\n xmldoc.Load(xmlfile)\r\n\r\n if xmldoc.DocumentElement.Name == \"Profiles\":\r\n nodes = xmldoc.SelectNodes(\"Profiles/Profile\")\r\n #Individual exported profiles are saved with the document element as Profile\r\n elif xmldoc.DocumentElement.Name == \"Profile\":\r\n nodes = xmldoc.SelectNodes(\"Profile\")\r\n\r\n #Changed from 1.7 to 2.0 to use Profiles/Profile instead of Settings/Setting\r\n elif xmldoc.DocumentElement.Name == \"Settings\":\r\n nodes = xmldoc.SelectNodes(\"Settings/Setting\")\r\n elif xmldoc.DocumentElement.Name == \"Setting\":\r\n nodes = xmldoc.SelectNodes(\"Setting\")\r\n\r\n #No valid root elements\r\n else:\r\n MessageBox.Show(file_path + \" is not a valid Library Organizer profile file.\", \"Not a valid profile file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n return profiles, lastused\r\n\r\n if nodes.Count > 0:\r\n for node in nodes: \r\n profile = Profile()\r\n profile.Name = node.Attributes[\"Name\"].Value\r\n result = profile.load_from_xml(node)\r\n\r\n #Error loading the profile\r\n if result == False:\r\n MessageBox.Show(\"An error occured loading the profile \" + profile.Name + \". That profile has been skipped.\")\r\n\r\n else:\r\n profiles[profile.Name] = profile\r\n\r\n\r\n #Load the last used profile\r\n rootnode = xmldoc.DocumentElement\r\n if rootnode.HasAttribute(\"LastUsed\"):\r\n lastused = rootnode.Attributes[\"LastUsed\"].Value.split(\",\")\r\n\r\n except Exception, ex:\r\n MessageBox.Show(\"Something seems to have gone wrong loading the xml file.\\n\\nThe error was:\\n\" + str(ex), \"Error loading file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n\r\n return profiles, lastused\r\n\r\n\r\ndef import_profiles(file_path):\r\n \"\"\"\r\n Load profiles from a xml file. 
If no profiles are found it returns an empty dict.\r\n file_path->The absolute path to the profile file\r\n\r\n Returns a dict of the found profiles.\r\n \"\"\"\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n return profiles\r\n\r\n\r\ndef save_profiles(file_path, profiles, lastused=\"\"):\r\n \"\"\"\r\n Saves the profiles to an xml file.\r\n\r\n settings_file: The complete file path of the file to save to.\r\n profiles: a dict of profile objects.\r\n lastused: a string containing the last used profile.\r\n \"\"\"\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n writer.WriteStartElement(\"Profiles\")\r\n if lastused:\r\n writer.WriteAttributeString(\"LastUsed\", \",\".join(lastused))\r\n for profile in profiles:\r\n profiles[profile].save_to_xml(writer)\r\n writer.WriteEndElement()\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n\r\n\r\ndef save_profile(file_path, profile):\r\n \"\"\"\r\n Saves a single profile to an xml file.\r\n\r\n settings_file: The complete file path of the file to save to.\r\n profile: a Profile object.\r\n \"\"\"\r\n try:\r\n xSettings = XmlWriterSettings()\r\n xSettings.Indent = True\r\n with XmlWriter.Create(file_path, xSettings) as writer:\r\n profile.save_to_xml(writer)\r\n except Exception, ex:\r\n MessageBox.Show(\"An error occured writing the settings file. The error was:\\n\\n\" + ex.message, \"Error saving settings file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n\r\n\r\ndef save_last_used(file_path, lastused):\r\n \"Saves the lastused profiles to the xml file.\"\"\"\r\n x = XmlDocument()\r\n x.Load(file_path)\r\n x.DocumentElement.SetAttribute(\"LastUsed\", \",\".join(lastused))\r\n x.Save(file_path)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |